From 2a9122927ddee9d229b87868d05d33e130e7dc51 Mon Sep 17 00:00:00 2001 From: nick-w-nick <43578531+nick-w-nick@users.noreply.github.com> Date: Tue, 22 Oct 2024 20:56:28 -0400 Subject: [PATCH 001/100] docs: Fix typo (#7045) --- docs/core_docs/docs/introduction.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/core_docs/docs/introduction.mdx b/docs/core_docs/docs/introduction.mdx index 501cebfbaa7e..868895f490ae 100644 --- a/docs/core_docs/docs/introduction.mdx +++ b/docs/core_docs/docs/introduction.mdx @@ -71,7 +71,7 @@ For a deeper dive into LangGraph concepts, check out [this page](https://langcha ## [API reference](https://api.js.langchain.com) -Head to the reference section for full documentation of all classes and methods in the LangChain Python packages. +Head to the reference section for full documentation of all classes and methods in the LangChain JavaScript packages. ## Ecosystem From 59217da89c8a1b8499c043e347976790c2bda908 Mon Sep 17 00:00:00 2001 From: Laurens Tsestigh <90600075+LaurensTsestigh@users.noreply.github.com> Date: Wed, 23 Oct 2024 03:02:15 +0200 Subject: [PATCH 002/100] feat(community): ElasticVectorSearch: add a not_exists filter (#7036) --- .../src/vectorstores/elasticsearch.ts | 6 ++++++ .../vectorstores/tests/elasticsearch.int.test.ts | 15 +++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/libs/langchain-community/src/vectorstores/elasticsearch.ts b/libs/langchain-community/src/vectorstores/elasticsearch.ts index cccc439bb7a9..05a79d3aed4a 100644 --- a/libs/langchain-community/src/vectorstores/elasticsearch.ts +++ b/libs/langchain-community/src/vectorstores/elasticsearch.ts @@ -342,6 +342,12 @@ export class ElasticVectorSearch extends VectorStore { field: metadataField, }, }); + } else if (condition.operator === "not_exists") { + must_not.push({ + exists: { + field: metadataField, + }, + }); } else if (condition.operator === "exclude") { const toExclude = { [metadataField]: condition.value }; must_not.push({ diff --git a/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts b/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts index 41045196892a..52f885065796 100644 --- a/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/elasticsearch.int.test.ts @@ -101,8 +101,8 @@ describe("ElasticVectorSearch", () => { { pageContent: "responsible", metadata: { a: createdAt } }, { pageContent: "friendly", metadata: { a: createdAt } }, { pageContent: "confident", metadata: { a: createdAt } }, - { pageContent: "generous", metadata: { a: createdAt } }, - { pageContent: "compassionate", metadata: { a: createdAt } }, + { pageContent: "generous", metadata: { a: null } }, + { pageContent: "compassionate", metadata: {} }, ]); const results = await store.similaritySearch("*", 11); expect(results).toHaveLength(11); @@ -113,7 +113,7 @@ describe("ElasticVectorSearch", () => { operator: "exclude", }, ]); - expect(results2).toHaveLength(1); + expect(results2).toHaveLength(3); const results3 = await store.similaritySearch("*", 11, [ { field: "a", @@ -121,7 +121,14 @@ describe("ElasticVectorSearch", () => { operator: "exclude", }, ]); - expect(results3).toHaveLength(1); + expect(results3).toHaveLength(3); + const results4 = await store.similaritySearch("*", 11, [ + { + field: "a", + operator: "not_exists", + }, + ]); + expect(results4).toHaveLength(2); }); test.skip("ElasticVectorSearch integration with text splitting 
metadata", async () => { From fb3633f5f58be0202d7c168ef4bbc0d4bb6ac040 Mon Sep 17 00:00:00 2001 From: Rafael Miller <150964962+rafaelsideguide@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:24:56 -0300 Subject: [PATCH 003/100] chore(community): Updated Firecrawl Document Loaders to v1 (#6818) Co-authored-by: jacoblee93 --- .../web_loaders/firecrawl.ipynb | 4 +- libs/langchain-community/package.json | 4 +- .../src/document_loaders/web/firecrawl.ts | 43 ++++++++++++++----- yarn.lock | 32 ++++++++++---- 4 files changed, 62 insertions(+), 21 deletions(-) diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb index 4126bc084b0a..31ef76f0797c 100644 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb @@ -81,7 +81,9 @@ "\n", "Here's an example of how to use the `FireCrawlLoader` to load web search results:\n", "\n", - "Firecrawl offers 2 modes: `scrape` and `crawl`. In `scrape` mode, Firecrawl will only scrape the page you provide. In `crawl` mode, Firecrawl will crawl the entire website.\n", + "Firecrawl offers 3 modes: `scrape`, `crawl`, and `map`. In `scrape` mode, Firecrawl will only scrape the page you provide. In `crawl` mode, Firecrawl will crawl the entire website. In `map` mode, Firecrawl will return semantic links related to the website.\n", + "\n", + "The `formats` (`scrapeOptions.formats` for `crawl` mode) parameter allows selection from `\"markdown\"`, `\"html\"`, or `\"rawHtml\"`. However, the Loaded Document will return content in only one format, prioritizing as follows: `markdown`, then `html`, and finally `rawHtml`.\n", "\n", "Now we can instantiate our model object and load documents:" ] diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index cf2340e1779f..032425862dfb 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -84,7 +84,7 @@ "@langchain/standard-tests": "0.0.0", "@layerup/layerup-security": "^1.5.12", "@libsql/client": "^0.14.0", - "@mendable/firecrawl-js": "^0.0.36", + "@mendable/firecrawl-js": "^1.4.3", "@mlc-ai/web-llm": ">=0.2.62 <0.3.0", "@mozilla/readability": "^0.4.4", "@neondatabase/serverless": "^0.9.1", @@ -249,7 +249,7 @@ "@langchain/core": ">=0.2.21 <0.4.0", "@layerup/layerup-security": "^1.5.12", "@libsql/client": "^0.14.0", - "@mendable/firecrawl-js": "^0.0.13", + "@mendable/firecrawl-js": "^1.4.3", "@mlc-ai/web-llm": "*", "@mozilla/readability": "*", "@neondatabase/serverless": "*", diff --git a/libs/langchain-community/src/document_loaders/web/firecrawl.ts b/libs/langchain-community/src/document_loaders/web/firecrawl.ts index 05e9dad25857..b1ca1f9bfe5a 100644 --- a/libs/langchain-community/src/document_loaders/web/firecrawl.ts +++ b/libs/langchain-community/src/document_loaders/web/firecrawl.ts @@ -23,15 +23,17 @@ interface FirecrawlLoaderParameters { */ apiUrl?: string; /** - * Mode of operation. Can be either "crawl" or "scrape". If not provided, the default value is "crawl". + * Mode of operation. Can be "crawl", "scrape", or "map". If not provided, the default value is "crawl". 
*/ - mode?: "crawl" | "scrape"; + mode?: "crawl" | "scrape" | "map"; params?: Record; } interface FirecrawlDocument { - markdown: string; - metadata: Record; + markdown?: string; + html?: string; + rawHtml?: string; + metadata?: Record; } /** @@ -54,7 +56,7 @@ export class FireCrawlLoader extends BaseDocumentLoader { private url: string; - private mode: "crawl" | "scrape"; + private mode: "crawl" | "scrape" | "map"; private params?: Record; @@ -96,16 +98,37 @@ export class FireCrawlLoader extends BaseDocumentLoader { let firecrawlDocs: FirecrawlDocument[]; if (this.mode === "scrape") { - const response = await app.scrapeUrl(this.url, this.params); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const response = await app.scrapeUrl(this.url, this.params as any); if (!response.success) { throw new Error( `Firecrawl: Failed to scrape URL. Error: ${response.error}` ); } - firecrawlDocs = [response.data as FirecrawlDocument]; + firecrawlDocs = [response] as FirecrawlDocument[]; } else if (this.mode === "crawl") { - const response = await app.crawlUrl(this.url, this.params, true); - firecrawlDocs = response as FirecrawlDocument[]; + const response = await app.crawlUrl(this.url, this.params); + if (!response.success) { + throw new Error( + `Firecrawl: Failed to crawl URL. Error: ${response.error}` + ); + } + firecrawlDocs = response.data as FirecrawlDocument[]; + } else if (this.mode === "map") { + const response = await app.mapUrl(this.url, this.params); + if (!response.success) { + throw new Error( + `Firecrawl: Failed to map URL. Error: ${response.error}` + ); + } + firecrawlDocs = response.links as FirecrawlDocument[]; + + return firecrawlDocs.map( + (doc) => + new Document({ + pageContent: JSON.stringify(doc), + }) + ); } else { throw new Error( `Unrecognized mode '${this.mode}'. 
Expected one of 'crawl', 'scrape'.` @@ -115,7 +138,7 @@ export class FireCrawlLoader extends BaseDocumentLoader { return firecrawlDocs.map( (doc) => new Document({ - pageContent: doc.markdown || "", + pageContent: doc.markdown || doc.html || doc.rawHtml || "", metadata: doc.metadata || {}, }) ); diff --git a/yarn.lock b/yarn.lock index 9be00a097637..1f85107585eb 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11482,7 +11482,7 @@ __metadata: "@langchain/standard-tests": 0.0.0 "@layerup/layerup-security": ^1.5.12 "@libsql/client": ^0.14.0 - "@mendable/firecrawl-js": ^0.0.36 + "@mendable/firecrawl-js": ^1.4.3 "@mlc-ai/web-llm": ">=0.2.62 <0.3.0" "@mozilla/readability": ^0.4.4 "@neondatabase/serverless": ^0.9.1 @@ -11655,7 +11655,7 @@ __metadata: "@langchain/core": ">=0.2.21 <0.4.0" "@layerup/layerup-security": ^1.5.12 "@libsql/client": ^0.14.0 - "@mendable/firecrawl-js": ^0.0.13 + "@mendable/firecrawl-js": ^1.4.3 "@mlc-ai/web-llm": "*" "@mozilla/readability": "*" "@neondatabase/serverless": "*" @@ -13069,16 +13069,16 @@ __metadata: languageName: node linkType: hard -"@mendable/firecrawl-js@npm:^0.0.36": - version: 0.0.36 - resolution: "@mendable/firecrawl-js@npm:0.0.36" +"@mendable/firecrawl-js@npm:^1.4.3": + version: 1.4.3 + resolution: "@mendable/firecrawl-js@npm:1.4.3" dependencies: axios: ^1.6.8 - dotenv: ^16.4.5 - uuid: ^9.0.1 + isows: ^1.0.4 + typescript-event-target: ^1.1.1 zod: ^3.23.8 zod-to-json-schema: ^3.23.0 - checksum: 93ac8a7d9d25c04d4f618e282c136af06cf7712ec3402922531094c3cdab0e59d6f484a7f583022032eb58f914a0494193f2fd22986edd0f6712a29545edf95a + checksum: ee36a4ceaca326d1ae86a714500dd0698060a63e84e0d5c83fb14967ac36755cd4b0b42a260c5e7b63914551a94ead2f4c712a76b9e58a6580dd5ca8628e851a languageName: node linkType: hard @@ -31332,6 +31332,15 @@ __metadata: languageName: node linkType: hard +"isows@npm:^1.0.4": + version: 1.0.4 + resolution: "isows@npm:1.0.4" + peerDependencies: + ws: "*" + checksum: a3ee62e3d6216abb3adeeb2a551fe2e7835eac87b05a6ecc3e7739259bf5f8e83290501f49e26137390c8093f207fc3378d4a7653aab76ad7bbab4b2dba9c5b9 + languageName: node + linkType: hard + "isstream@npm:0.1.2": version: 0.1.2 resolution: "isstream@npm:0.1.2" @@ -41933,6 +41942,13 @@ __metadata: languageName: node linkType: hard +"typescript-event-target@npm:^1.1.1": + version: 1.1.1 + resolution: "typescript-event-target@npm:1.1.1" + checksum: ad9eaf0f3c161c4062c33d80ac5235e7c32c5b6f79eabcf23f9c39c7617b9337a4d9d4a2249340a84626fa68abeed38f5973dff547fecd71164f96d0b11af516 + languageName: node + linkType: hard + "typescript@npm:<5.2.0, typescript@npm:~5.1.6": version: 5.1.6 resolution: "typescript@npm:5.1.6" From 05cf7b0fd751cba30b39d2b0c5933ea857b08333 Mon Sep 17 00:00:00 2001 From: Lan <913088741@qq.com> Date: Wed, 23 Oct 2024 09:25:16 +0800 Subject: [PATCH 004/100] chore(langchain): export AgentRunnableSequence (#7046) Co-authored-by: Jacob Lee --- langchain/src/agents/index.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/langchain/src/agents/index.ts b/langchain/src/agents/index.ts index e03c64bc50a4..2ddc69144bd2 100644 --- a/langchain/src/agents/index.ts +++ b/langchain/src/agents/index.ts @@ -7,6 +7,7 @@ export { LLMSingleActionAgent, type LLMSingleActionAgentInput, type OutputParserArgs, + type AgentRunnableSequence, } from "./agent.js"; export { JsonToolkit, From 4aee92415b7899045d806d83287ff7403229ff88 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 22 Oct 2024 18:35:11 -0700 Subject: [PATCH 005/100] chore(community): Release 0.3.8 (#7050) --- libs/langchain-community/package.json | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 032425862dfb..ce97db70f3c5 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.7", + "version": "0.3.8", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { From 7981aa950bd4042510704f8eb73221987aa8fd82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20van=20der=20Gali=C3=ABn?= <4962933+florivdg@users.noreply.github.com> Date: Wed, 23 Oct 2024 18:41:07 +0200 Subject: [PATCH 006/100] fix(community): Escape libSQL vector store inserts by using object syntax with placeholders (#7041) Co-authored-by: jacoblee93 --- .../docs/integrations/vectorstores/libsql.mdx | 15 +++---- .../src/vectorstores/libsql.ts | 36 ++++----------- .../src/vectorstores/tests/libsql.int.test.ts | 45 +++++++++++++++++++ 3 files changed, 61 insertions(+), 35 deletions(-) create mode 100644 libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts diff --git a/docs/core_docs/docs/integrations/vectorstores/libsql.mdx b/docs/core_docs/docs/integrations/vectorstores/libsql.mdx index 2d836b54ae04..19a44cc4ac3c 100644 --- a/docs/core_docs/docs/integrations/vectorstores/libsql.mdx +++ b/docs/core_docs/docs/integrations/vectorstores/libsql.mdx @@ -10,9 +10,9 @@ This guide provides a quick overview for getting started with libSQL vector stor ## Integration details -| Class | Package | JS support | Package latest | +| Class | Package | PY support | Package latest | | ------------------- | ---------------------- | ---------- | ----------------------------------------------------------------- | -| `LibSQLVectorStore` | `@langchain/community` | ✅ | ![npm version](https://img.shields.io/npm/v/@langchain/community) | +| `LibSQLVectorStore` | `@langchain/community` | ❌ | ![npm version](https://img.shields.io/npm/v/@langchain/community) | ## Setup @@ -54,7 +54,7 @@ libsql://[database-name]-[your-username].turso.io Execute the following SQL command to create a new table or add the embedding column to an existing table. -Make sure to mopdify the following parts of the SQL: +Make sure to modify the following parts of the SQL: - `TABLE_NAME` is the name of the table you want to create. - `content` is used to store the `Document.pageContent` values. 
@@ -70,7 +70,7 @@ CREATE TABLE IF NOT EXISTS TABLE_NAME ( ); ``` -Now create an index on the `EMBEDDING_COLUMN` column: +Now create an index on the `EMBEDDING_COLUMN` column - the index name is important!: ```sql CREATE INDEX IF NOT EXISTS idx_TABLE_NAME_EMBEDDING_COLUMN ON TABLE_NAME(libsql_vector_idx(EMBEDDING_COLUMN)); @@ -103,9 +103,8 @@ const libsqlClient = createClient({ const vectorStore = new LibSQLVectorStore(embeddings, { db: libsqlClient, - tableName: "TABLE_NAME", - embeddingColumn: "EMBEDDING_COLUMN", - dimensions: 1536, + table: "TABLE_NAME", + column: "EMBEDDING_COLUMN", }); ``` @@ -154,7 +153,7 @@ const similaritySearchWithScoreResults = for (const [doc, score] of similaritySearchWithScoreResults) { console.log( - `${score.toFixed(3)} ${doc.pageContent} [${JSON.stringify(doc.metadata)}` + `${score.toFixed(3)} ${doc.pageContent} [${JSON.stringify(doc.metadata)}]` ); } ``` diff --git a/libs/langchain-community/src/vectorstores/libsql.ts b/libs/langchain-community/src/vectorstores/libsql.ts index 3740c62d99db..05c77da7489c 100644 --- a/libs/langchain-community/src/vectorstores/libsql.ts +++ b/libs/langchain-community/src/vectorstores/libsql.ts @@ -82,10 +82,10 @@ export class LibSQLVectorStore extends VectorStore { for (let i = 0; i < rows.length; i += batchSize) { const chunk = rows.slice(i, i + batchSize); - const insertQueries = chunk.map( - (row) => - `INSERT INTO ${this.table} (content, metadata, ${this.column}) VALUES (${row.content}, ${row.metadata}, vector(${row.embedding})) RETURNING id` - ); + const insertQueries = chunk.map((row) => ({ + sql: `INSERT INTO ${this.table} (content, metadata, ${this.column}) VALUES (?, ?, ?) RETURNING id`, + args: [row.content, row.metadata, row.embedding], + })); const results = await this.db.batch(insertQueries); @@ -124,17 +124,19 @@ export class LibSQLVectorStore extends VectorStore { const queryVector = `[${query.join(",")}]`; const sql = ` - SELECT content, metadata, vector_distance_cos(${this.column}, vector(${queryVector})) AS distance - FROM vector_top_k('${this.table}_idx', vector(${queryVector}), ${k}) - JOIN ${this.table} ON ${this.table}.rowid = id + SELECT ${this.table}.id, ${this.table}.content, ${this.table}.metadata, vector_distance_cos(${this.table}.${this.column}, vector('${queryVector}')) AS distance + FROM vector_top_k('idx_${this.table}_${this.column}', vector('${queryVector}'), ${k}) AS top_k + JOIN ${this.table} ON top_k.rowid = ${this.table}.id `; const results = await this.db.execute(sql); + // eslint-disable-next-line @typescript-eslint/no-explicit-any return results.rows.map((row: any) => { const metadata = JSON.parse(row.metadata); const doc = new Document({ + id: row.id, metadata, pageContent: row.content, }); @@ -143,26 +145,6 @@ export class LibSQLVectorStore extends VectorStore { }); } - /** - * Deletes vectors from the store. - * @param {Object} params - Delete parameters. - * @param {string[] | number[]} [params.ids] - The ids of the vectors to delete. - * @returns {Promise} - */ - async delete(params: { ids?: string[] | number[] }): Promise { - if (!params.ids) { - await this.db.execute(`DELETE FROM ${this.table}`); - return; - } - - const idsToDelete = params.ids.join(", "); - - await this.db.execute({ - sql: `DELETE FROM ${this.table} WHERE id IN (?)`, - args: [idsToDelete], - }); - } - /** * Creates a new LibSQLVectorStore instance from texts. * @param {string[]} texts - The texts to add to the store. 
diff --git a/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts b/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts new file mode 100644 index 000000000000..63fe6cbe2df7 --- /dev/null +++ b/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts @@ -0,0 +1,45 @@ +/* eslint-disable no-process-env */ +/* eslint-disable @typescript-eslint/no-non-null-assertion */ +import { expect, test } from "@jest/globals"; +import { OpenAIEmbeddings } from "@langchain/openai"; +import { Document } from "@langchain/core/documents"; +import { createClient } from "@libsql/client"; + +import { LibSQLVectorStore } from "../libsql.js"; + +test("can create and query", async () => { + const client = createClient({ + url: process.env.LIBSQL_URL!, + authToken: process.env.LIBSQL_AUTH_TOKEN, + }); + const vectorStore = new LibSQLVectorStore( + new OpenAIEmbeddings({ + model: "text-embedding-3-small", + dimensions: 1536, + }), + { + db: client, + table: "documents", + column: "embeddings", + } + ); + const ids = await vectorStore.addDocuments([ + new Document({ + pageContent: "added first page", + }), + new Document({ + pageContent: "added second page", + }), + new Document({ + pageContent: "added third page", + }), + ]); + const nextId = await vectorStore.addDocuments([ + new Document({ + pageContent: "added another first page", + }), + ]); + ids.push(nextId[0]); + const results = await vectorStore.similaritySearchWithScore("added first", 4); + expect(results.length).toBe(4); +}); From 71eed8f7e4474d4c0581b6ea2714b0c552d9a9e6 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Thu, 24 Oct 2024 11:15:52 -0700 Subject: [PATCH 007/100] feat(core): Allow omitting tags from runnable sequences (#7055) --- langchain-core/src/runnables/base.ts | 55 ++++++++++++++----- .../src/runnables/tests/runnable.test.ts | 14 +++++ 2 files changed, 55 insertions(+), 14 deletions(-) diff --git a/langchain-core/src/runnables/base.ts b/langchain-core/src/runnables/base.ts index 0890dc1463cb..281c95b4a881 100644 --- a/langchain-core/src/runnables/base.ts +++ b/langchain-core/src/runnables/base.ts @@ -1701,6 +1701,15 @@ export class RunnableRetry< } } +export type RunnableSequenceFields = { + first: Runnable; + middle?: Runnable[]; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + last: Runnable; + name?: string; + omitSequenceTags?: boolean; +}; + /** * A sequence of runnables, where the output of each is the input of the next. * @example @@ -1729,22 +1738,19 @@ export class RunnableSequence< // eslint-disable-next-line @typescript-eslint/no-explicit-any protected last: Runnable; + omitSequenceTags = false; + lc_serializable = true; lc_namespace = ["langchain_core", "runnables"]; - constructor(fields: { - first: Runnable; - middle?: Runnable[]; - // eslint-disable-next-line @typescript-eslint/no-explicit-any - last: Runnable; - name?: string; - }) { + constructor(fields: RunnableSequenceFields) { super(fields); this.first = fields.first; this.middle = fields.middle ?? this.middle; this.last = fields.last; this.name = fields.name; + this.omitSequenceTags = fields.omitSequenceTags ?? this.omitSequenceTags; } get steps() { @@ -1773,7 +1779,9 @@ export class RunnableSequence< const promise = step.invoke( nextStepInput, patchConfig(config, { - callbacks: runManager?.getChild(`seq:step:${i + 1}`), + callbacks: runManager?.getChild( + this.omitSequenceTags ? 
undefined : `seq:step:${i + 1}` + ), }) ); nextStepInput = await raceWithSignal(promise, options?.signal); @@ -1785,7 +1793,9 @@ export class RunnableSequence< finalOutput = await this.last.invoke( nextStepInput, patchConfig(config, { - callbacks: runManager?.getChild(`seq:step:${this.steps.length}`), + callbacks: runManager?.getChild( + this.omitSequenceTags ? undefined : `seq:step:${this.steps.length}` + ), }) ); } catch (e) { @@ -1846,7 +1856,9 @@ export class RunnableSequence< const promise = step.batch( nextStepInputs, runManagers.map((runManager, j) => { - const childRunManager = runManager?.getChild(`seq:step:${i + 1}`); + const childRunManager = runManager?.getChild( + this.omitSequenceTags ? undefined : `seq:step:${i + 1}` + ); return patchConfig(configList[j], { callbacks: childRunManager }); }), batchOptions @@ -1892,7 +1904,9 @@ export class RunnableSequence< let finalGenerator = steps[0].transform( inputGenerator(), patchConfig(otherOptions, { - callbacks: runManager?.getChild(`seq:step:1`), + callbacks: runManager?.getChild( + this.omitSequenceTags ? undefined : `seq:step:1` + ), }) ); for (let i = 1; i < steps.length; i += 1) { @@ -1900,7 +1914,9 @@ export class RunnableSequence< finalGenerator = await step.transform( finalGenerator, patchConfig(otherOptions, { - callbacks: runManager?.getChild(`seq:step:${i + 1}`), + callbacks: runManager?.getChild( + this.omitSequenceTags ? undefined : `seq:step:${i + 1}` + ), }) ); } @@ -1998,13 +2014,24 @@ export class RunnableSequence< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunnableLike ], - name?: string + nameOrFields?: + | string + | Omit< + RunnableSequenceFields, + "first" | "middle" | "last" + > ) { + let extra: Record = {}; + if (typeof nameOrFields === "string") { + extra.name = nameOrFields; + } else if (nameOrFields !== undefined) { + extra = nameOrFields; + } return new RunnableSequence>({ + ...extra, first: _coerceToRunnable(first), middle: runnables.slice(0, -1).map(_coerceToRunnable), last: _coerceToRunnable(runnables[runnables.length - 1]), - name, }); } } diff --git a/langchain-core/src/runnables/tests/runnable.test.ts b/langchain-core/src/runnables/tests/runnable.test.ts index 23438aa4b372..c53032042f86 100644 --- a/langchain-core/src/runnables/tests/runnable.test.ts +++ b/langchain-core/src/runnables/tests/runnable.test.ts @@ -437,6 +437,20 @@ test("Create a runnable sequence with a static method with invalid output and ca expect(error?.lc_error_code).toEqual("OUTPUT_PARSING_FAILURE"); }); +test("Create a runnable sequence with a static method with no tags", async () => { + const seq = RunnableSequence.from([() => "foo", () => "bar"], { + omitSequenceTags: true, + }); + const events = []; + for await (const event of seq.streamEvents({}, { version: "v2" })) { + events.push(event); + } + expect(events.length).toBeGreaterThan(1); + for (const event of events) { + expect(event.tags?.find((tag) => tag.startsWith("seq:"))).toBeUndefined(); + } +}); + test("RunnableSequence can pass config to every step in batched request", async () => { let numSeen = 0; From a0a8aef579688fa6fbe6ea037b5d6f0c3a11bdf2 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Thu, 24 Oct 2024 11:43:57 -0700 Subject: [PATCH 008/100] chore(core): Release 0.3.14 (#7056) --- langchain-core/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain-core/package.json b/langchain-core/package.json index 80fa34025c24..86703d311434 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ 
-1,6 +1,6 @@ { "name": "@langchain/core", - "version": "0.3.13", + "version": "0.3.14", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { From 6997cb7d78b681809c7518383bfe096ba392ce4f Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Thu, 24 Oct 2024 19:21:24 -0700 Subject: [PATCH 009/100] feat(core,langchain,community): Bump to LangSmith 0.2.0 (#7058) --- examples/package.json | 2 +- langchain-core/package.json | 2 +- .../tracers/tests/langsmith_interop.test.ts | 14 +++++--- .../src/tracers/tracer_langchain.ts | 7 +++- langchain/package.json | 2 +- libs/langchain-community/package.json | 2 +- yarn.lock | 35 +++++-------------- 7 files changed, 27 insertions(+), 37 deletions(-) diff --git a/examples/package.json b/examples/package.json index 4d3048e5912b..48ad17ca960b 100644 --- a/examples/package.json +++ b/examples/package.json @@ -90,7 +90,7 @@ "ioredis": "^5.3.2", "js-yaml": "^4.1.0", "langchain": "workspace:*", - "langsmith": "^0.1.56", + "langsmith": "^0.2.0", "mongodb": "^6.3.0", "pg": "^8.11.0", "pickleparser": "^0.2.1", diff --git a/langchain-core/package.json b/langchain-core/package.json index 86703d311434..cdcb6181e0cc 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ -37,7 +37,7 @@ "camelcase": "6", "decamelize": "1.2.0", "js-tiktoken": "^1.0.12", - "langsmith": "^0.1.65", + "langsmith": "^0.2.0", "mustache": "^4.2.0", "p-queue": "^6.6.2", "p-retry": "4", diff --git a/langchain-core/src/tracers/tests/langsmith_interop.test.ts b/langchain-core/src/tracers/tests/langsmith_interop.test.ts index 9f684c9d75ef..11d654fca129 100644 --- a/langchain-core/src/tracers/tests/langsmith_interop.test.ts +++ b/langchain-core/src/tracers/tests/langsmith_interop.test.ts @@ -20,11 +20,15 @@ let fetchMock: any; const originalTracingEnvValue = process.env.LANGCHAIN_TRACING_V2; beforeEach(() => { - fetchMock = jest - .spyOn(global, "fetch") - .mockImplementation(() => - Promise.resolve({ ok: true, text: () => "" } as any) - ); + fetchMock = jest.spyOn(global, "fetch").mockImplementation(() => + Promise.resolve({ + ok: true, + text: () => "", + json: () => { + return {}; + }, + } as any) + ); process.env.LANGCHAIN_TRACING_V2 = "true"; }); diff --git a/langchain-core/src/tracers/tracer_langchain.ts b/langchain-core/src/tracers/tracer_langchain.ts index 3e4fa6a54bb7..4c8edd8c12de 100644 --- a/langchain-core/src/tracers/tracer_langchain.ts +++ b/langchain-core/src/tracers/tracer_langchain.ts @@ -59,7 +59,12 @@ export class LangChainTracer getEnvironmentVariable("LANGCHAIN_PROJECT") ?? getEnvironmentVariable("LANGCHAIN_SESSION"); this.exampleId = exampleId; - this.client = client ?? new Client({}); + this.client = + client ?? 
+ new Client({ + // LangChain has its own backgrounding system + blockOnRootRunFinalization: true, + }); const traceableTree = LangChainTracer.getTraceableRunTree(); if (traceableTree) { diff --git a/langchain/package.json b/langchain/package.json index db95724a4bf4..41a125d48253 100644 --- a/langchain/package.json +++ b/langchain/package.json @@ -520,7 +520,7 @@ "js-tiktoken": "^1.0.12", "js-yaml": "^4.1.0", "jsonpointer": "^5.0.1", - "langsmith": "^0.1.56", + "langsmith": "^0.2.0", "openapi-types": "^12.1.3", "p-retry": "4", "uuid": "^10.0.0", diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index ce97db70f3c5..a4e8837a9bb2 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -41,7 +41,7 @@ "flat": "^5.0.2", "js-yaml": "^4.1.0", "langchain": ">=0.2.3 <0.4.0", - "langsmith": "~0.1.56", + "langsmith": "^0.2.0", "uuid": "^10.0.0", "zod": "^3.22.3", "zod-to-json-schema": "^3.22.5" diff --git a/yarn.lock b/yarn.lock index 1f85107585eb..016aa674a020 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11583,7 +11583,7 @@ __metadata: jsdom: ^22.1.0 jsonwebtoken: ^9.0.2 langchain: ">=0.2.3 <0.4.0" - langsmith: ~0.1.56 + langsmith: ^0.2.0 llmonitor: ^0.5.9 lodash: ^4.17.21 lunary: ^0.7.10 @@ -12015,7 +12015,7 @@ __metadata: jest: ^29.5.0 jest-environment-node: ^29.6.4 js-tiktoken: ^1.0.12 - langsmith: ^0.1.65 + langsmith: ^0.2.0 ml-matrix: ^6.10.4 mustache: ^4.2.0 p-queue: ^6.6.2 @@ -27338,7 +27338,7 @@ __metadata: ioredis: ^5.3.2 js-yaml: ^4.1.0 langchain: "workspace:*" - langsmith: ^0.1.56 + langsmith: ^0.2.0 mongodb: ^6.3.0 pg: ^8.11.0 pickleparser: ^0.2.1 @@ -32921,7 +32921,7 @@ __metadata: js-tiktoken: ^1.0.12 js-yaml: ^4.1.0 jsonpointer: ^5.0.1 - langsmith: ^0.1.56 + langsmith: ^0.2.0 openai: ^4.41.1 openapi-types: ^12.1.3 p-retry: 4 @@ -33000,28 +33000,9 @@ __metadata: languageName: unknown linkType: soft -"langsmith@npm:^0.1.56, langsmith@npm:~0.1.56": - version: 0.1.56 - resolution: "langsmith@npm:0.1.56" - dependencies: - "@types/uuid": ^10.0.0 - commander: ^10.0.1 - p-queue: ^6.6.2 - p-retry: 4 - semver: ^7.6.3 - uuid: ^10.0.0 - peerDependencies: - openai: "*" - peerDependenciesMeta: - openai: - optional: true - checksum: 61db6dc3016e35d14d25e78a8ecebcc6356f2efc00310f5582dce9d28a88377525425622d1b98f053e73c0b3233d44c5a2f9d5654ca72ee2e61163edd5be2d28 - languageName: node - linkType: hard - -"langsmith@npm:^0.1.65": - version: 0.1.65 - resolution: "langsmith@npm:0.1.65" +"langsmith@npm:^0.2.0": + version: 0.2.0 + resolution: "langsmith@npm:0.2.0" dependencies: "@types/uuid": ^10.0.0 commander: ^10.0.1 @@ -33034,7 +33015,7 @@ __metadata: peerDependenciesMeta: openai: optional: true - checksum: ca44f26733fbb20675b84f2586b90622b8cf1aedc82123f5574af04e88ba29348e28b2b63f410479aeb7e5c174d2fef13b4bd9eb68581d93a104950b1fafa40f + checksum: 0cd92d0e31526d309af197a3502c93a00ac8c09f6b2864161a18a5c1e8b95b0e8203bad2dfe3b4beb26055fc815a8d70730592a58c9af7e202917b13d01f695c languageName: node linkType: hard From 3f498437c54033b0f83c965a65772af54baefa7f Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Thu, 24 Oct 2024 20:22:51 -0700 Subject: [PATCH 010/100] chore(core): Release 0.3.15 (#7059) --- langchain-core/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain-core/package.json b/langchain-core/package.json index cdcb6181e0cc..0741ec42c6af 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/core", - "version": "0.3.14", + "version": 
"0.3.15", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { From 95dffc497622787c3d83635f94c4923da1636522 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Thu, 24 Oct 2024 20:29:32 -0700 Subject: [PATCH 011/100] chore(community): Release 0.3.9 (#7060) --- libs/langchain-community/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index a4e8837a9bb2..7aa94523207b 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.8", + "version": "0.3.9", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { From a2fe51f56536a4e1a2c73f975109b762851e46ab Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Thu, 24 Oct 2024 20:35:05 -0700 Subject: [PATCH 012/100] chore(langchain): Release 0.3.4 (#7062) --- langchain/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain/package.json b/langchain/package.json index 41a125d48253..aaa82718d396 100644 --- a/langchain/package.json +++ b/langchain/package.json @@ -1,6 +1,6 @@ { "name": "langchain", - "version": "0.3.3", + "version": "0.3.4", "description": "Typescript bindings for langchain", "type": "module", "engines": { From c21474026212c628ce49356701767068ea04b669 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Fri, 25 Oct 2024 17:17:39 -0700 Subject: [PATCH 013/100] chore(community): Bump langchain dep within community (#7071) --- libs/langchain-community/package.json | 2 +- yarn.lock | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 7aa94523207b..bb0ed7742777 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -40,7 +40,7 @@ "expr-eval": "^2.0.2", "flat": "^5.0.2", "js-yaml": "^4.1.0", - "langchain": ">=0.2.3 <0.4.0", + "langchain": ">=0.2.3 <0.3.0 || >=0.3.4 <0.4.0", "langsmith": "^0.2.0", "uuid": "^10.0.0", "zod": "^3.22.3", diff --git a/yarn.lock b/yarn.lock index 016aa674a020..e8e3b962c8ff 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11582,7 +11582,7 @@ __metadata: js-yaml: ^4.1.0 jsdom: ^22.1.0 jsonwebtoken: ^9.0.2 - langchain: ">=0.2.3 <0.4.0" + langchain: ">=0.2.3 <0.3.0 || >=0.3.4 <0.4.0" langsmith: ^0.2.0 llmonitor: ^0.5.9 lodash: ^4.17.21 @@ -32875,7 +32875,7 @@ __metadata: languageName: node linkType: hard -"langchain@>=0.2.3 <0.4.0, langchain@workspace:*, langchain@workspace:langchain": +"langchain@>=0.2.3 <0.3.0 || >=0.3.4 <0.4.0, langchain@workspace:*, langchain@workspace:langchain": version: 0.0.0-use.local resolution: "langchain@workspace:langchain" dependencies: From f75e99bee43c03996425ee1a72fde2472e1c2020 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Fri, 25 Oct 2024 19:59:13 -0700 Subject: [PATCH 014/100] chore(community): Release 0.3.10 (#7073) --- libs/langchain-community/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index bb0ed7742777..20128b15faaa 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.9", + "version": "0.3.10", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { From 
660edbd9421c6f0f4cb891de4bf377459ad35ba0 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 13:58:30 -0700 Subject: [PATCH 015/100] fix(anthropic): Fixed streaming tool calls with Anthropic (#7081) --- libs/langchain-anthropic/src/chat_models.ts | 6 +- .../src/utils/message_outputs.ts | 16 +++++ libs/langchain-anthropic/src/utils/tools.ts | 58 ------------------- 3 files changed, 18 insertions(+), 62 deletions(-) diff --git a/libs/langchain-anthropic/src/chat_models.ts b/libs/langchain-anthropic/src/chat_models.ts index 3627aed6272c..f474324b37cd 100644 --- a/libs/langchain-anthropic/src/chat_models.ts +++ b/libs/langchain-anthropic/src/chat_models.ts @@ -32,7 +32,7 @@ import type { import { isLangChainTool } from "@langchain/core/utils/function_calling"; import { AnthropicToolsOutputParser } from "./output_parsers.js"; -import { extractToolCallChunk, handleToolChoice } from "./utils/tools.js"; +import { handleToolChoice } from "./utils/tools.js"; import { _convertMessagesToAnthropicPayload } from "./utils/message_inputs.js"; import { _makeMessageChunkFromAnthropicEvent, @@ -815,8 +815,6 @@ export class ChatAnthropicMessages< const { chunk } = result; - const newToolCallChunk = extractToolCallChunk(chunk); - // Extract the text content token for text field and runManager. const token = extractToken(chunk); const generationChunk = new ChatGenerationChunk({ @@ -824,7 +822,7 @@ export class ChatAnthropicMessages< // Just yield chunk as it is and tool_use will be concat by BaseChatModel._generateUncached(). content: chunk.content, additional_kwargs: chunk.additional_kwargs, - tool_call_chunks: newToolCallChunk ? [newToolCallChunk] : undefined, + tool_call_chunks: chunk.tool_call_chunks, usage_metadata: shouldStreamUsage ? chunk.usage_metadata : undefined, response_metadata: chunk.response_metadata, id: chunk.id, diff --git a/libs/langchain-anthropic/src/utils/message_outputs.ts b/libs/langchain-anthropic/src/utils/message_outputs.ts index a04dfba44110..fd34dba87bf7 100644 --- a/libs/langchain-anthropic/src/utils/message_outputs.ts +++ b/libs/langchain-anthropic/src/utils/message_outputs.ts @@ -60,6 +60,8 @@ export function _makeMessageChunkFromAnthropicEvent( data.type === "content_block_start" && data.content_block.type === "tool_use" ) { + const toolCallContentBlock = + data.content_block as Anthropic.Messages.ToolUseBlock; return { chunk: new AIMessageChunk({ content: fields.coerceContentToString @@ -72,6 +74,14 @@ export function _makeMessageChunkFromAnthropicEvent( }, ], additional_kwargs: {}, + tool_call_chunks: [ + { + id: toolCallContentBlock.id, + index: data.index, + name: toolCallContentBlock.name, + args: "", + }, + ], }), }; } else if ( @@ -110,6 +120,12 @@ export function _makeMessageChunkFromAnthropicEvent( }, ], additional_kwargs: {}, + tool_call_chunks: [ + { + index: data.index, + args: data.delta.partial_json, + }, + ], }), }; } else if ( diff --git a/libs/langchain-anthropic/src/utils/tools.ts b/libs/langchain-anthropic/src/utils/tools.ts index a56bd22e2a49..157caa1b7f11 100644 --- a/libs/langchain-anthropic/src/utils/tools.ts +++ b/libs/langchain-anthropic/src/utils/tools.ts @@ -1,6 +1,4 @@ import type { MessageCreateParams } from "@anthropic-ai/sdk/resources/index.mjs"; -import { AIMessageChunk } from "@langchain/core/messages"; -import { ToolCallChunk } from "@langchain/core/messages/tool"; import { AnthropicToolChoice } from "../types.js"; export function handleToolChoice( @@ -29,59 +27,3 @@ export function handleToolChoice( return toolChoice; } } - 
-export function extractToolCallChunk( - chunk: AIMessageChunk -): ToolCallChunk | undefined { - let newToolCallChunk: ToolCallChunk | undefined; - - // Initial chunk for tool calls from anthropic contains identifying information like ID and name. - // This chunk does not contain any input JSON. - const toolUseChunks = Array.isArray(chunk.content) - ? chunk.content.find((c) => c.type === "tool_use") - : undefined; - if ( - toolUseChunks && - "index" in toolUseChunks && - "name" in toolUseChunks && - "id" in toolUseChunks - ) { - newToolCallChunk = { - args: "", - id: toolUseChunks.id, - name: toolUseChunks.name, - index: toolUseChunks.index, - type: "tool_call_chunk", - }; - } - - // Chunks after the initial chunk only contain the index and partial JSON. - const inputJsonDeltaChunks = Array.isArray(chunk.content) - ? chunk.content.find((c) => c.type === "input_json_delta") - : undefined; - if ( - inputJsonDeltaChunks && - "index" in inputJsonDeltaChunks && - "input" in inputJsonDeltaChunks - ) { - if (typeof inputJsonDeltaChunks.input === "string") { - newToolCallChunk = { - id: inputJsonDeltaChunks.id, - name: inputJsonDeltaChunks.name, - args: inputJsonDeltaChunks.input, - index: inputJsonDeltaChunks.index, - type: "tool_call_chunk", - }; - } else { - newToolCallChunk = { - id: inputJsonDeltaChunks.id, - name: inputJsonDeltaChunks.name, - args: JSON.stringify(inputJsonDeltaChunks.input, null, 2), - index: inputJsonDeltaChunks.index, - type: "tool_call_chunk", - }; - } - } - - return newToolCallChunk; -} From 203c295a9e207aeb6ccc29fc088a65e5dd173707 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 14:00:24 -0700 Subject: [PATCH 016/100] fix(anthropic): Release 0.3.6-rc.0 (#7082) --- libs/langchain-anthropic/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-anthropic/package.json b/libs/langchain-anthropic/package.json index 58e94edb84c4..e0d1d129646a 100644 --- a/libs/langchain-anthropic/package.json +++ b/libs/langchain-anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/anthropic", - "version": "0.3.5", + "version": "0.3.6-rc.0", "description": "Anthropic integrations for LangChain.js", "type": "module", "engines": { From 49acaec559a0d1095b55bcf0f354d740ee1d3e8e Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 14:16:59 -0700 Subject: [PATCH 017/100] fix: Commit yarn.lock (#7083) --- yarn.lock | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/yarn.lock b/yarn.lock index e8e3b962c8ff..cd86a3d14421 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11147,7 +11147,21 @@ __metadata: languageName: node linkType: hard -"@langchain/anthropic@*, @langchain/anthropic@workspace:*, @langchain/anthropic@workspace:libs/langchain-anthropic": +"@langchain/anthropic@npm:*": + version: 0.3.5 + resolution: "@langchain/anthropic@npm:0.3.5" + dependencies: + "@anthropic-ai/sdk": ^0.27.3 + fast-xml-parser: ^4.4.1 + zod: ^3.22.4 + zod-to-json-schema: ^3.22.4 + peerDependencies: + "@langchain/core": ">=0.2.21 <0.4.0" + checksum: a241f3f863dbf1c233802553f3b955e4d71c114e5736eb344ba85aaece8d6d3596a0b4bd9ebabaf5bca180f913da242b0f72bebaba8d7600a88f489e44ced9be + languageName: node + linkType: hard + +"@langchain/anthropic@workspace:*, @langchain/anthropic@workspace:libs/langchain-anthropic": version: 0.0.0-use.local resolution: "@langchain/anthropic@workspace:libs/langchain-anthropic" dependencies: From 34ae98eef12fc3e7908988b30e8c23d2705b1877 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 
Oct 2024 16:31:06 -0700 Subject: [PATCH 018/100] feat(anthropic): Release 0.3.6 (#7084) --- libs/langchain-anthropic/package.json | 2 +- yarn.lock | 16 +--------------- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/libs/langchain-anthropic/package.json b/libs/langchain-anthropic/package.json index e0d1d129646a..75c0414ca5da 100644 --- a/libs/langchain-anthropic/package.json +++ b/libs/langchain-anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/anthropic", - "version": "0.3.6-rc.0", + "version": "0.3.6", "description": "Anthropic integrations for LangChain.js", "type": "module", "engines": { diff --git a/yarn.lock b/yarn.lock index cd86a3d14421..e8e3b962c8ff 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11147,21 +11147,7 @@ __metadata: languageName: node linkType: hard -"@langchain/anthropic@npm:*": - version: 0.3.5 - resolution: "@langchain/anthropic@npm:0.3.5" - dependencies: - "@anthropic-ai/sdk": ^0.27.3 - fast-xml-parser: ^4.4.1 - zod: ^3.22.4 - zod-to-json-schema: ^3.22.4 - peerDependencies: - "@langchain/core": ">=0.2.21 <0.4.0" - checksum: a241f3f863dbf1c233802553f3b955e4d71c114e5736eb344ba85aaece8d6d3596a0b4bd9ebabaf5bca180f913da242b0f72bebaba8d7600a88f489e44ced9be - languageName: node - linkType: hard - -"@langchain/anthropic@workspace:*, @langchain/anthropic@workspace:libs/langchain-anthropic": +"@langchain/anthropic@*, @langchain/anthropic@workspace:*, @langchain/anthropic@workspace:libs/langchain-anthropic": version: 0.0.0-use.local resolution: "@langchain/anthropic@workspace:libs/langchain-anthropic" dependencies: From 14be2641c842dd94d8471db4e0f18219345b861c Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 17:08:26 -0700 Subject: [PATCH 019/100] fix(langchain): Fix withConfig with universal chat model (#7085) --- .../chat_models/tests/universal.int.test.ts | 43 +++++++++++++++++++ langchain/src/chat_models/universal.ts | 1 + 2 files changed, 44 insertions(+) diff --git a/langchain/src/chat_models/tests/universal.int.test.ts b/langchain/src/chat_models/tests/universal.int.test.ts index 628b123bbcba..eaca76f3ee44 100644 --- a/langchain/src/chat_models/tests/universal.int.test.ts +++ b/langchain/src/chat_models/tests/universal.int.test.ts @@ -558,4 +558,47 @@ describe("Can call base runnable methods", () => { expect(batchResult[0].content).not.toBe(""); expect(batchResult[1].content).not.toBe(""); }); + + it("can call withConfig with tools", async () => { + const weatherTool = { + schema: z + .object({ + location: z + .string() + .describe("The city and state, e.g. 
San Francisco, CA"), + }) + .describe("Get the current weather in a given location"), + name: "GetWeather", + description: "Get the current weather in a given location", + }; + + const openaiModel = await initChatModel("gpt-4o-mini", { + temperature: 0, + apiKey: openAIApiKey, + }); + + const modelWithTools = openaiModel.bindTools([weatherTool], { + tool_choice: "GetWeather", + }); + expect(modelWithTools._queuedMethodOperations.bindTools).toBeDefined(); + expect(modelWithTools._queuedMethodOperations.bindTools[0][0].name).toBe( + "GetWeather" + ); + const modelWithConfig = modelWithTools.withConfig({ runName: "weather" }); + + expect(modelWithConfig.bound).toHaveProperty("_queuedMethodOperations"); + expect( + (modelWithConfig.bound as any)._queuedMethodOperations.bindTools + ).toBeDefined(); + expect( + (modelWithConfig.bound as any)._queuedMethodOperations.bindTools[0][0] + .name + ).toBe("GetWeather"); + + expect(modelWithConfig.config.runName).toBe("weather"); + + const result = await modelWithConfig.invoke("What's 8x8?"); + expect(result.tool_calls).toBeDefined(); + expect(result.tool_calls?.[0].name).toBe("GetWeather"); + }); }); diff --git a/langchain/src/chat_models/universal.ts b/langchain/src/chat_models/universal.ts index 2682f470cb6c..9488bf5bf4aa 100644 --- a/langchain/src/chat_models/universal.ts +++ b/langchain/src/chat_models/universal.ts @@ -386,6 +386,7 @@ class _ConfigurableModel< ? [...this._configurableFields] : this._configurableFields, configPrefix: this._configPrefix, + queuedMethodOperations: this._queuedMethodOperations, }); return new RunnableBinding({ From b62c4089af5e4e7ed067b2050449f7b55d50cc94 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 17:19:25 -0700 Subject: [PATCH 020/100] fix(google-genai): Type error parsing api res (#7086) --- libs/langchain-google-genai/src/utils/common.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-google-genai/src/utils/common.ts b/libs/langchain-google-genai/src/utils/common.ts index ed0b7a08755c..738bc62f479c 100644 --- a/libs/langchain-google-genai/src/utils/common.ts +++ b/libs/langchain-google-genai/src/utils/common.ts @@ -290,7 +290,7 @@ export function convertResponseContentToChatGenerationChunk( const functionCalls = response.functionCalls(); const [candidate] = response.candidates; const { content, ...generationInfo } = candidate; - const text = content?.parts[0]?.text ?? ""; + const text = content?.parts?.[0]?.text ?? 
""; const toolCallChunks: ToolCallChunk[] = []; if (functionCalls) { From b5b19204cb54fe2c98d2e425630d22805c4724e9 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 17:21:47 -0700 Subject: [PATCH 021/100] feat(google-genai): Release 0.1.1 (#7087) --- libs/langchain-google-genai/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-google-genai/package.json b/libs/langchain-google-genai/package.json index b97f308d368f..a1fa32eb00ea 100644 --- a/libs/langchain-google-genai/package.json +++ b/libs/langchain-google-genai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-genai", - "version": "0.1.0", + "version": "0.1.1", "description": "Google Generative AI integration for LangChain.js", "type": "module", "engines": { From afdad9d5674ee5e9b1ff6e6cb06e8caacb2f7e27 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 17:27:44 -0700 Subject: [PATCH 022/100] Release 0.3.5 (#7088) --- langchain/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain/package.json b/langchain/package.json index aaa82718d396..8f320338a4d2 100644 --- a/langchain/package.json +++ b/langchain/package.json @@ -1,6 +1,6 @@ { "name": "langchain", - "version": "0.3.4", + "version": "0.3.5", "description": "Typescript bindings for langchain", "type": "module", "engines": { From 165f8ca9cf67790d048e5ef80498eb11e6262918 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 18:22:54 -0700 Subject: [PATCH 023/100] fix(google-genai): Add support for tool choice (#7089) --- .../langchain-google-genai/src/chat_models.ts | 78 ++++++++++++++----- .../src/tests/chat_models.int.test.ts | 29 +++++++ 2 files changed, 87 insertions(+), 20 deletions(-) diff --git a/libs/langchain-google-genai/src/chat_models.ts b/libs/langchain-google-genai/src/chat_models.ts index 2120b5c88b91..3211bd1724df 100644 --- a/libs/langchain-google-genai/src/chat_models.ts +++ b/libs/langchain-google-genai/src/chat_models.ts @@ -7,6 +7,9 @@ import { GenerateContentRequest, SafetySetting, Part as GenerativeAIPart, + Tool as GenerativeAITool, + ToolConfig, + FunctionCallingMode, } from "@google/generative-ai"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { @@ -60,6 +63,11 @@ export type BaseMessageExamplePair = { export interface GoogleGenerativeAIChatCallOptions extends BaseChatModelCallOptions { tools?: GoogleGenerativeAIToolType[]; + /** + * Allowed functions to call when the mode is "any". + * If empty, any one of the provided functions are called. + */ + allowedFunctionNames?: string[]; /** * Whether or not to include usage data, like token counts * in the streamed response chunks. @@ -680,34 +688,64 @@ export class ChatGoogleGenerativeAI invocationParams( options?: this["ParsedCallOptions"] ): Omit { - if (options?.tool_choice) { - throw new Error( - "'tool_choice' call option is not supported by ChatGoogleGenerativeAI." - ); - } - - const tools = options?.tools as - | GoogleGenerativeAIFunctionDeclarationsTool[] - | StructuredToolInterface[] - | undefined; + let genaiTools: GenerativeAITool[] | undefined; if ( - Array.isArray(tools) && - !tools.some( + Array.isArray(options?.tools) && + !options?.tools.some( // eslint-disable-next-line @typescript-eslint/no-explicit-any (t: any) => !("lc_namespace" in t) ) ) { // Tools are in StructuredToolInterface format. 
Convert to GenAI format - return { - tools: convertToGenerativeAITools( - options?.tools as StructuredToolInterface[] - ), - }; + genaiTools = convertToGenerativeAITools( + options?.tools as StructuredToolInterface[] + ); + } else { + genaiTools = options?.tools as GenerativeAITool[]; + } + + let toolConfig: ToolConfig | undefined; + if (genaiTools?.length && options?.tool_choice) { + if (["any", "auto", "none"].some((c) => c === options.tool_choice)) { + const modeMap: Record = { + any: FunctionCallingMode.ANY, + auto: FunctionCallingMode.AUTO, + none: FunctionCallingMode.NONE, + }; + + toolConfig = { + functionCallingConfig: { + mode: + modeMap[options.tool_choice as keyof typeof modeMap] ?? + "MODE_UNSPECIFIED", + allowedFunctionNames: options.allowedFunctionNames, + }, + }; + } else if (typeof options.tool_choice === "string") { + toolConfig = { + functionCallingConfig: { + mode: FunctionCallingMode.ANY, + allowedFunctionNames: [ + ...(options.allowedFunctionNames ?? []), + options.tool_choice, + ], + }, + }; + } + + if (!options.tool_choice && options.allowedFunctionNames) { + toolConfig = { + functionCallingConfig: { + mode: FunctionCallingMode.ANY, + allowedFunctionNames: options.allowedFunctionNames, + }, + }; + } } + return { - tools: options?.tools as - | GoogleGenerativeAIFunctionDeclarationsTool[] - | undefined, + tools: genaiTools, + toolConfig, }; } diff --git a/libs/langchain-google-genai/src/tests/chat_models.int.test.ts b/libs/langchain-google-genai/src/tests/chat_models.int.test.ts index 765240a73ffa..4a2328350d2d 100644 --- a/libs/langchain-google-genai/src/tests/chat_models.int.test.ts +++ b/libs/langchain-google-genai/src/tests/chat_models.int.test.ts @@ -538,3 +538,32 @@ test("Invoke with JSON mode", async () => { res.usage_metadata.input_tokens + res.usage_metadata.output_tokens ); }); + +test.only("Supports tool_choice", async () => { + const model = new ChatGoogleGenerativeAI({}); + const tools = [ + { + name: "get_weather", + description: "Get the weather", + schema: z.object({ + location: z.string(), + }), + }, + { + name: "calculator", + description: "Preform calculations", + schema: z.object({ + expression: z.string(), + }), + }, + ]; + + const modelWithTools = model.bindTools(tools, { + tool_choice: "calculator", + allowedFunctionNames: ["calculator"], + }); + const response = await modelWithTools.invoke( + "What is 27725327 times 283683? Also whats the weather in New York?" 
+ ); + expect(response.tool_calls?.length).toBe(1); +}); From b777c2ffcf9851109d709d7240ba1b4c2fdd2eeb Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Sun, 27 Oct 2024 18:25:00 -0700 Subject: [PATCH 024/100] fix(google-genai): Release 0.1.2 (#7090) --- libs/langchain-google-genai/package.json | 2 +- libs/langchain-google-genai/src/tests/chat_models.int.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/langchain-google-genai/package.json b/libs/langchain-google-genai/package.json index a1fa32eb00ea..e8db02867ea2 100644 --- a/libs/langchain-google-genai/package.json +++ b/libs/langchain-google-genai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-genai", - "version": "0.1.1", + "version": "0.1.2", "description": "Google Generative AI integration for LangChain.js", "type": "module", "engines": { diff --git a/libs/langchain-google-genai/src/tests/chat_models.int.test.ts b/libs/langchain-google-genai/src/tests/chat_models.int.test.ts index 4a2328350d2d..44ab24819786 100644 --- a/libs/langchain-google-genai/src/tests/chat_models.int.test.ts +++ b/libs/langchain-google-genai/src/tests/chat_models.int.test.ts @@ -539,7 +539,7 @@ test("Invoke with JSON mode", async () => { ); }); -test.only("Supports tool_choice", async () => { +test("Supports tool_choice", async () => { const model = new ChatGoogleGenerativeAI({}); const tools = [ { From c0d1a9ca6b373b7328222d393cc9eeb464972cd8 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 28 Oct 2024 09:58:45 -0700 Subject: [PATCH 025/100] docs: Add missing SerpAPI docs (#7079) --- .../docs/integrations/tools/serpapi.ipynb | 303 ++++++++++++++++++ 1 file changed, 303 insertions(+) create mode 100644 docs/core_docs/docs/integrations/tools/serpapi.ipynb diff --git a/docs/core_docs/docs/integrations/tools/serpapi.ipynb b/docs/core_docs/docs/integrations/tools/serpapi.ipynb new file mode 100644 index 000000000000..0172711a380e --- /dev/null +++ b/docs/core_docs/docs/integrations/tools/serpapi.ipynb @@ -0,0 +1,303 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "10238e62-3465-4973-9279-606cbb7ccf16", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: SerpAPI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "a6f91f20", + "metadata": {}, + "source": [ + "# SerpAPI\n", + "\n", + "[SerpAPI](https://serpapi.com/) allows you to integrate search engine results into your LLM apps\n", + "\n", + "This guide provides a quick overview for getting started with the SerpAPI [tool](/docs/integrations/tools/). 
For detailed documentation of all `SerpAPI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/_langchain_community.tools_serpapi.SerpAPI.html).\n", + "\n", + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/tools/serpapi/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [SerpAPI](https://api.js.langchain.com/classes/_langchain_community.tools_serpapi.SerpAPI.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "The integration lives in the `@langchain/community` package, which you can install as shown below:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Set up an API key [here](https://serpapi.com/) and set it as an environment variable named `SERPAPI_API_KEY`.\n", + "\n", + "```typescript\n", + "process.env.SERPAPI_API_KEY = \"YOUR_API_KEY\"\n", + "```\n", + "\n", + "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", + "\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "You can import and instantiate an instance of the `SerpAPI` tool like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64", + "metadata": {}, + "outputs": [], + "source": [ + "import { SerpAPI } from \"@langchain/community/tools/serpapi\";\n", + "\n", + "const tool = new SerpAPI();" + ] + }, + { + "cell_type": "markdown", + "id": "74147a1a", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)\n", + "\n", + "You can invoke the tool directly like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\"type\":\"weather_result\",\"temperature\":\"63\",\"unit\":\"Fahrenheit\",\"precipitation\":\"3%\",\"humidity\":\"91%\",\"wind\":\"5 mph\",\"location\":\"San Francisco, CA\",\"date\":\"Sunday 9:00 AM\",\"weather\":\"Mostly cloudy\"}\n" + ] + } + ], + "source": [ + "await tool.invoke({\n", + " input: \"what is the current weather in SF?\"\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "d6e73897", + "metadata": {}, + "source": [ + "### [Invoke with ToolCall](/docs/concepts/#invoke-with-toolcall)\n", + "\n", + "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "f90e33a7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMessage {\n", + " \"content\": 
\"{\\\"type\\\":\\\"weather_result\\\",\\\"temperature\\\":\\\"63\\\",\\\"unit\\\":\\\"Fahrenheit\\\",\\\"precipitation\\\":\\\"3%\\\",\\\"humidity\\\":\\\"91%\\\",\\\"wind\\\":\\\"5 mph\\\",\\\"location\\\":\\\"San Francisco, CA\\\",\\\"date\\\":\\\"Sunday 9:00 AM\\\",\\\"weather\\\":\\\"Mostly cloudy\\\"}\",\n", + " \"name\": \"search\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"1\"\n", + "}\n" + ] + } + ], + "source": [ + "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n", + "const modelGeneratedToolCall = {\n", + " args: {\n", + " input: \"what is the current weather in SF?\"\n", + " },\n", + " id: \"1\",\n", + " name: tool.name,\n", + " type: \"tool_call\",\n", + "}\n", + "\n", + "await tool.invoke(modelGeneratedToolCall)" + ] + }, + { + "cell_type": "markdown", + "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95", + "metadata": {}, + "outputs": [], + "source": [ + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"You are a helpful assistant.\"],\n", + " [\"placeholder\", \"{messages}\"],\n", + " ]\n", + ")\n", + "\n", + "const llmWithTools = llm.bindTools([tool]);\n", + "\n", + "const chain = prompt.pipe(llmWithTools);\n", + "\n", + "const toolChain = RunnableLambda.from(\n", + " async (userInput: string, config) => {\n", + " const humanMessage = new HumanMessage(userInput,);\n", + " const aiMsg = await chain.invoke({\n", + " messages: [new HumanMessage(userInput)],\n", + " }, config);\n", + " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n", + " return chain.invoke({\n", + " messages: [humanMessage, aiMsg, ...toolMsgs],\n", + " }, config);\n", + " }\n", + ");\n", + "\n", + "const toolChainResult = await toolChain.invoke(\"what is the current weather in sf?\");" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "9ac188a2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"tool_calls\": [],\n", + " \"content\": \"The current weather in San Francisco is mostly cloudy, with a temperature of 64°F. 
The humidity is at 90%, there is a 3% chance of precipitation, and the wind is blowing at 5 mph.\"\n", + "}\n" + ] + } + ], + "source": [ + "const { tool_calls, content } = toolChainResult;\n", + "\n", + "console.log(\"AIMessage\", JSON.stringify({\n", + " tool_calls,\n", + " content,\n", + "}, null, 2));" + ] + }, + { + "cell_type": "markdown", + "id": "573fb391", + "metadata": {}, + "source": [ + "## Agents\n", + "\n", + "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs." + ] + }, + { + "cell_type": "markdown", + "id": "4ac8146c", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `SerpAPI` features and configurations head to the API reference: https://api.js.langchain.com/classes/_langchain_community.tools_serpapi.SerpAPI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 36e27f1d63b13109d98c9f7864ef30b3a4c778fa Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 28 Oct 2024 09:59:02 -0700 Subject: [PATCH 026/100] feat(core): Only set LangSmith blocking if callback backgrounding is false (#7093) --- langchain-core/src/tracers/tracer_langchain.ts | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/langchain-core/src/tracers/tracer_langchain.ts b/langchain-core/src/tracers/tracer_langchain.ts index 4c8edd8c12de..ad31c309d562 100644 --- a/langchain-core/src/tracers/tracer_langchain.ts +++ b/langchain-core/src/tracers/tracer_langchain.ts @@ -59,12 +59,14 @@ export class LangChainTracer getEnvironmentVariable("LANGCHAIN_PROJECT") ?? getEnvironmentVariable("LANGCHAIN_SESSION"); this.exampleId = exampleId; - this.client = - client ?? - new Client({ - // LangChain has its own backgrounding system - blockOnRootRunFinalization: true, - }); + const clientParams = + getEnvironmentVariable("LANGCHAIN_CALLBACKS_BACKGROUND") === "false" + ? { + // LangSmith has its own backgrounding system + blockOnRootRunFinalization: true, + } + : {}; + this.client = client ?? 
new Client(clientParams); const traceableTree = LangChainTracer.getTraceableRunTree(); if (traceableTree) { From a68361bb5602b37829ec47b9a6f771a3d3fd6e52 Mon Sep 17 00:00:00 2001 From: Manuel <58395553+cr4yfish@users.noreply.github.com> Date: Tue, 29 Oct 2024 20:44:42 +0100 Subject: [PATCH 027/100] feat(ollama): Add support for optional headers (#7052) Co-authored-by: jacoblee93 --- libs/langchain-ollama/package.json | 2 +- libs/langchain-ollama/src/chat_models.ts | 5 +++++ libs/langchain-ollama/src/embeddings.ts | 8 +++++++- libs/langchain-ollama/src/llms.ts | 7 +++++++ yarn.lock | 10 +++++----- 5 files changed, 25 insertions(+), 7 deletions(-) diff --git a/libs/langchain-ollama/package.json b/libs/langchain-ollama/package.json index 5df996b61191..52ba69a2e33f 100644 --- a/libs/langchain-ollama/package.json +++ b/libs/langchain-ollama/package.json @@ -32,7 +32,7 @@ "author": "LangChain", "license": "MIT", "dependencies": { - "ollama": "^0.5.6", + "ollama": "^0.5.9", "uuid": "^10.0.0" }, "peerDependencies": { diff --git a/libs/langchain-ollama/src/chat_models.ts b/libs/langchain-ollama/src/chat_models.ts index f016f80d3b16..4bb1c7a1a4e7 100644 --- a/libs/langchain-ollama/src/chat_models.ts +++ b/libs/langchain-ollama/src/chat_models.ts @@ -70,6 +70,10 @@ export interface ChatOllamaInput * @default "http://127.0.0.1:11434" */ baseUrl?: string; + /** + * Optional HTTP Headers to include in the request. + */ + headers?: Headers; /** * Whether or not to check the model exists on the local machine before * invoking it. If set to `true`, the model will be pulled if it does not @@ -464,6 +468,7 @@ export class ChatOllama this.client = new Ollama({ host: fields?.baseUrl, + headers: fields?.headers, }); this.baseUrl = fields?.baseUrl ?? this.baseUrl; diff --git a/libs/langchain-ollama/src/embeddings.ts b/libs/langchain-ollama/src/embeddings.ts index afdba456c108..5e878322a6d9 100644 --- a/libs/langchain-ollama/src/embeddings.ts +++ b/libs/langchain-ollama/src/embeddings.ts @@ -32,9 +32,14 @@ interface OllamaEmbeddingsParams extends EmbeddingsParams { */ truncate?: boolean; + /** + * Optional HTTP Headers to include in the request. + */ + headers?: Headers; + /** * Advanced Ollama API request parameters in camelCase, see - * https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values + * https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values * for details of the available parameters. */ requestOptions?: OllamaCamelCaseOptions; @@ -58,6 +63,7 @@ export class OllamaEmbeddings extends Embeddings { this.client = new Ollama({ host: fields?.baseUrl, + headers: fields?.headers, }); this.baseUrl = fields?.baseUrl ?? this.baseUrl; diff --git a/libs/langchain-ollama/src/llms.ts b/libs/langchain-ollama/src/llms.ts index bbcd9edf8bc8..b560bc4682ff 100644 --- a/libs/langchain-ollama/src/llms.ts +++ b/libs/langchain-ollama/src/llms.ts @@ -23,7 +23,13 @@ export interface OllamaInput extends BaseLLMParams, OllamaCamelCaseOptions { * @default "http://localhost:11434" */ baseUrl?: string; + format?: string; + + /** + * Optional HTTP Headers to include in the request. + */ + headers?: Headers; } /** @@ -130,6 +136,7 @@ export class Ollama extends LLM implements OllamaInput { : fields?.baseUrl ?? this.baseUrl; this.client = new OllamaClient({ host: this.baseUrl, + headers: fields?.headers, }); this.keepAlive = fields?.keepAlive ?? 
this.keepAlive; diff --git a/yarn.lock b/yarn.lock index e8e3b962c8ff..8c11df3598a7 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12509,7 +12509,7 @@ __metadata: eslint-plugin-prettier: ^4.2.1 jest: ^29.5.0 jest-environment-node: ^29.6.4 - ollama: ^0.5.6 + ollama: ^0.5.9 prettier: ^2.8.3 release-it: ^17.6.0 rollup: ^4.5.2 @@ -35517,12 +35517,12 @@ __metadata: languageName: node linkType: hard -"ollama@npm:^0.5.6": - version: 0.5.6 - resolution: "ollama@npm:0.5.6" +"ollama@npm:^0.5.9": + version: 0.5.9 + resolution: "ollama@npm:0.5.9" dependencies: whatwg-fetch: ^3.6.20 - checksum: f7aafe4f0cf5e3fee9f5be7501733d3ab4ea0b02e0aafacdae90cb5a8babfa4bb4543d47fab152b5424084d3331185a09e584a5d3c74e2cefcf017dc5964f520 + checksum: bfaadcec6273d86fcc7c94e5e9e571a7b6b84b852b407a473f3bac7dc69b7b11815a163ae549b5318267a00f192d39696225309812319d2edc8a98a079ace475 languageName: node linkType: hard From 5b2f12b19d130a7739d56413fb67d7265060526b Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 29 Oct 2024 12:47:12 -0700 Subject: [PATCH 028/100] chore(ollama): Release 0.1.1 (#7104) --- libs/langchain-ollama/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-ollama/package.json b/libs/langchain-ollama/package.json index 52ba69a2e33f..2b5fc852e605 100644 --- a/libs/langchain-ollama/package.json +++ b/libs/langchain-ollama/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/ollama", - "version": "0.1.0", + "version": "0.1.1", "description": "Ollama integration for LangChain.js", "type": "module", "engines": { From 7142a7b788e0ae814d5733060b9867904ad8d29f Mon Sep 17 00:00:00 2001 From: crisjy Date: Wed, 30 Oct 2024 04:27:47 +0800 Subject: [PATCH 029/100] docs: Add Partner Package for Azure CosmosDB to list (#7092) Co-authored-by: Cris <617072224@qq.com> --- docs/core_docs/docs/integrations/platforms/index.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/core_docs/docs/integrations/platforms/index.mdx b/docs/core_docs/docs/integrations/platforms/index.mdx index f44f4d666c8b..d4b237086040 100644 --- a/docs/core_docs/docs/integrations/platforms/index.mdx +++ b/docs/core_docs/docs/integrations/platforms/index.mdx @@ -28,3 +28,4 @@ These providers have standalone `@langchain/{provider}` packages for improved ve - [Redis](https://www.npmjs.com/package/@langchain/redis) - [Weaviate](https://www.npmjs.com/package/@langchain/weaviate) - [Yandex](https://www.npmjs.com/package/@langchain/yandex) +- [Azure CosmosDB](https://www.npmjs.com/package/@langchain/azure-cosmosdb) From c5d1503378e69b6bcbf8d8280d5dee77244f8f77 Mon Sep 17 00:00:00 2001 From: Pavlo Sobchuk Date: Tue, 29 Oct 2024 20:33:38 +0000 Subject: [PATCH 030/100] docs(core): Add docstring examples for Runnables (#7066) Co-authored-by: jacoblee93 --- langchain-core/src/runnables/base.ts | 250 ++++++++++++++++++++++++- langchain-core/src/runnables/router.ts | 23 +++ 2 files changed, 272 insertions(+), 1 deletion(-) diff --git a/langchain-core/src/runnables/base.ts b/langchain-core/src/runnables/base.ts index 281c95b4a881..1cab402513ec 100644 --- a/langchain-core/src/runnables/base.ts +++ b/langchain-core/src/runnables/base.ts @@ -1181,6 +1181,43 @@ export type RunnableBindingArgs< /** * A runnable that delegates calls to another runnable with a set of kwargs. 
+ * @example + * ```typescript + * import { + * type RunnableConfig, + * RunnableLambda, + * } from "@langchain/core/runnables"; + * + * const enhanceProfile = ( + * profile: Record, + * config?: RunnableConfig + * ) => { + * if (config?.configurable?.role) { + * return { ...profile, role: config.configurable.role }; + * } + * return profile; + * }; + * + * const runnable = RunnableLambda.from(enhanceProfile); + * + * // Bind configuration to the runnable to set the user's role dynamically + * const adminRunnable = runnable.bind({ configurable: { role: "Admin" } }); + * const userRunnable = runnable.bind({ configurable: { role: "User" } }); + * + * const result1 = await adminRunnable.invoke({ + * name: "Alice", + * email: "alice@example.com" + * }); + * + * // { name: "Alice", email: "alice@example.com", role: "Admin" } + * + * const result2 = await userRunnable.invoke({ + * name: "Bob", + * email: "bob@example.com" + * }); + * + * // { name: "Bob", email: "bob@example.com", role: "User" } + * ``` */ export class RunnableBinding< RunInput, @@ -1432,6 +1469,24 @@ export class RunnableBinding< /** * A runnable that delegates calls to another runnable * with each element of the input sequence. + * @example + * ```typescript + * import { RunnableEach, RunnableLambda } from "@langchain/core/runnables"; + * + * const toUpperCase = (input: string): string => input.toUpperCase(); + * const addGreeting = (input: string): string => `Hello, ${input}!`; + * + * const upperCaseLambda = RunnableLambda.from(toUpperCase); + * const greetingLambda = RunnableLambda.from(addGreeting); + * + * const chain = new RunnableEach({ + * bound: upperCaseLambda.pipe(greetingLambda), + * }); + * + * const result = await chain.invoke(["alice", "bob", "carol"]) + * + * // ["Hello, ALICE!", "Hello, BOB!", "Hello, CAROL!"] + * ``` */ export class RunnableEach< RunInputItem, @@ -1526,6 +1581,45 @@ export class RunnableEach< /** * Base class for runnables that can be retried a * specified number of times. + * @example + * ```typescript + * import { + * RunnableLambda, + * RunnableRetry, + * } from "@langchain/core/runnables"; + * + * // Simulate an API call that fails + * const simulateApiCall = (input: string): string => { + * console.log(`Attempting API call with input: ${input}`); + * throw new Error("API call failed due to network issue"); + * }; + * + * const apiCallLambda = RunnableLambda.from(simulateApiCall); + * + * // Apply retry logic using the .withRetry() method + * const apiCallWithRetry = apiCallLambda.withRetry({ stopAfterAttempt: 3 }); + * + * // Alternatively, create a RunnableRetry instance manually + * const manualRetry = new RunnableRetry({ + * bound: apiCallLambda, + * maxAttemptNumber: 3, + * config: {}, + * }); + * + * // Example invocation using the .withRetry() method + * const res = await apiCallWithRetry + * .invoke("Request 1") + * .catch((error) => { + * console.error("Failed after multiple retries:", error.message); + * }); + * + * // Example invocation using the manual retry instance + * const res2 = await manualRetry + * .invoke("Request 2") + * .catch((error) => { + * console.error("Failed after multiple retries:", error.message); + * }); + * ``` */ export class RunnableRetry< // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -2302,7 +2396,30 @@ function assertNonTraceableFunction< } /** - * A runnable that runs a callable. + * A runnable that wraps an arbitrary function that takes a single argument. 
+ * @example
+ * ```typescript
+ * import { RunnableLambda } from "@langchain/core/runnables";
+ *
+ * const add = (input: { x: number; y: number }) => input.x + input.y;
+ *
+ * const multiply = (input: { value: number; multiplier: number }) =>
+ *   input.value * input.multiplier;
+ *
+ * // Create runnables for the functions
+ * const addLambda = RunnableLambda.from(add);
+ * const multiplyLambda = RunnableLambda.from(multiply);
+ *
+ * // Chain the lambdas for a mathematical operation
+ * const chainedLambda = addLambda.pipe((result) =>
+ *   multiplyLambda.invoke({ value: result, multiplier: 2 })
+ * );
+ *
+ * // Example invocation of the chainedLambda
+ * const result = await chainedLambda.invoke({ x: 2, y: 3 });
+ *
+ * // Will log "10" (since (2 + 3) * 2 = 10)
+ * ```
  */
 export class RunnableLambda<
   RunInput,
@@ -2579,6 +2696,39 @@ export class RunnableLambda<
   }
 }
 
+/**
+ * A runnable that runs a mapping of runnables in parallel,
+ * and returns a mapping of their outputs.
+ * @example
+ * ```typescript
+ * import {
+ *   RunnableLambda,
+ *   RunnableParallel,
+ * } from "@langchain/core/runnables";
+ *
+ * const addYears = (age: number): number => age + 5;
+ * const yearsToFifty = (age: number): number => 50 - age;
+ * const yearsToHundred = (age: number): number => 100 - age;
+ *
+ * const addYearsLambda = RunnableLambda.from(addYears);
+ * const milestoneFiftyLambda = RunnableLambda.from(yearsToFifty);
+ * const milestoneHundredLambda = RunnableLambda.from(yearsToHundred);
+ *
+ * // Pipe will coerce objects into RunnableParallel by default, but we
+ * // explicitly instantiate one here to demonstrate
+ * const sequence = addYearsLambda.pipe(
+ *   RunnableParallel.from({
+ *     years_to_fifty: milestoneFiftyLambda,
+ *     years_to_hundred: milestoneHundredLambda,
+ *   })
+ * );
+ *
+ * // Invoke the sequence with a single age input (25 becomes 30 after addYears)
+ * const res = await sequence.invoke(25);
+ *
+ * // { years_to_fifty: 20, years_to_hundred: 70 }
+ * ```
+ */
 export class RunnableParallel<RunInput> extends RunnableMap<RunInput> {}
 
 /**
@@ -2599,6 +2749,55 @@ export class RunnableParallel<RunInput> extends RunnableMap<RunInput> {}
  * When streaming, fallbacks will only be called on failures during the initial
  * stream creation. Errors that occur after a stream starts will not fallback
  * to the next Runnable.
+ *
+ * @example
+ * ```typescript
+ * import {
+ *   RunnableLambda,
+ *   RunnableWithFallbacks,
+ * } from "@langchain/core/runnables";
+ *
+ * const primaryOperation = (input: string): string => {
+ *   if (input !== "safe") {
+ *     throw new Error("Primary operation failed due to unsafe input");
+ *   }
+ *   return `Processed: ${input}`;
+ * };
+ *
+ * // Define a fallback operation that processes the input differently
+ * const fallbackOperation = (input: string): string =>
+ *   `Fallback processed: ${input}`;
+ *
+ * const primaryRunnable = RunnableLambda.from(primaryOperation);
+ * const fallbackRunnable = RunnableLambda.from(fallbackOperation);
+ *
+ * // Apply the fallback logic using the .withFallbacks() method
+ * const runnableWithFallback = primaryRunnable.withFallbacks([fallbackRunnable]);
+ *
+ * // Alternatively, create a RunnableWithFallbacks instance manually
+ * const manualFallbackChain = new RunnableWithFallbacks({
+ *   runnable: primaryRunnable,
+ *   fallbacks: [fallbackRunnable],
+ * });
+ *
+ * // Example invocation using .withFallbacks()
+ * const res = await runnableWithFallback
+ *   .invoke("unsafe input")
+ *   .catch((error) => {
+ *     console.error("Failed after all attempts:", error.message);
+ *   });
+ *
+ * // "Fallback processed: unsafe input"
+ *
+ * // Example invocation using manual instantiation
+ * const res2 = await manualFallbackChain
+ *   .invoke("safe")
+ *   .catch((error) => {
+ *     console.error("Failed after all attempts:", error.message);
+ *   });
+ *
+ * // "Processed: safe"
+ * ```
  */
 export class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable<
   RunInput,
@@ -2849,6 +3048,34 @@ export interface RunnableAssignFields<RunInput> {
 
 /**
  * A runnable that assigns key-value pairs to inputs of type `Record<string, unknown>`.
+ * @example
+ * ```typescript
+ * import {
+ *   RunnableAssign,
+ *   RunnableLambda,
+ *   RunnableParallel,
+ * } from "@langchain/core/runnables";
+ *
+ * const calculateAge = (x: { birthYear: number }): { age: number } => {
+ *   const currentYear = new Date().getFullYear();
+ *   return { age: currentYear - x.birthYear };
+ * };
+ *
+ * const createGreeting = (x: { name: string }): { greeting: string } => {
+ *   return { greeting: `Hello, ${x.name}!` };
+ * };
+ *
+ * const mapper = RunnableParallel.from({
+ *   age_step: RunnableLambda.from(calculateAge),
+ *   greeting_step: RunnableLambda.from(createGreeting),
+ * });
+ *
+ * const runnableAssign = new RunnableAssign({ mapper });
+ *
+ * const res = await runnableAssign.invoke({ name: "Alice", birthYear: 1990 });
+ *
+ * // { name: "Alice", birthYear: 1990, age_step: { age: 34 }, greeting_step: { greeting: "Hello, Alice!" } }
+ * ```
  */
 export class RunnableAssign<
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -2963,6 +3190,27 @@ export interface RunnablePickFields {
 
 /**
  * A runnable that assigns key-value pairs to inputs of type `Record<string, unknown>`.
+ * Useful for streaming, can be automatically created and chained by calling `runnable.pick();`.
+ * @example + * ```typescript + * import { RunnablePick } from "@langchain/core/runnables"; + * + * const inputData = { + * name: "John", + * age: 30, + * city: "New York", + * country: "USA", + * email: "john.doe@example.com", + * phone: "+1234567890", + * }; + * + * const basicInfoRunnable = new RunnablePick(["name", "city"]); + * + * // Example invocation + * const res = await basicInfoRunnable.invoke(inputData); + * + * // { name: 'John', city: 'New York' } + * ``` */ export class RunnablePick< // eslint-disable-next-line @typescript-eslint/no-explicit-any diff --git a/langchain-core/src/runnables/router.ts b/langchain-core/src/runnables/router.ts index 5404e993e3b8..7a68adce26f4 100644 --- a/langchain-core/src/runnables/router.ts +++ b/langchain-core/src/runnables/router.ts @@ -11,6 +11,29 @@ export type RouterInput = { /** * A runnable that routes to a set of runnables based on Input['key']. * Returns the output of the selected runnable. + * @example + * ```typescript + * import { RouterRunnable, RunnableLambda } from "@langchain/core/runnables"; + * + * const router = new RouterRunnable({ + * runnables: { + * toUpperCase: RunnableLambda.from((text: string) => text.toUpperCase()), + * reverseText: RunnableLambda.from((text: string) => + * text.split("").reverse().join("") + * ), + * }, + * }); + * + * // Invoke the 'reverseText' runnable + * const result1 = router.invoke({ key: "reverseText", input: "Hello World" }); + * + * // "dlroW olleH" + * + * // Invoke the 'toUpperCase' runnable + * const result2 = router.invoke({ key: "toUpperCase", input: "Hello World" }); + * + * // "HELLO WORLD" + * ``` */ export class RouterRunnable< RunInput extends RouterInput, From 8378a57a4e6eb39acc6a79f39430f33ec270c059 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 13:35:08 -0700 Subject: [PATCH 031/100] chore(deps): bump langchain from 0.2.10 to 0.3.0 in /libs/langchain-community/src/vectorstores/tests/faiss.int.test.data in the pip group across 1 directory (#7105) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../src/vectorstores/tests/faiss.int.test.data/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt b/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt index be1f7b791532..48c48d373c6d 100644 --- a/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt +++ b/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.2.10 +langchain==0.3.0 langchain-community==0.2.9 \ No newline at end of file From e53531a2e5f723d63683b4d5a42c2ee8fba57dd8 Mon Sep 17 00:00:00 2001 From: Christopher Dierkens Date: Tue, 29 Oct 2024 16:38:42 -0400 Subject: [PATCH 032/100] feat(community): Add delete and allow default row id in libsql (#7053) Co-authored-by: jacoblee93 --- .../src/chat_models/ibm.ts | 2 + .../src/chat_models/tests/ibm.test.ts | 1 + .../src/embeddings/tests/ibm.test.ts | 1 + .../src/llms/tests/ibm.test.ts | 1 + libs/langchain-community/src/utils/ibm.ts | 1 + .../src/vectorstores/libsql.ts | 65 ++-- .../src/vectorstores/tests/libsql.int.test.ts | 296 +++++++++++++++++- 7 files changed, 341 insertions(+), 26 deletions(-) diff --git a/libs/langchain-community/src/chat_models/ibm.ts 
b/libs/langchain-community/src/chat_models/ibm.ts index d0fae3ac15ce..dd468909a886 100644 --- a/libs/langchain-community/src/chat_models/ibm.ts +++ b/libs/langchain-community/src/chat_models/ibm.ts @@ -1,3 +1,5 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ + import { AIMessage, AIMessageChunk, diff --git a/libs/langchain-community/src/chat_models/tests/ibm.test.ts b/libs/langchain-community/src/chat_models/tests/ibm.test.ts index 3fea7de8504b..8e04c1c26c6b 100644 --- a/libs/langchain-community/src/chat_models/tests/ibm.test.ts +++ b/libs/langchain-community/src/chat_models/tests/ibm.test.ts @@ -1,4 +1,5 @@ /* eslint-disable no-process-env */ +/* eslint-disable @typescript-eslint/no-explicit-any */ import WatsonxAiMlVml_v1 from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; import { ChatWatsonx, ChatWatsonxInput, WatsonxCallParams } from "../ibm.js"; import { authenticateAndSetInstance } from "../../utils/ibm.js"; diff --git a/libs/langchain-community/src/embeddings/tests/ibm.test.ts b/libs/langchain-community/src/embeddings/tests/ibm.test.ts index affa8491807f..05f033f6f1af 100644 --- a/libs/langchain-community/src/embeddings/tests/ibm.test.ts +++ b/libs/langchain-community/src/embeddings/tests/ibm.test.ts @@ -1,4 +1,5 @@ /* eslint-disable no-process-env */ +/* eslint-disable @typescript-eslint/no-explicit-any */ import { testProperties } from "../../llms/tests/ibm.test.js"; import { WatsonxEmbeddings } from "../ibm.js"; diff --git a/libs/langchain-community/src/llms/tests/ibm.test.ts b/libs/langchain-community/src/llms/tests/ibm.test.ts index e0d6f3e4b521..7dfaecd6361c 100644 --- a/libs/langchain-community/src/llms/tests/ibm.test.ts +++ b/libs/langchain-community/src/llms/tests/ibm.test.ts @@ -1,4 +1,5 @@ /* eslint-disable no-process-env */ +/* eslint-disable @typescript-eslint/no-explicit-any */ import WatsonxAiMlVml_v1 from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; import { WatsonxLLM, WatsonxInputLLM } from "../ibm.js"; import { authenticateAndSetInstance } from "../../utils/ibm.js"; diff --git a/libs/langchain-community/src/utils/ibm.ts b/libs/langchain-community/src/utils/ibm.ts index acbb86f1a304..ccbe1204ef60 100644 --- a/libs/langchain-community/src/utils/ibm.ts +++ b/libs/langchain-community/src/utils/ibm.ts @@ -1,3 +1,4 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ import { WatsonXAI } from "@ibm-cloud/watsonx-ai"; import { IamAuthenticator, diff --git a/libs/langchain-community/src/vectorstores/libsql.ts b/libs/langchain-community/src/vectorstores/libsql.ts index 05c77da7489c..dfdaeaca167b 100644 --- a/libs/langchain-community/src/vectorstores/libsql.ts +++ b/libs/langchain-community/src/vectorstores/libsql.ts @@ -1,7 +1,7 @@ -import type { Client } from "@libsql/client"; -import { VectorStore } from "@langchain/core/vectorstores"; -import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { Document } from "@langchain/core/documents"; +import type { EmbeddingsInterface } from "@langchain/core/embeddings"; +import { VectorStore } from "@langchain/core/vectorstores"; +import type { Client, InStatement } from "@libsql/client"; /** * Interface for LibSQLVectorStore configuration options. @@ -82,23 +82,17 @@ export class LibSQLVectorStore extends VectorStore { for (let i = 0; i < rows.length; i += batchSize) { const chunk = rows.slice(i, i + batchSize); - const insertQueries = chunk.map((row) => ({ - sql: `INSERT INTO ${this.table} (content, metadata, ${this.column}) VALUES (?, ?, ?) 
RETURNING id`, - args: [row.content, row.metadata, row.embedding], + + const insertQueries: InStatement[] = chunk.map((row) => ({ + sql: `INSERT INTO ${this.table} (content, metadata, ${this.column}) VALUES (:content, :metadata, vector(:embedding)) RETURNING ${this.table}.rowid AS id`, + args: row, })); const results = await this.db.batch(insertQueries); - for (const result of results) { - if ( - result && - result.rows && - result.rows.length > 0 && - result.rows[0].id != null - ) { - ids.push(result.rows[0].id.toString()); - } - } + ids.push( + ...results.flatMap((result) => result.rows.map((row) => String(row.id))) + ); } return ids; @@ -123,11 +117,12 @@ export class LibSQLVectorStore extends VectorStore { const queryVector = `[${query.join(",")}]`; - const sql = ` - SELECT ${this.table}.id, ${this.table}.content, ${this.table}.metadata, vector_distance_cos(${this.table}.${this.column}, vector('${queryVector}')) AS distance - FROM vector_top_k('idx_${this.table}_${this.column}', vector('${queryVector}'), ${k}) AS top_k - JOIN ${this.table} ON top_k.rowid = ${this.table}.id - `; + const sql: InStatement = { + sql: `SELECT ${this.table}.rowid as id, ${this.table}.content, ${this.table}.metadata, vector_distance_cos(${this.table}.${this.column}, vector(:queryVector)) AS distance + FROM vector_top_k('idx_${this.table}_${this.column}', vector(:queryVector), CAST(:k AS INTEGER)) as top_k + JOIN ${this.table} ON top_k.rowid = ${this.table}.rowid`, + args: { queryVector, k }, + }; const results = await this.db.execute(sql); @@ -136,7 +131,7 @@ export class LibSQLVectorStore extends VectorStore { const metadata = JSON.parse(row.metadata); const doc = new Document({ - id: row.id, + id: String(row.id), metadata, pageContent: row.content, }); @@ -145,6 +140,32 @@ export class LibSQLVectorStore extends VectorStore { }); } + /** + * Deletes vectors from the store. + * @param {Object} params - Delete parameters. + * @param {string[] | number[]} [params.ids] - The ids of the vectors to delete. + * @returns {Promise} + */ + async delete(params: { + ids?: string[] | number[]; + deleteAll?: boolean; + }): Promise { + if (params.deleteAll) { + await this.db.execute(`DELETE FROM ${this.table}`); + } else if (params.ids !== undefined) { + await this.db.batch( + params.ids.map((id) => ({ + sql: `DELETE FROM ${this.table} WHERE rowid = :id`, + args: { id }, + })) + ); + } else { + throw new Error( + `You must provide an "ids" parameter or a "deleteAll" parameter.` + ); + } + } + /** * Creates a new LibSQLVectorStore instance from texts. * @param {string[]} texts - The texts to add to the store. 
diff --git a/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts b/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts index 63fe6cbe2df7..5dbec055afff 100644 --- a/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts @@ -1,13 +1,14 @@ /* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { expect, test } from "@jest/globals"; -import { OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; +import { OpenAIEmbeddings } from "@langchain/openai"; import { createClient } from "@libsql/client"; +import { SyntheticEmbeddings } from "@langchain/core/utils/testing"; +import fs from "node:fs"; +import { LibSQLVectorStore, LibSQLVectorStoreArgs } from "../libsql.js"; -import { LibSQLVectorStore } from "../libsql.js"; - -test("can create and query", async () => { +test("can create and query (cloud)", async () => { const client = createClient({ url: process.env.LIBSQL_URL!, authToken: process.env.LIBSQL_AUTH_TOKEN, @@ -43,3 +44,290 @@ test("can create and query", async () => { const results = await vectorStore.similaritySearchWithScore("added first", 4); expect(results.length).toBe(4); }); + +describe("LibSQLVectorStore (local)", () => { + const client = createClient({ + url: "file:store.db", + }); + + const config: LibSQLVectorStoreArgs = { + db: client, + }; + + const embeddings = new SyntheticEmbeddings({ + vectorSize: 1024, + }); + + afterAll(async () => { + await client.close(); + if (fs.existsSync("store.db")) { + fs.unlinkSync("store.db"); + } + }); + + test("a document with content can be added", async () => { + await client.batch([ + `DROP TABLE IF EXISTS vectors;`, + `CREATE TABLE IF NOT EXISTS vectors ( + content TEXT, + metadata JSON, + embedding F32_BLOB(1024) + );`, + `CREATE INDEX IF NOT EXISTS idx_vectors_embedding + ON vectors (libsql_vector_idx(embedding));`, + ]); + + const store = new LibSQLVectorStore(embeddings, config); + + const ids = await store.addDocuments([ + { + pageContent: "hello", + metadata: { a: 1 }, + }, + ]); + + expect(ids).toHaveLength(1); + + const [id] = ids; + + expect(typeof id).toBe("string"); + + const resultSet = await client.execute(`SELECT * FROM vectors`); + + expect(resultSet.rows).toHaveLength(1); + + const [row] = resultSet.rows; + + expect(row.content).toBe("hello"); + expect(JSON.parse(row.metadata as string)).toEqual({ a: 1 }); + }); + + test("a document with spaces in the content can be added", async () => { + await client.batch([ + `DROP TABLE IF EXISTS vectors;`, + `CREATE TABLE IF NOT EXISTS vectors ( + content TEXT, + metadata JSON, + embedding F32_BLOB(1024) + );`, + `CREATE INDEX IF NOT EXISTS idx_vectors_embedding + ON vectors (libsql_vector_idx(embedding));`, + ]); + + const store = new LibSQLVectorStore(embeddings, config); + + const ids = await store.addDocuments([ + { + pageContent: "hello world", + metadata: { a: 1 }, + }, + ]); + + expect(ids).toHaveLength(1); + + const [id] = ids; + + expect(typeof id).toBe("string"); + + const resultSet = await client.execute(`SELECT * FROM vectors`); + + expect(resultSet.rows).toHaveLength(1); + + const [row] = resultSet.rows; + + expect(row.content).toBe("hello world"); + expect(JSON.parse(row.metadata as string)).toEqual({ a: 1 }); + }); + + test("a similarity search can be performed", async () => { + await client.batch([ + `DROP TABLE IF EXISTS vectors;`, + `CREATE TABLE IF NOT EXISTS 
vectors ( + content TEXT, + metadata JSON, + embedding F32_BLOB(1024) + );`, + `CREATE INDEX IF NOT EXISTS idx_vectors_embedding + ON vectors (libsql_vector_idx(embedding));`, + ]); + + const store = new LibSQLVectorStore(embeddings, config); + + const ids = await store.addDocuments([ + { + pageContent: "the quick brown fox", + metadata: { a: 1 }, + }, + { + pageContent: "jumped over the lazy dog", + metadata: { a: 2 }, + }, + { + pageContent: "hello world", + metadata: { a: 3 }, + }, + ]); + + expect(ids).toHaveLength(3); + expect(ids.every((id) => typeof id === "string")).toBe(true); + + const results1 = await store.similaritySearch("the quick brown dog", 2); + + expect(results1).toHaveLength(2); + expect( + results1.map((result) => result.id).every((id) => typeof id === "string") + ).toBe(true); + + const results2 = await store.similaritySearch("hello"); + + expect(results2).toHaveLength(3); + expect( + results2.map((result) => result.id).every((id) => typeof id === "string") + ).toBe(true); + }); + + test("a document can be deleted by id", async () => { + await client.batch([ + `DROP TABLE IF EXISTS vectors;`, + `CREATE TABLE IF NOT EXISTS vectors ( + content TEXT, + metadata JSON, + embedding F32_BLOB(1024) + );`, + `CREATE INDEX IF NOT EXISTS idx_vectors_embedding + ON vectors (libsql_vector_idx(embedding));`, + ]); + + const store = new LibSQLVectorStore(embeddings, config); + + const ids = await store.addDocuments([ + { + pageContent: "the quick brown fox", + metadata: { a: 1 }, + }, + { + pageContent: "jumped over the lazy dog", + metadata: { a: 2 }, + }, + { + pageContent: "hello world", + metadata: { a: 3 }, + }, + ]); + + expect(ids).toHaveLength(3); + expect(ids.every((id) => typeof id === "string")).toBe(true); + + const [id1, id2] = ids; + + await store.delete({ ids: [id1, id2] }); + + const resultSet = await client.execute(`SELECT * FROM vectors`); + + expect(resultSet.rows).toHaveLength(1); + + const [row] = resultSet.rows; + + expect(row.content).toBe("hello world"); + expect(JSON.parse(row.metadata as string)).toEqual({ a: 3 }); + }); + + test("all documents can be deleted", async () => { + await client.batch([ + `DROP TABLE IF EXISTS vectors;`, + `CREATE TABLE IF NOT EXISTS vectors ( + content TEXT, + metadata JSON, + embedding F32_BLOB(1024) + );`, + `CREATE INDEX IF NOT EXISTS idx_vectors_embedding + ON vectors (libsql_vector_idx(embedding));`, + ]); + + const store = new LibSQLVectorStore(embeddings, config); + + const ids = await store.addDocuments([ + { + pageContent: "the quick brown fox", + metadata: { a: 1 }, + }, + { + pageContent: "jumped over the lazy dog", + metadata: { a: 2 }, + }, + { + pageContent: "hello world", + metadata: { a: 3 }, + }, + ]); + + expect(ids).toHaveLength(3); + expect(ids.every((id) => typeof id === "string")).toBe(true); + + await store.delete({ + deleteAll: true, + }); + + const resultSet = await client.execute(`SELECT * FROM vectors`); + + expect(resultSet.rows).toHaveLength(0); + }); + + test("the table can have a custom id column name", async () => { + await client.batch([ + `DROP TABLE IF EXISTS vectors;`, + `CREATE TABLE IF NOT EXISTS vectors ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + content TEXT, + metadata JSON, + embedding F32_BLOB(1024) + );`, + `CREATE INDEX IF NOT EXISTS idx_vectors_embedding + ON vectors (libsql_vector_idx(embedding));`, + ]); + + const store = new LibSQLVectorStore(embeddings, config); + + const ids = await store.addDocuments([ + { + pageContent: "the quick brown fox", + metadata: { a: 1 }, + }, + { + 
pageContent: "jumped over the lazy dog", + metadata: { a: 2 }, + }, + { + pageContent: "hello world", + metadata: { a: 3 }, + }, + ]); + + expect(ids).toHaveLength(3); + expect(ids).toEqual(["1", "2", "3"]); + + const results = await store.similaritySearch("the quick brown dog", 2); + + expect(results).toHaveLength(2); + expect(results.map((result) => result.pageContent)).toEqual([ + "the quick brown fox", + "jumped over the lazy dog", + ]); + expect( + results.map((result) => result.id).every((id) => typeof id === "string") + ).toBe(true); + + const [id1, id2] = ids; + + await store.delete({ ids: [id1, id2] }); + + const resultSet = await client.execute(`SELECT * FROM vectors`); + + expect(resultSet.rows).toHaveLength(1); + + const [row] = resultSet.rows; + + expect(row.content).toBe("hello world"); + expect(JSON.parse(row.metadata as string)).toEqual({ a: 3 }); + }); +}); From 62fbce2ae58b180d1373bf0d53f4eae36a37e402 Mon Sep 17 00:00:00 2001 From: crisjy Date: Wed, 30 Oct 2024 04:56:32 +0800 Subject: [PATCH 033/100] feat(azure-cosmosdb): Vector store - Add hnsw index to Azure Cosmos DB (#7101) Co-authored-by: cris <617072224@qq.com> Co-authored-by: jacoblee93 --- .../src/azure_cosmosdb_mongodb.ts | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_mongodb.ts b/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_mongodb.ts index 97bfc7de4b98..805d7417e9c0 100644 --- a/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_mongodb.ts +++ b/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_mongodb.ts @@ -33,12 +33,18 @@ export type AzureCosmosDBMongoDBSimilarityType = export type AzureCosmosDBMongoDBIndexOptions = { /** Skips automatic index creation. */ readonly skipCreate?: boolean; + + readonly indexType?: "ivf" | "hnsw"; /** Number of clusters that the inverted file (IVF) index uses to group the vector data. */ readonly numLists?: number; /** Number of dimensions for vector similarity. */ readonly dimensions?: number; /** Similarity metric to use with the IVF index. */ readonly similarity?: AzureCosmosDBMongoDBSimilarityType; + /** The max number of connections per layer with the HNSW index. */ + readonly m?: number; + /** The size of the dynamic candidate list for constructing the graph with the HNSW index. */ + readonly efConstruction?: number; }; /** Azure Cosmos DB for MongoDB vCore delete Parameters. */ @@ -214,6 +220,7 @@ export class AzureCosmosDBMongoDBVectorStore extends VectorStore { * documents. * Using a numLists value of 1 is akin to performing brute-force search, * which has limited performance + * @param indexType Index Type for Mongo vCore index. * @param dimensions Number of dimensions for vector similarity. * The maximum number of supported dimensions is 2000. * If no number is provided, it will be determined automatically by @@ -226,8 +233,8 @@ export class AzureCosmosDBMongoDBVectorStore extends VectorStore { * @returns A promise that resolves when the index has been created. */ async createIndex( - numLists = 100, dimensions: number | undefined = undefined, + indexType: "ivf" | "hnsw" = "ivf", similarity: AzureCosmosDBMongoDBSimilarityType = AzureCosmosDBMongoDBSimilarityType.COS ): Promise { await this.connectPromise; @@ -246,8 +253,13 @@ export class AzureCosmosDBMongoDBVectorStore extends VectorStore { name: this.indexName, key: { [this.embeddingKey]: "cosmosSearch" }, cosmosSearchOptions: { - kind: "vector-ivf", - numLists, + kind: indexType === "hnsw" ? 
"vector-hnsw" : "vector-ivf", + ...(indexType === "hnsw" + ? { + m: this.indexOptions.m ?? 16, + efConstruction: this.indexOptions.efConstruction ?? 200, + } + : { numLists: this.indexOptions.numLists ?? 100 }), similarity, dimensions: vectorLength, }, @@ -437,9 +449,10 @@ export class AzureCosmosDBMongoDBVectorStore extends VectorStore { // Unless skipCreate is set, create the index // This operation is no-op if the index already exists if (!this.indexOptions.skipCreate) { + const indexType = this.indexOptions.indexType === "hnsw" ? "hnsw" : "ivf"; await this.createIndex( - this.indexOptions.numLists, this.indexOptions.dimensions, + indexType, this.indexOptions.similarity ); } From fdffe200e4957590d46967fced300e0c751dea13 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 29 Oct 2024 14:15:35 -0700 Subject: [PATCH 034/100] chore(community): Release 0.3.11 (#7107) --- libs/langchain-community/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 20128b15faaa..e530bac5b532 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.10", + "version": "0.3.11", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { From 2ca588c8a3941f46b67f55ac7ebdcbe103634493 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Tue, 29 Oct 2024 14:42:33 -0700 Subject: [PATCH 035/100] fix(core): Include provider name in base lsParams method (#7102) --- langchain-core/src/language_models/chat_models.ts | 5 +++++ .../tests/runnable_stream_events.test.ts | 6 ++++++ .../tests/runnable_stream_events_v2.test.ts | 15 +++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/langchain-core/src/language_models/chat_models.ts b/langchain-core/src/language_models/chat_models.ts index 43d820ed338f..c53f4b4fea76 100644 --- a/langchain-core/src/language_models/chat_models.ts +++ b/langchain-core/src/language_models/chat_models.ts @@ -315,9 +315,14 @@ export abstract class BaseChatModel< } getLsParams(options: this["ParsedCallOptions"]): LangSmithParams { + const providerName = this.getName().startsWith("Chat") + ? 
this.getName().replace("Chat", "") + : this.getName(); + return { ls_model_type: "chat", ls_stop: options.stop, + ls_provider: providerName, }; } diff --git a/langchain-core/src/runnables/tests/runnable_stream_events.test.ts b/langchain-core/src/runnables/tests/runnable_stream_events.test.ts index 7f6bce337ce8..c2e7d32ae94e 100644 --- a/langchain-core/src/runnables/tests/runnable_stream_events.test.ts +++ b/langchain-core/src/runnables/tests/runnable_stream_events.test.ts @@ -714,6 +714,7 @@ test("Runnable streamEvents method with chat model chain", async () => { foo: "bar", a: "b", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, data: { @@ -732,6 +733,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -755,6 +757,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -778,6 +781,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -801,6 +805,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -825,6 +830,7 @@ test("Runnable streamEvents method with chat model chain", async () => { foo: "bar", a: "b", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, data: { diff --git a/langchain-core/src/runnables/tests/runnable_stream_events_v2.test.ts b/langchain-core/src/runnables/tests/runnable_stream_events_v2.test.ts index 2807b4935657..e79f4765d20a 100644 --- a/langchain-core/src/runnables/tests/runnable_stream_events_v2.test.ts +++ b/langchain-core/src/runnables/tests/runnable_stream_events_v2.test.ts @@ -871,6 +871,7 @@ test("Runnable streamEvents method with chat model chain", async () => { foo: "bar", a: "b", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, data: { @@ -889,6 +890,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -912,6 +914,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -935,6 +938,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -958,6 +962,7 @@ test("Runnable streamEvents method with chat model chain", async () => { a: "b", foo: "bar", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, name: "my_model", @@ -982,6 +987,7 @@ test("Runnable streamEvents method with chat model chain", async () => { foo: "bar", a: "b", ls_model_type: "chat", + ls_provider: model.getName(), ls_stop: undefined, }, data: { @@ -1101,6 +1107,7 @@ test("Chat model that supports streaming, but is invoked, should still emit on_s a: "b", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, data: { input: { @@ -1119,6 +1126,7 @@ test("Chat model that supports 
streaming, but is invoked, should still emit on_s foo: "bar", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, @@ -1132,6 +1140,7 @@ test("Chat model that supports streaming, but is invoked, should still emit on_s foo: "bar", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "O" }) }, @@ -1145,6 +1154,7 @@ test("Chat model that supports streaming, but is invoked, should still emit on_s foo: "bar", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "A" }) }, @@ -1158,6 +1168,7 @@ test("Chat model that supports streaming, but is invoked, should still emit on_s foo: "bar", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, name: "my_model", data: { chunk: new AIMessageChunk({ id: anyString, content: "R" }) }, @@ -1172,6 +1183,7 @@ test("Chat model that supports streaming, but is invoked, should still emit on_s a: "b", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, data: { input: { @@ -1321,6 +1333,7 @@ test("Chat model that doesn't support streaming, but is invoked, should emit one a: "b", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, data: { input: { @@ -1339,6 +1352,7 @@ test("Chat model that doesn't support streaming, but is invoked, should emit one foo: "bar", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, name: "my_model", data: { @@ -1358,6 +1372,7 @@ test("Chat model that doesn't support streaming, but is invoked, should emit one a: "b", ls_model_type: "chat", ls_stop: undefined, + ls_provider: model.getName(), }, data: { input: { From e0c05df4a016e224a3a2f46cb6ac27c58592dced Mon Sep 17 00:00:00 2001 From: Gareth Andrew Date: Tue, 29 Oct 2024 21:43:38 +0000 Subject: [PATCH 036/100] fix(anthropic): Fix multipart tool message (#7096) Co-authored-by: Brace Sproul --- .../src/tests/chat_models-tools.int.test.ts | 38 +++++++++++++++++++ .../src/utils/message_inputs.ts | 12 +++++- 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/libs/langchain-anthropic/src/tests/chat_models-tools.int.test.ts b/libs/langchain-anthropic/src/tests/chat_models-tools.int.test.ts index 8deb3c8626e1..eb60e8d8da06 100644 --- a/libs/langchain-anthropic/src/tests/chat_models-tools.int.test.ts +++ b/libs/langchain-anthropic/src/tests/chat_models-tools.int.test.ts @@ -88,6 +88,44 @@ test("Few shotting with tool calls", async () => { expect(res.content).toContain("24"); }); +test("Multipart ToolMessage", async () => { + const chat = model.bindTools([new WeatherTool()]); + const res = await chat.invoke([ + new HumanMessage("What is the weather in SF?"), + new AIMessage({ + content: "Let me look up the current weather.", + tool_calls: [ + { + id: "toolu_feiwjf9u98r389u498", + name: "get_weather", + args: { + location: "SF", + }, + }, + ], + }), + new ToolMessage({ + tool_call_id: "toolu_feiwjf9u98r389u498", + content: [ + { + type: "text", + text: "It is currently 24 degrees with hail in San Francisco.", + }, + { + type: "image_url", + image_url: + 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAA5QAAAOUBj+WbPAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAH0SURBVFiFzZcxSytBEMd/E+PL2aVNKomFpBP1K4iFvEIQrN6neGAZEUWw8xMIthZaSAR5rzPwGi0t9APIK0RBCxMsxiKzst7lLpszeA4Mt7c785//DLs7d6Kq5BURaQOo6kpujLwERKQCdO01UtVeHpxSrujGIWX8ZQTGIgkCItIQkZaI1McVRETqhtlILKrqBwV2AAVugXp8PWbbATpDbOqGpUArsT7E4QaoZYALtpFT1muGkZpQFmvneA2Us7JMwSibr0tkYDWzAGoG8ARUvfk5YA/4CzwC98A5sAs0Pbuq+V5nVjEgi6qNJ4Et4NWyGqRdYAOY8EhkVi+0nD+Af16gQ2ANmAZmgHXgyFv/A5SCsAMJbBvwf2A5w24VeDDb32MhAMx7ZV8KsF8z2xdgdhwE9g3wYIQTcGw+m8NsS9BvLCISOeWjLNjzhHBxtov+pB/DmhkAbZK7uUP/kikBzzaXepQGVKBpPnf2LoYZj9MuvBk5xhUgchrL5oI+258jVOCX+ZzG5iNPK+97QFV7qtp1GuN4Zc/VEfJytpexZLue9t4r8K2PoRZ9ERlwMVcxRTYjimzHFPlBQtGfZHyDj9IG0AoIHnmbLwog0QIa8bXP/JpF9C8bgClN3qBBUngz+gwBTRmPJOXc0VV7InLmxnlx3gDvLHwSZKNszAAAAABJRU5ErkJggg==", + }, + ], + }), + new AIMessage( + "It is currently 24 degrees in San Francisco with hail in San Francisco." + ), + new HumanMessage("What did you say the weather was?"), + ]); + expect(res.content).toContain("24"); +}); + test("Invalid tool calls should throw an appropriate error", async () => { const chat = model.bindTools([new WeatherTool()]); let error; diff --git a/libs/langchain-anthropic/src/utils/message_inputs.ts b/libs/langchain-anthropic/src/utils/message_inputs.ts index 8f810b231f97..4082405de828 100644 --- a/libs/langchain-anthropic/src/utils/message_inputs.ts +++ b/libs/langchain-anthropic/src/utils/message_inputs.ts @@ -71,7 +71,17 @@ function _mergeMessages( ); } } else { - merged.push(new HumanMessage({ content: message.content })); + merged.push( + new HumanMessage({ + content: [ + { + type: "tool_result", + content: _formatContent(message.content), + tool_use_id: (message as ToolMessage).tool_call_id, + }, + ], + }) + ); } } else { const previousMessage = merged[merged.length - 1]; From 71b6a2dac9fe9083eb662b5d02df2790475668ce Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 29 Oct 2024 14:53:48 -0700 Subject: [PATCH 037/100] chore(anthropic): Release 0.3.7 (#7108) --- libs/langchain-anthropic/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-anthropic/package.json b/libs/langchain-anthropic/package.json index 75c0414ca5da..6d37b6303602 100644 --- a/libs/langchain-anthropic/package.json +++ b/libs/langchain-anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/anthropic", - "version": "0.3.6", + "version": "0.3.7", "description": "Anthropic integrations for LangChain.js", "type": "module", "engines": { From 37e21d281a4fdc6f58bbd233af4371888bd5d9c8 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 29 Oct 2024 15:05:47 -0700 Subject: [PATCH 038/100] chore(core): Release 0.3.16 (#7109) --- langchain-core/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain-core/package.json b/langchain-core/package.json index 0741ec42c6af..c77599697a18 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/core", - "version": "0.3.15", + "version": "0.3.16", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { From 79f3854ccbf373b20671bfc9bf45d9689c77df09 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Wed, 30 Oct 2024 15:48:22 -0700 Subject: [PATCH 039/100] feat: New conceptual docs (#7068) --- README.md | 2 +- docs/core_docs/.gitignore | 104 +- docs/core_docs/docs/concepts.mdx | 1436 ------ docs/core_docs/docs/concepts/agents.mdx | 24 + 
docs/core_docs/docs/concepts/architecture.mdx | 64 + docs/core_docs/docs/concepts/callbacks.mdx | 59 + docs/core_docs/docs/concepts/chat_history.mdx | 49 + docs/core_docs/docs/concepts/chat_models.mdx | 153 + .../docs/concepts/document_loaders.mdx | 40 + .../docs/concepts/embedding_models.mdx | 133 + docs/core_docs/docs/concepts/evaluation.mdx | 17 + .../docs/concepts/example_selectors.mdx | 23 + .../docs/concepts/few_shot_prompting.mdx | 90 + docs/core_docs/docs/concepts/index.mdx | 84 + .../docs/concepts/key_value_stores.mdx | 38 + docs/core_docs/docs/concepts/lcel.mdx | 163 + docs/core_docs/docs/concepts/messages.mdx | 265 ++ .../core_docs/docs/concepts/multimodality.mdx | 94 + .../docs/concepts/output_parsers.mdx | 36 + .../docs/concepts/prompt_templates.mdx | 126 + docs/core_docs/docs/concepts/rag.mdx | 109 + docs/core_docs/docs/concepts/retrieval.mdx | 246 + docs/core_docs/docs/concepts/retrievers.mdx | 143 + docs/core_docs/docs/concepts/runnables.mdx | 248 + docs/core_docs/docs/concepts/streaming.mdx | 189 + .../docs/concepts/structured_outputs.mdx | 169 + docs/core_docs/docs/concepts/t.ipynb | 81 + docs/core_docs/docs/concepts/text_llms.mdx | 10 + .../docs/concepts/text_splitters.mdx | 145 + docs/core_docs/docs/concepts/tokens.mdx | 58 + docs/core_docs/docs/concepts/tool_calling.mdx | 185 + docs/core_docs/docs/concepts/tools.mdx | 178 + docs/core_docs/docs/concepts/tracing.mdx | 10 + docs/core_docs/docs/concepts/vectorstores.mdx | 194 + .../core_docs/docs/concepts/why_langchain.mdx | 122 + .../docs/how_to/agent_executor.ipynb | 1910 ++++---- docs/core_docs/docs/how_to/assign.ipynb | 376 +- docs/core_docs/docs/how_to/binding.ipynb | 616 +-- .../docs/how_to/caching_embeddings.mdx | 2 +- .../docs/how_to/callbacks_attach.ipynb | 542 +-- .../docs/how_to/callbacks_constructor.ipynb | 398 +- .../docs/how_to/callbacks_custom_events.ipynb | 452 +- .../docs/how_to/callbacks_runtime.ipynb | 534 +-- .../docs/how_to/callbacks_serverless.ipynb | 254 +- .../docs/how_to/cancel_execution.ipynb | 580 +-- .../docs/how_to/character_text_splitter.ipynb | 302 +- .../docs/how_to/chat_model_caching.mdx | 4 +- .../how_to/chat_models_universal_init.mdx | 6 +- .../docs/how_to/chat_token_usage_tracking.mdx | 2 +- .../docs/how_to/chatbots_tools.ipynb | 1006 ++-- .../core_docs/docs/how_to/code_splitter.ipynb | 1360 +++--- .../docs/how_to/contextual_compression.mdx | 2 +- .../how_to/convert_runnable_to_tool.ipynb | 1066 ++--- .../docs/how_to/custom_callbacks.ipynb | 286 +- docs/core_docs/docs/how_to/custom_chat.ipynb | 1004 ++-- docs/core_docs/docs/how_to/custom_llm.ipynb | 586 +-- .../docs/how_to/custom_retriever.mdx | 2 +- docs/core_docs/docs/how_to/custom_tools.ipynb | 796 ++-- .../docs/how_to/document_loader_html.ipynb | 344 +- .../how_to/document_loader_markdown.ipynb | 578 +-- docs/core_docs/docs/how_to/embed_text.mdx | 2 +- .../docs/how_to/ensemble_retriever.mdx | 4 +- .../docs/how_to/example_selectors.ipynb | 556 +-- .../how_to/example_selectors_langsmith.ipynb | 1092 ++--- .../how_to/example_selectors_length_based.mdx | 2 +- .../how_to/example_selectors_similarity.mdx | 4 +- docs/core_docs/docs/how_to/fallbacks.mdx | 2 +- .../docs/how_to/few_shot_examples.ipynb | 722 +-- .../docs/how_to/few_shot_examples_chat.ipynb | 1384 +++--- docs/core_docs/docs/how_to/functions.ipynb | 748 +-- docs/core_docs/docs/how_to/index.mdx | 22 +- .../docs/how_to/llm_token_usage_tracking.mdx | 2 +- docs/core_docs/docs/how_to/logprobs.ipynb | 888 ++-- .../docs/how_to/message_history.ipynb | 1154 ++--- 
.../core_docs/docs/how_to/migrate_agent.ipynb | 2640 +++++------ docs/core_docs/docs/how_to/multi_vector.mdx | 4 +- .../docs/how_to/multimodal_inputs.ipynb | 410 +- .../docs/how_to/multimodal_prompts.ipynb | 376 +- .../docs/how_to/multiple_queries.ipynb | 522 +-- .../docs/how_to/output_parser_fixing.ipynb | 264 +- .../docs/how_to/output_parser_json.ipynb | 370 +- .../how_to/output_parser_structured.ipynb | 1120 ++--- .../docs/how_to/output_parser_xml.ipynb | 786 ++-- docs/core_docs/docs/how_to/parallel.mdx | 2 +- .../docs/how_to/parent_document_retriever.mdx | 4 +- docs/core_docs/docs/how_to/passthrough.ipynb | 350 +- .../docs/how_to/prompts_composition.ipynb | 666 +-- .../core_docs/docs/how_to/prompts_partial.mdx | 2 +- .../docs/how_to/qa_chat_history_how_to.ipynb | 6 +- docs/core_docs/docs/how_to/qa_citations.ipynb | 4 +- docs/core_docs/docs/how_to/qa_sources.ipynb | 4 +- docs/core_docs/docs/how_to/qa_streaming.ipynb | 6 +- .../docs/how_to/query_high_cardinality.ipynb | 4 +- .../docs/how_to/recursive_text_splitter.ipynb | 442 +- .../docs/how_to/reduce_retrieval_latency.mdx | 4 +- docs/core_docs/docs/how_to/routing.mdx | 6 +- docs/core_docs/docs/how_to/self_query.ipynb | 668 +-- docs/core_docs/docs/how_to/sequence.ipynb | 486 +- .../docs/how_to/split_by_token.ipynb | 212 +- .../docs/how_to/stream_agent_client.mdx | 8 +- .../docs/how_to/stream_tool_client.mdx | 6 +- docs/core_docs/docs/how_to/streaming.ipynb | 4112 ++++++++--------- .../docs/how_to/structured_output.ipynb | 1202 ++--- .../docs/how_to/time_weighted_vectorstore.mdx | 2 +- .../docs/how_to/tool_artifacts.ipynb | 686 +-- docs/core_docs/docs/how_to/tool_calling.ipynb | 10 +- .../docs/how_to/tool_calling_parallel.ipynb | 434 +- .../docs/how_to/tool_calls_multimodal.ipynb | 650 +-- docs/core_docs/docs/how_to/tool_choice.ipynb | 342 +- .../docs/how_to/tool_configure.ipynb | 220 +- .../how_to/tool_results_pass_to_model.ipynb | 728 +-- docs/core_docs/docs/how_to/tool_runtime.ipynb | 780 ++-- .../docs/how_to/tool_stream_events.ipynb | 1352 +++--- docs/core_docs/docs/how_to/tools_error.ipynb | 474 +- .../docs/how_to/tools_few_shot.ipynb | 424 +- .../docs/how_to/tools_prompting.ipynb | 824 ++-- .../core_docs/docs/how_to/trim_messages.ipynb | 1596 +++---- .../docs/how_to/vectorstore_retriever.mdx | 4 +- docs/core_docs/docs/how_to/vectorstores.mdx | 6 +- .../docs/integrations/chat/alibaba_tongyi.mdx | 2 +- .../docs/integrations/chat/anthropic.ipynb | 1970 ++++---- .../docs/integrations/chat/azure.ipynb | 884 ++-- .../docs/integrations/chat/baidu_qianfan.mdx | 2 +- .../docs/integrations/chat/baidu_wenxin.mdx | 2 +- .../docs/integrations/chat/bedrock.ipynb | 594 +-- .../integrations/chat/bedrock_converse.ipynb | 594 +-- .../chat/cloudflare_workersai.ipynb | 554 +-- .../docs/integrations/chat/cohere.ipynb | 3324 ++++++------- .../docs/integrations/chat/deep_infra.mdx | 2 +- .../core_docs/docs/integrations/chat/fake.mdx | 2 +- .../docs/integrations/chat/fireworks.ipynb | 566 +-- .../docs/integrations/chat/friendli.mdx | 2 +- .../chat/google_generativeai.ipynb | 940 ++-- .../integrations/chat/google_vertex_ai.ipynb | 552 +-- .../docs/integrations/chat/groq.ipynb | 610 +-- .../docs/integrations/chat/ibm.ipynb | 1168 ++--- .../docs/integrations/chat/index.mdx | 2 +- .../docs/integrations/chat/llama_cpp.mdx | 2 +- .../docs/integrations/chat/minimax.mdx | 2 +- .../docs/integrations/chat/mistral.ipynb | 700 +-- .../docs/integrations/chat/moonshot.mdx | 2 +- .../docs/integrations/chat/ni_bittensor.mdx | 2 +- .../docs/integrations/chat/ollama.ipynb | 
1106 ++--- .../integrations/chat/ollama_functions.mdx | 2 +- .../docs/integrations/chat/openai.ipynb | 2400 +++++----- .../docs/integrations/chat/premai.mdx | 2 +- .../integrations/chat/prompt_layer_openai.mdx | 2 +- .../integrations/chat/tencent_hunyuan.mdx | 2 +- .../docs/integrations/chat/togetherai.ipynb | 540 +-- .../docs/integrations/chat/web_llm.mdx | 2 +- .../docs/integrations/chat/yandex.mdx | 2 +- .../docs/integrations/chat/zhipuai.mdx | 2 +- .../document_loaders/file_loaders/csv.ipynb | 440 +- .../file_loaders/directory.ipynb | 374 +- .../document_loaders/file_loaders/pdf.ipynb | 984 ++-- .../document_loaders/file_loaders/text.ipynb | 318 +- .../file_loaders/unstructured.ipynb | 470 +- .../integrations/document_loaders/index.mdx | 2 +- .../core_docs/docs/integrations/llms/ai21.mdx | 2 +- .../docs/integrations/llms/aleph_alpha.mdx | 2 +- .../docs/integrations/llms/aws_sagemaker.mdx | 2 +- .../docs/integrations/llms/azure.ipynb | 678 +-- .../docs/integrations/llms/bedrock.ipynb | 546 +-- .../docs/integrations/llms/chrome_ai.mdx | 2 +- .../llms/cloudflare_workersai.ipynb | 408 +- .../docs/integrations/llms/cohere.ipynb | 518 +-- .../docs/integrations/llms/deep_infra.mdx | 2 +- .../docs/integrations/llms/fireworks.ipynb | 550 +-- .../docs/integrations/llms/friendli.mdx | 2 +- .../integrations/llms/google_vertex_ai.ipynb | 554 +-- .../docs/integrations/llms/gradient_ai.mdx | 2 +- .../llms/huggingface_inference.mdx | 2 +- .../docs/integrations/llms/ibm.ipynb | 708 +-- .../docs/integrations/llms/index.mdx | 2 +- .../docs/integrations/llms/jigsawstack.mdx | 2 +- .../integrations/llms/layerup_security.mdx | 2 +- .../docs/integrations/llms/llama_cpp.mdx | 2 +- .../docs/integrations/llms/mistral.ipynb | 606 +-- .../docs/integrations/llms/ni_bittensor.mdx | 2 +- .../docs/integrations/llms/ollama.ipynb | 556 +-- .../docs/integrations/llms/openai.ipynb | 514 +-- .../integrations/llms/prompt_layer_openai.mdx | 2 +- .../docs/integrations/llms/raycast.mdx | 2 +- .../docs/integrations/llms/replicate.mdx | 2 +- .../docs/integrations/llms/together.ipynb | 492 +- .../docs/integrations/llms/watsonx_ai.mdx | 2 +- .../docs/integrations/llms/writer.mdx | 2 +- .../docs/integrations/llms/yandex.mdx | 2 +- .../retrievers/bedrock-knowledge-bases.ipynb | 538 +-- .../retrievers/chaindesk-retriever.mdx | 2 +- .../retrievers/chatgpt-retriever-plugin.mdx | 2 +- .../docs/integrations/retrievers/dria.mdx | 2 +- .../docs/integrations/retrievers/exa.ipynb | 700 +-- .../docs/integrations/retrievers/hyde.mdx | 2 +- .../docs/integrations/retrievers/index.mdx | 2 +- .../retrievers/kendra-retriever.ipynb | 460 +- .../retrievers/metal-retriever.mdx | 2 +- .../retrievers/supabase-hybrid.mdx | 2 +- .../docs/integrations/retrievers/tavily.ipynb | 538 +-- .../retrievers/time-weighted-retriever.mdx | 2 +- .../retrievers/vespa-retriever.mdx | 2 +- .../retrievers/zep-cloud-retriever.mdx | 2 +- .../integrations/retrievers/zep-retriever.mdx | 2 +- .../integrations/stores/cassandra_storage.mdx | 2 +- .../integrations/stores/file_system.ipynb | 556 +-- .../docs/integrations/stores/in_memory.ipynb | 464 +- .../docs/integrations/stores/index.mdx | 2 +- .../integrations/stores/ioredis_storage.mdx | 2 +- .../stores/upstash_redis_storage.mdx | 2 +- .../integrations/stores/vercel_kv_storage.mdx | 2 +- .../text_embedding/alibaba_tongyi.mdx | 2 +- .../text_embedding/azure_openai.ipynb | 968 ++-- .../text_embedding/baidu_qianfan.mdx | 2 +- .../integrations/text_embedding/bedrock.ipynb | 674 +-- .../text_embedding/cloudflare_ai.ipynb | 376 +- 
.../integrations/text_embedding/cohere.ipynb | 666 +-- .../integrations/text_embedding/deepinfra.mdx | 2 +- .../text_embedding/fireworks.ipynb | 674 +-- .../text_embedding/google_generativeai.ipynb | 612 +-- .../text_embedding/google_vertex_ai.ipynb | 704 +-- .../text_embedding/gradient_ai.mdx | 2 +- .../text_embedding/hugging_face_inference.mdx | 2 +- .../integrations/text_embedding/ibm.ipynb | 766 +-- .../integrations/text_embedding/index.mdx | 2 +- .../docs/integrations/text_embedding/jina.mdx | 2 +- .../integrations/text_embedding/llama_cpp.mdx | 2 +- .../integrations/text_embedding/minimax.mdx | 2 +- .../text_embedding/mistralai.ipynb | 676 +-- .../text_embedding/mixedbread_ai.mdx | 2 +- .../integrations/text_embedding/nomic.mdx | 2 +- .../integrations/text_embedding/ollama.ipynb | 656 +-- .../integrations/text_embedding/openai.ipynb | 820 ++-- .../integrations/text_embedding/premai.mdx | 2 +- .../text_embedding/tencent_hunyuan.mdx | 2 +- .../text_embedding/tensorflow.mdx | 2 +- .../text_embedding/togetherai.ipynb | 592 +-- .../text_embedding/transformers.mdx | 2 +- .../integrations/text_embedding/voyageai.mdx | 2 +- .../integrations/text_embedding/zhipuai.mdx | 2 +- .../docs/integrations/toolkits/openapi.ipynb | 634 +-- .../docs/integrations/toolkits/sql.ipynb | 622 +-- .../integrations/toolkits/vectorstore.ipynb | 508 +- .../docs/integrations/tools/aiplugin-tool.mdx | 2 +- .../tools/azure_dynamic_sessions.mdx | 2 +- .../docs/integrations/tools/connery.mdx | 2 +- .../docs/integrations/tools/dalle.mdx | 2 +- .../docs/integrations/tools/discord.mdx | 2 +- .../tools/duckduckgo_search.ipynb | 570 +-- .../docs/integrations/tools/exa_search.ipynb | 838 ++-- .../docs/integrations/tools/gmail.mdx | 2 +- .../integrations/tools/google_calendar.mdx | 2 +- .../docs/integrations/tools/google_places.mdx | 2 +- .../docs/integrations/tools/google_routes.mdx | 2 +- .../docs/integrations/tools/index.mdx | 4 +- .../docs/integrations/tools/jigsawstack.mdx | 2 +- .../docs/integrations/tools/lambda_agent.mdx | 2 +- .../docs/integrations/tools/pyinterpreter.mdx | 2 +- .../docs/integrations/tools/searchapi.mdx | 2 +- .../docs/integrations/tools/searxng.mdx | 2 +- .../docs/integrations/tools/stackexchange.mdx | 2 +- .../integrations/tools/tavily_search.ipynb | 600 +-- .../docs/integrations/tools/webbrowser.mdx | 2 +- .../docs/integrations/tools/wikipedia.mdx | 2 +- .../docs/integrations/tools/wolframalpha.mdx | 2 +- .../docs/integrations/tools/zapier_agent.mdx | 2 +- .../integrations/vectorstores/chroma.ipynb | 702 +-- .../vectorstores/elasticsearch.ipynb | 782 ++-- .../integrations/vectorstores/faiss.ipynb | 920 ++-- .../integrations/vectorstores/hnswlib.ipynb | 750 +-- .../docs/integrations/vectorstores/index.mdx | 2 +- .../integrations/vectorstores/memory.ipynb | 710 +-- .../vectorstores/mongodb_atlas.ipynb | 978 ++-- .../integrations/vectorstores/pgvector.ipynb | 1246 ++--- .../integrations/vectorstores/pinecone.ipynb | 718 +-- .../integrations/vectorstores/qdrant.ipynb | 676 +-- .../integrations/vectorstores/redis.ipynb | 736 +-- .../integrations/vectorstores/supabase.ipynb | 864 ++-- .../integrations/vectorstores/upstash.ipynb | 712 +-- .../integrations/vectorstores/weaviate.ipynb | 762 +-- docs/core_docs/docs/introduction.mdx | 4 +- .../errors/INVALID_PROMPT_INPUT.mdx | 4 +- .../errors/INVALID_TOOL_RESULTS.ipynb | 880 ++-- .../errors/MESSAGE_COERCION_FAILURE.mdx | 4 +- .../errors/OUTPUT_PARSING_FAILURE.mdx | 2 +- docs/core_docs/docs/tutorials/chatbot.ipynb | 2264 ++++----- 
.../core_docs/docs/tutorials/extraction.ipynb | 716 +-- docs/core_docs/docs/tutorials/llm_chain.ipynb | 1120 ++--- docs/core_docs/docs/tutorials/pdf_qa.ipynb | 824 ++-- .../docs/tutorials/qa_chat_history.ipynb | 2850 ++++++------ .../docs/tutorials/query_analysis.ipynb | 10 +- docs/core_docs/docs/tutorials/rag.ipynb | 32 +- docs/core_docs/docs/tutorials/sql_qa.mdx | 6 +- .../migrating_memory/chat_history.ipynb | 528 +-- .../docs/versions/migrating_memory/index.mdx | 5 +- docs/core_docs/sidebars.js | 14 +- docs/core_docs/src/css/custom.css | 7 +- docs/core_docs/src/theme/RedirectAnchors.js | 111 + .../static/img/conversation_patterns.png | Bin 0 -> 106974 bytes .../static/img/embeddings_concept.png | Bin 0 -> 101510 bytes docs/core_docs/static/img/rag_concepts.png | Bin 0 -> 72552 bytes .../static/img/retrieval_concept.png | Bin 0 -> 122171 bytes .../static/img/retriever_concept.png | Bin 0 -> 19936 bytes .../static/img/retriever_full_docs.png | Bin 0 -> 127464 bytes .../static/img/structured_output.png | Bin 0 -> 89292 bytes docs/core_docs/static/img/text_splitters.png | Bin 0 -> 27792 bytes .../static/img/tool_call_example.png | Bin 0 -> 86016 bytes .../static/img/tool_calling_components.png | Bin 0 -> 179296 bytes .../static/img/tool_calling_concept.png | Bin 0 -> 123308 bytes docs/core_docs/static/img/vectorstores.png | Bin 0 -> 118309 bytes .../static/img/with_structured_output.png | Bin 0 -> 87132 bytes docs/core_docs/vercel.json | 2 +- langchain-core/README.md | 2 +- langchain/README.md | 2 +- .../src/cli/docs/templates/llms.ipynb | 440 +- .../cli/docs/templates/text_embedding.ipynb | 450 +- .../src/cli/docs/templates/vectorstores.ipynb | 726 +-- 316 files changed, 56159 insertions(+), 53911 deletions(-) delete mode 100644 docs/core_docs/docs/concepts.mdx create mode 100644 docs/core_docs/docs/concepts/agents.mdx create mode 100644 docs/core_docs/docs/concepts/architecture.mdx create mode 100644 docs/core_docs/docs/concepts/callbacks.mdx create mode 100644 docs/core_docs/docs/concepts/chat_history.mdx create mode 100644 docs/core_docs/docs/concepts/chat_models.mdx create mode 100644 docs/core_docs/docs/concepts/document_loaders.mdx create mode 100644 docs/core_docs/docs/concepts/embedding_models.mdx create mode 100644 docs/core_docs/docs/concepts/evaluation.mdx create mode 100644 docs/core_docs/docs/concepts/example_selectors.mdx create mode 100644 docs/core_docs/docs/concepts/few_shot_prompting.mdx create mode 100644 docs/core_docs/docs/concepts/index.mdx create mode 100644 docs/core_docs/docs/concepts/key_value_stores.mdx create mode 100644 docs/core_docs/docs/concepts/lcel.mdx create mode 100644 docs/core_docs/docs/concepts/messages.mdx create mode 100644 docs/core_docs/docs/concepts/multimodality.mdx create mode 100644 docs/core_docs/docs/concepts/output_parsers.mdx create mode 100644 docs/core_docs/docs/concepts/prompt_templates.mdx create mode 100644 docs/core_docs/docs/concepts/rag.mdx create mode 100644 docs/core_docs/docs/concepts/retrieval.mdx create mode 100644 docs/core_docs/docs/concepts/retrievers.mdx create mode 100644 docs/core_docs/docs/concepts/runnables.mdx create mode 100644 docs/core_docs/docs/concepts/streaming.mdx create mode 100644 docs/core_docs/docs/concepts/structured_outputs.mdx create mode 100644 docs/core_docs/docs/concepts/t.ipynb create mode 100644 docs/core_docs/docs/concepts/text_llms.mdx create mode 100644 docs/core_docs/docs/concepts/text_splitters.mdx create mode 100644 docs/core_docs/docs/concepts/tokens.mdx create mode 100644 
docs/core_docs/docs/concepts/tool_calling.mdx create mode 100644 docs/core_docs/docs/concepts/tools.mdx create mode 100644 docs/core_docs/docs/concepts/tracing.mdx create mode 100644 docs/core_docs/docs/concepts/vectorstores.mdx create mode 100644 docs/core_docs/docs/concepts/why_langchain.mdx create mode 100644 docs/core_docs/src/theme/RedirectAnchors.js create mode 100644 docs/core_docs/static/img/conversation_patterns.png create mode 100644 docs/core_docs/static/img/embeddings_concept.png create mode 100644 docs/core_docs/static/img/rag_concepts.png create mode 100644 docs/core_docs/static/img/retrieval_concept.png create mode 100644 docs/core_docs/static/img/retriever_concept.png create mode 100644 docs/core_docs/static/img/retriever_full_docs.png create mode 100644 docs/core_docs/static/img/structured_output.png create mode 100644 docs/core_docs/static/img/text_splitters.png create mode 100644 docs/core_docs/static/img/tool_call_example.png create mode 100644 docs/core_docs/static/img/tool_calling_components.png create mode 100644 docs/core_docs/static/img/tool_calling_concept.png create mode 100644 docs/core_docs/static/img/vectorstores.png create mode 100644 docs/core_docs/static/img/with_structured_output.png diff --git a/README.md b/README.md index 988669952b12..a8f6f01684bd 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ LangChain is written in TypeScript and can be used in: - **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.) This framework consists of several parts. -- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://js.langchain.com/docs/concepts#langchain-expression-language), [components](https://js.langchain.com/docs/concepts), and [third-party integrations](https://js.langchain.com/docs/integrations/platforms/). +- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://js.langchain.com/docs/concepts/lcel), [components](https://js.langchain.com/docs/concepts), and [third-party integrations](https://js.langchain.com/docs/integrations/platforms/). Use [LangGraph.js](https://js.langchain.com/docs/concepts/#langgraphjs) to build stateful agents with first-class streaming and human-in-the-loop support. - **Productionization**: Use [LangSmith](https://docs.smith.langchain.com/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence. - **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/). 
diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore index 3947053a76dd..55864ace8b3e 100644 --- a/docs/core_docs/.gitignore +++ b/docs/core_docs/.gitignore @@ -216,6 +216,8 @@ docs/how_to/assign.md docs/how_to/assign.mdx docs/how_to/agent_executor.md docs/how_to/agent_executor.mdx +docs/concepts/t.md +docs/concepts/t.mdx docs/versions/migrating_memory/conversation_summary_memory.md docs/versions/migrating_memory/conversation_summary_memory.mdx docs/versions/migrating_memory/conversation_buffer_window_memory.md @@ -250,22 +252,20 @@ docs/integrations/vectorstores/elasticsearch.md docs/integrations/vectorstores/elasticsearch.mdx docs/integrations/vectorstores/chroma.md docs/integrations/vectorstores/chroma.mdx -docs/integrations/tools/tavily_search.md -docs/integrations/tools/tavily_search.mdx -docs/integrations/tools/exa_search.md -docs/integrations/tools/exa_search.mdx -docs/integrations/tools/duckduckgo_search.md -docs/integrations/tools/duckduckgo_search.mdx docs/integrations/toolkits/vectorstore.md docs/integrations/toolkits/vectorstore.mdx docs/integrations/toolkits/sql.md docs/integrations/toolkits/sql.mdx docs/integrations/toolkits/openapi.md docs/integrations/toolkits/openapi.mdx -docs/integrations/stores/in_memory.md -docs/integrations/stores/in_memory.mdx -docs/integrations/stores/file_system.md -docs/integrations/stores/file_system.mdx +docs/integrations/tools/tavily_search.md +docs/integrations/tools/tavily_search.mdx +docs/integrations/tools/serpapi.md +docs/integrations/tools/serpapi.mdx +docs/integrations/tools/exa_search.md +docs/integrations/tools/exa_search.mdx +docs/integrations/tools/duckduckgo_search.md +docs/integrations/tools/duckduckgo_search.mdx docs/integrations/text_embedding/togetherai.md docs/integrations/text_embedding/togetherai.mdx docs/integrations/text_embedding/openai.md @@ -274,6 +274,8 @@ docs/integrations/text_embedding/ollama.md docs/integrations/text_embedding/ollama.mdx docs/integrations/text_embedding/mistralai.md docs/integrations/text_embedding/mistralai.mdx +docs/integrations/text_embedding/ibm.md +docs/integrations/text_embedding/ibm.mdx docs/integrations/text_embedding/google_vertex_ai.md docs/integrations/text_embedding/google_vertex_ai.mdx docs/integrations/text_embedding/google_generativeai.md @@ -288,6 +290,20 @@ docs/integrations/text_embedding/bedrock.md docs/integrations/text_embedding/bedrock.mdx docs/integrations/text_embedding/azure_openai.md docs/integrations/text_embedding/azure_openai.mdx +docs/integrations/stores/in_memory.md +docs/integrations/stores/in_memory.mdx +docs/integrations/stores/file_system.md +docs/integrations/stores/file_system.mdx +docs/integrations/retrievers/tavily.md +docs/integrations/retrievers/tavily.mdx +docs/integrations/retrievers/kendra-retriever.md +docs/integrations/retrievers/kendra-retriever.mdx +docs/integrations/retrievers/exa.md +docs/integrations/retrievers/exa.mdx +docs/integrations/retrievers/bm25.md +docs/integrations/retrievers/bm25.mdx +docs/integrations/retrievers/bedrock-knowledge-bases.md +docs/integrations/retrievers/bedrock-knowledge-bases.mdx docs/integrations/llms/together.md docs/integrations/llms/together.mdx docs/integrations/llms/openai.md @@ -296,6 +312,8 @@ docs/integrations/llms/ollama.md docs/integrations/llms/ollama.mdx docs/integrations/llms/mistral.md docs/integrations/llms/mistral.mdx +docs/integrations/llms/ibm.md +docs/integrations/llms/ibm.mdx docs/integrations/llms/google_vertex_ai.md docs/integrations/llms/google_vertex_ai.mdx 
docs/integrations/llms/fireworks.md @@ -310,32 +328,6 @@ docs/integrations/llms/azure.md docs/integrations/llms/azure.mdx docs/integrations/llms/arcjet.md docs/integrations/llms/arcjet.mdx -docs/integrations/retrievers/tavily.md -docs/integrations/retrievers/tavily.mdx -docs/integrations/retrievers/kendra-retriever.md -docs/integrations/retrievers/kendra-retriever.mdx -docs/integrations/retrievers/exa.md -docs/integrations/retrievers/exa.mdx -docs/integrations/retrievers/bm25.md -docs/integrations/retrievers/bm25.mdx -docs/integrations/retrievers/bedrock-knowledge-bases.md -docs/integrations/retrievers/bedrock-knowledge-bases.mdx -docs/integrations/retrievers/self_query/weaviate.md -docs/integrations/retrievers/self_query/weaviate.mdx -docs/integrations/retrievers/self_query/vectara.md -docs/integrations/retrievers/self_query/vectara.mdx -docs/integrations/retrievers/self_query/supabase.md -docs/integrations/retrievers/self_query/supabase.mdx -docs/integrations/retrievers/self_query/qdrant.md -docs/integrations/retrievers/self_query/qdrant.mdx -docs/integrations/retrievers/self_query/pinecone.md -docs/integrations/retrievers/self_query/pinecone.mdx -docs/integrations/retrievers/self_query/memory.md -docs/integrations/retrievers/self_query/memory.mdx -docs/integrations/retrievers/self_query/hnswlib.md -docs/integrations/retrievers/self_query/hnswlib.mdx -docs/integrations/retrievers/self_query/chroma.md -docs/integrations/retrievers/self_query/chroma.mdx docs/integrations/chat/togetherai.md docs/integrations/chat/togetherai.mdx docs/integrations/chat/openai.md @@ -344,6 +336,8 @@ docs/integrations/chat/ollama.md docs/integrations/chat/ollama.mdx docs/integrations/chat/mistral.md docs/integrations/chat/mistral.mdx +docs/integrations/chat/ibm.md +docs/integrations/chat/ibm.mdx docs/integrations/chat/groq.md docs/integrations/chat/groq.mdx docs/integrations/chat/google_vertex_ai.md @@ -366,6 +360,32 @@ docs/integrations/chat/arcjet.md docs/integrations/chat/arcjet.mdx docs/integrations/chat/anthropic.md docs/integrations/chat/anthropic.mdx +docs/integrations/retrievers/self_query/weaviate.md +docs/integrations/retrievers/self_query/weaviate.mdx +docs/integrations/retrievers/self_query/vectara.md +docs/integrations/retrievers/self_query/vectara.mdx +docs/integrations/retrievers/self_query/supabase.md +docs/integrations/retrievers/self_query/supabase.mdx +docs/integrations/retrievers/self_query/qdrant.md +docs/integrations/retrievers/self_query/qdrant.mdx +docs/integrations/retrievers/self_query/pinecone.md +docs/integrations/retrievers/self_query/pinecone.mdx +docs/integrations/retrievers/self_query/memory.md +docs/integrations/retrievers/self_query/memory.mdx +docs/integrations/retrievers/self_query/hnswlib.md +docs/integrations/retrievers/self_query/hnswlib.mdx +docs/integrations/retrievers/self_query/chroma.md +docs/integrations/retrievers/self_query/chroma.mdx +docs/integrations/document_loaders/file_loaders/unstructured.md +docs/integrations/document_loaders/file_loaders/unstructured.mdx +docs/integrations/document_loaders/file_loaders/text.md +docs/integrations/document_loaders/file_loaders/text.mdx +docs/integrations/document_loaders/file_loaders/pdf.md +docs/integrations/document_loaders/file_loaders/pdf.mdx +docs/integrations/document_loaders/file_loaders/directory.md +docs/integrations/document_loaders/file_loaders/directory.mdx +docs/integrations/document_loaders/file_loaders/csv.md +docs/integrations/document_loaders/file_loaders/csv.mdx 
docs/integrations/document_loaders/web_loaders/web_puppeteer.md docs/integrations/document_loaders/web_loaders/web_puppeteer.mdx docs/integrations/document_loaders/web_loaders/web_cheerio.md @@ -377,14 +397,4 @@ docs/integrations/document_loaders/web_loaders/pdf.mdx docs/integrations/document_loaders/web_loaders/langsmith.md docs/integrations/document_loaders/web_loaders/langsmith.mdx docs/integrations/document_loaders/web_loaders/firecrawl.md -docs/integrations/document_loaders/web_loaders/firecrawl.mdx -docs/integrations/document_loaders/file_loaders/unstructured.md -docs/integrations/document_loaders/file_loaders/unstructured.mdx -docs/integrations/document_loaders/file_loaders/text.md -docs/integrations/document_loaders/file_loaders/text.mdx -docs/integrations/document_loaders/file_loaders/pdf.md -docs/integrations/document_loaders/file_loaders/pdf.mdx -docs/integrations/document_loaders/file_loaders/directory.md -docs/integrations/document_loaders/file_loaders/directory.mdx -docs/integrations/document_loaders/file_loaders/csv.md -docs/integrations/document_loaders/file_loaders/csv.mdx \ No newline at end of file +docs/integrations/document_loaders/web_loaders/firecrawl.mdx \ No newline at end of file diff --git a/docs/core_docs/docs/concepts.mdx b/docs/core_docs/docs/concepts.mdx deleted file mode 100644 index e058ba7f5a7c..000000000000 --- a/docs/core_docs/docs/concepts.mdx +++ /dev/null @@ -1,1436 +0,0 @@ -# Conceptual guide - -This section contains introductions to key parts of LangChain. - -## Architecture - -LangChain as a framework consists of several pieces. The below diagram shows how they relate. - -import ThemedImage from "@theme/ThemedImage"; -import useBaseUrl from "@docusaurus/useBaseUrl"; - - - -### `@langchain/core` - -This package contains base abstractions of different components and ways to compose them together. -The interfaces for core components like LLMs, vector stores, retrievers and more are defined here. -No third party integrations are defined here. -The dependencies are kept purposefully very lightweight. - -This package is a requirement of most others in the LangChain ecosystem, and must be installed separately. - -### `@langchain/community` - -This package contains third party integrations that are maintained by the LangChain community. -Key partner packages are separated out (see below). -This contains all integrations for various components (LLMs, vector stores, retrievers). -All dependencies in this package are optional to keep the package as lightweight as possible. - -### Partner packages - -While the long tail of integrations are in `@langchain/community`, we split popular integrations into their own packages (e.g. `langchain-openai`, `langchain-anthropic`, etc). -This was done in order to improve support for these important integrations. - -### `langchain` - -The main `langchain` package contains chains, agents, and retrieval strategies that make up an application's cognitive architecture. -These are NOT third party integrations. -All chains, agents, and retrieval strategies here are NOT specific to any one integration, but rather generic across all integrations. - -### [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) - -LangGraph.js is an extension of `langchain` aimed at -building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. - -LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows. 
- -### [LangSmith](https://docs.smith.langchain.com) - -A developer platform that lets you debug, test, evaluate, and monitor LLM applications. - -## Installation - -If you want to work with high level abstractions, you should install the `langchain` package. - -```bash npm2yarn -npm i langchain @langchain/core -``` - -If you want to work with specific integrations, you will need to install them separately. -See [here](/docs/integrations/platforms/) for a list of integrations and how to install them. - -For working with LangSmith, you will need to set up a LangSmith developer account [here](https://smith.langchain.com) and get an API key. -After that, you can enable it by setting environment variables: - -```shell -export LANGCHAIN_TRACING_V2=true -export LANGCHAIN_API_KEY=ls__... - -# Reduce tracing latency if you are not in a serverless environment -# export LANGCHAIN_CALLBACKS_BACKGROUND=true -``` - -## LangChain Expression Language - - - -LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together. -LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains -(we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL: - -**First-class streaming support** -When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means -eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens. - -**Optimized parallel execution** -Whenever your LCEL chains have steps that can be executed in parallel (eg if you fetch documents from multiple retrievers) we automatically do it for the smallest possible latency. - -**Retries and fallbacks** -Configure retries and fallbacks for any part of your LCEL chain. This is a great way to make your chains more reliable at scale. We’re currently working on adding streaming -support for retries/fallbacks, so you can get the added reliability without any latency cost. - -**Access intermediate results** -For more complex chains it’s often very useful to access the results of intermediate steps even before the final output is produced. This can be used to let end-users know -something is happening, or even just to debug your chain. - -[**Seamless LangSmith tracing**](https://docs.smith.langchain.com) -As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step. -With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com) for maximum observability and debuggability. - -### Runnable interface {#interface} - - - -To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html) protocol. -Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below. - -This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. 
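As a rough sketch of what that looks like in practice (the `ChatOpenAI` model and the `gpt-4o-mini` model name here are illustrative assumptions; any chat model integration could be swapped in), a prompt, model, and output parser can be piped together and then invoked, streamed, or batched through the same interface:

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { ChatOpenAI } from "@langchain/openai";

// Each of these components is a Runnable; .pipe() composes them into a new Runnable.
const prompt = ChatPromptTemplate.fromTemplate(
  "Tell me a short joke about {topic}"
);
const model = new ChatOpenAI({ model: "gpt-4o-mini" });
const chain = prompt.pipe(model).pipe(new StringOutputParser());

// The composed chain exposes the same standard interface as its parts.
const joke = await chain.invoke({ topic: "bears" });

// Streaming yields parsed string chunks as the model produces tokens.
for await (const chunk of await chain.stream({ topic: "bears" })) {
  process.stdout.write(chunk);
}

// Batching runs the chain over several inputs, in parallel where possible.
const jokes = await chain.batch([{ topic: "cats" }, { topic: "dogs" }]);
```

Because every step in such a chain is itself a Runnable, the composed chain picks up streaming, batching, and LangSmith tracing without any additional code.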
-The standard interface includes: - -- [`stream`](#stream): stream back chunks of the response -- [`invoke`](#invoke): call the chain on an input -- [`batch`](#batch): call the chain on an array of inputs - -The **input type** and **output type** varies by component: - -| Component | Input Type | Output Type | -| ------------ | ----------------------------------------------------- | --------------------- | -| Prompt | Object | PromptValue | -| ChatModel | Single string, list of chat messages or a PromptValue | ChatMessage | -| LLM | Single string, list of chat messages or a PromptValue | String | -| OutputParser | The output of an LLM or ChatModel | Depends on the parser | -| Retriever | Single string | List of Documents | -| Tool | Single string or object, depending on the tool | Depends on the tool | - -## Components - -LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs. -Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix. - -### Chat models - - - -Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text). -These are generally newer models (older models are generally `LLMs`, see below). -Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages. - -Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. -This gives them the same interface as LLMs (and simpler to use). -When a string is passed in as input, it will be converted to a `HumanMessage` under the hood before being passed to the underlying model. - -LangChain does not host any Chat Models, rather we rely on third party integrations. - -We have some standardized parameters when constructing ChatModels: - -- `model`: the name of the model - -Chat Models also accept other parameters that are specific to that integration. - -:::important -Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it. -Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. -Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information. -::: - -For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models). - -#### Multimodality - -Some chat models are multimodal, accepting images, audio and even video as inputs. -These are still less common, meaning model providers haven't standardized on the "best" way to define the API. -Multimodal outputs are even less common. As such, we've kept our multimodal abstractions fairly light weight -and plan to further solidify the multimodal APIs and interaction patterns as the field matures. - -In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. -So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations. - -For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal). - -### LLMs - - - -:::caution -Pure text-in/text-out LLMs tend to be older or lower-level. 
Many popular models are best used as [chat completion models](/docs/concepts/#chat-models), -even for non-chat use cases. - -You are probably looking for [the section above instead](/docs/concepts/#chat-models). -::: - -Language models that takes a string as input and returns a string. -These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above). - -Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input. -This gives them the same interface as [Chat Models](/docs/concepts/#chat-models). -When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model. - -LangChain does not host any LLMs, rather we rely on third party integrations. - -For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms). - -### Message types - -Some language models take an array of messages as input and return a message. -There are a few different types of messages. -All messages have a `role`, `content`, and `response_metadata` property. - -The `role` describes WHO is saying the message. The standard roles are "user", "assistant", "system", and "tool". -LangChain has different message classes for different roles. - -The `content` property describes the content of the message. -This can be a few different things: - -- A string (most models deal this type of content) -- A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location) - -Optionally, messages can have a `name` property which allows for differentiating between multiple speakers with the same role. -For example, if there are two users in the chat history it can be useful to differentiate between them. Not all models support this. - -#### HumanMessage - -This represents a message with role "user". - -#### AIMessage - -This represents a message with role "assistant". In addition to the `content` property, these messages also have: - -**`response_metadata`** - -The `response_metadata` property contains additional metadata about the response. The data here is often specific to each model provider. -This is where information like log-probs and token usage may be stored. - -**`tool_calls`** - -These represent a decision from an language model to call a tool. They are included as part of an `AIMessage` output. -They can be accessed from there with the `.tool_calls` property. - -This property returns a list of `ToolCall`s. A `ToolCall` is an object with the following arguments: - -- `name`: The name of the tool that should be called. -- `args`: The arguments to that tool. -- `id`: The id of that tool call. - -#### SystemMessage - -This represents a message with role "system", which tells the model how to behave. Not every model provider supports this. - -#### ToolMessage - -This represents a message with role "tool", which contains the result of calling a tool. In addition to `role` and `content`, this message has: - -- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result. -- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model. - -#### (Legacy) FunctionMessage - -This is a legacy message type, corresponding to OpenAI's legacy function-calling API. 
`ToolMessage` should be used instead to correspond to the updated tool-calling API. - -This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result. - -### Prompt templates - - - -Prompt templates help to translate user input and parameters into instructions for a language model. -This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output. - -Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in. - -Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages. -The reason this PromptValue exists is to make it easy to switch between strings and messages. - -There are a few different types of prompt templates: - -#### String PromptTemplates - -These prompt templates are used to format a single string, and generally are used for simpler inputs. -For example, a common way to construct and use a PromptTemplate is as follows: - -```typescript -import { PromptTemplate } from "@langchain/core/prompts"; - -const promptTemplate = PromptTemplate.fromTemplate( - "Tell me a joke about {topic}" -); - -await promptTemplate.invoke({ topic: "cats" }); -``` - -#### ChatPromptTemplates - -These prompt templates are used to format an array of messages. These "templates" consist of an array of templates themselves. -For example, a common way to construct and use a ChatPromptTemplate is as follows: - -```typescript -import { ChatPromptTemplate } from "@langchain/core/prompts"; - -const promptTemplate = ChatPromptTemplate.fromMessages([ - ["system", "You are a helpful assistant"], - ["user", "Tell me a joke about {topic}"], -]); - -await promptTemplate.invoke({ topic: "cats" }); -``` - -In the above example, this ChatPromptTemplate will construct two messages when called. -The first is a system message, that has no variables to format. -The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in. - -#### MessagesPlaceholder - - - -This prompt template is responsible for adding an array of messages in a particular place. -In the above ChatPromptTemplate, we saw how we could format two messages, each one a string. -But what if we wanted the user to pass in an array of messages that we would slot into a particular spot? -This is how you use MessagesPlaceholder. - -```typescript -import { - ChatPromptTemplate, - MessagesPlaceholder, -} from "@langchain/core/prompts"; -import { HumanMessage } from "@langchain/core/messages"; - -const promptTemplate = ChatPromptTemplate.fromMessages([ - ["system", "You are a helpful assistant"], - new MessagesPlaceholder("msgs"), -]); - -promptTemplate.invoke({ msgs: [new HumanMessage({ content: "hi!" })] }); -``` - -This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in. -If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in). -This is useful for letting an array of messages be slotted into a particular spot. 
- -An alternative way to accomplish the same thing without using the `MessagesPlaceholder` class explicitly is: - -```typescript -const promptTemplate = ChatPromptTemplate.fromMessages([ - ["system", "You are a helpful assistant"], - ["placeholder", "{msgs}"], // <-- This is the changed part -]); -``` - -For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). - -### Example Selectors - -One common prompting technique for achieving better performance is to include examples as part of the prompt. -This is known as [few-shot prompting](/docs/concepts/#few-shot-prompting). -This gives the language model concrete examples of how it should behave. -Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them. -Example Selectors are classes responsible for selecting and then formatting examples into prompts. - -For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors). - -### Output parsers - - - -:::note - -The information here refers to parsers that take a text output from a model try to parse it into a more structured representation. -More and more models are supporting function (or tool) calling, which handles this automatically. -It is recommended to use function/tool calling rather than output parsing. -See documentation for that [here](/docs/concepts/#function-tool-calling). - -::: - -Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks. -Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs. - -There are two main methods an output parser must implement: - -- "Get format instructions": A method which returns a string containing instructions for how the output of a language model should be formatted. -- "Parse": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure. - -And then one optional one: - -- "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. - -Output parsers accept a string or `BaseMessage` as input and can return an arbitrary type. - -LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information: - -**Name**: The name of the output parser - -**Supports Streaming**: Whether the output parser supports streaming. - -**Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments. - -**Output Type**: The output type of the object returned by the parser. - -**Description**: Our commentary on this output parser and when to use it. 
- -| Name | Supports Streaming | Input Type | Output Type | Description | -| ------------------------------------------------------------------------------------------------------------- | ------------------ | ------------------------- | --------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [JSON](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Returns a JSON object as specified. You can specify a Zod schema and it will return JSON for that model. | -| [XML](https://api.js.langchain.com/classes/langchain_core.output_parsers.XMLOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Returns a object of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). | -| [CSV](https://api.js.langchain.com/classes/langchain_core.output_parsers.CommaSeparatedListOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Array[string]` | Returns an array of comma separated values. | -| [Structured](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html) | | `string` \| `BaseMessage` | `Promise>` | Parse structured JSON from an LLM response. | -| [HTTP](https://api.js.langchain.com/classes/langchain.output_parsers.HttpResponseOutputParser.html) | ✅ | `string` | `Promise` | Parse an LLM response to then send over HTTP(s). Useful when invoking the LLM on the server/edge, and then sending the content/stream back to the client. | -| [Bytes](https://api.js.langchain.com/classes/langchain_core.output_parsers.BytesOutputParser.html) | ✅ | `string` \| `BaseMessage` | `Promise` | Parse an LLM response to then send over HTTP(s). Useful for streaming LLM responses from the server/edge to the client. | -| [Datetime](https://api.js.langchain.com/classes/langchain.output_parsers.DatetimeOutputParser.html) | | `string` | `Promise` | Parses response into a `Date`. | -| [Regex](https://api.js.langchain.com/classes/langchain.output_parsers.RegexParser.html) | | `string` | `Promise>` | Parses the given text using the regex pattern and returns a object with the parsed output. | - -For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers). - -### Chat History - -Most LLM applications have a conversational interface. -An essential component of a conversation is being able to refer to information introduced earlier in the conversation. -At bare minimum, a conversational system should be able to access some window of past messages directly. - -The concept of `ChatHistory` refers to a class in LangChain which can be used to wrap an arbitrary chain. -This `ChatHistory` will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database. -Future interactions will then load those messages and pass them into the chain as part of the input. - -### Document - - - -A Document object in LangChain contains information about some data. It has two attributes: - -- `pageContent: string`: The content of this document. Currently is only a string. -- `metadata: Record`: Arbitrary metadata associated with this document. Can track the document id, file name, etc. - -### Document loaders - - - -These classes load Document objects. 
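As a rough illustration of the `Document` shape described above (the field values here are invented for the example), such an object can also be constructed directly:

```typescript
import { Document } from "@langchain/core/documents";

// pageContent holds the text itself; metadata can hold arbitrary fields such as a source path.
const doc = new Document({
  pageContent: "LangChain provides standard interfaces for building LLM applications.",
  metadata: { source: "example.txt" },
});
```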
LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc. - -Each DocumentLoader has its own specific parameters, but they can all be invoked in the same way with the `.load` method. -An example use case is as follows: - -```typescript -import { CSVLoader } from "@langchain/community/document_loaders/fs/csv"; - -const loader = new CSVLoader(); -// <-- Integration specific parameters here - -const docs = await loader.load(); -``` - -For specifics on how to use document loaders, see the [relevant how-to guides here](/docs/how_to/#document-loaders). - -### Text splitters - -Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example is you may want to split a long document into smaller chunks that can fit into your model's context window. LangChain has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents. - -When you want to deal with long pieces of text, it is necessary to split up that text into chunks. As simple as this sounds, there is a lot of potential complexity here. Ideally, you want to keep the semantically related pieces of text together. What "semantically related" means could depend on the type of text. This notebook showcases several ways to do that. - -At a high level, text splitters work as following: - -1. Split the text up into small, semantically meaningful chunks (often sentences). -2. Start combining these small chunks into a larger chunk until you reach a certain size (as measured by some function). -3. Once you reach that size, make that chunk its own piece of text and then start creating a new chunk of text with some overlap (to keep context between chunks). - -That means there are two different axes along which you can customize your text splitter: - -1. How the text is split -2. How the chunk size is measured - -For specifics on how to use text splitters, see the [relevant how-to guides here](/docs/how_to/#text-splitters). - -### Embedding models - - - -Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text. -By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning. -These natural language search capabilities underpin many types of [context retrieval](/docs/concepts/#retrieval), -where we provide an LLM with the relevant data it needs to effectively respond to a query. - -![](/img/embeddings.png) - -The `Embeddings` class is a class designed for interfacing with text embedding models. There are many different embedding model providers (OpenAI, Cohere, Hugging Face, etc) and local models, and this class is designed to provide a standard interface for all of them. - -The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself). - -For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models). 
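As a small sketch of the two methods described above (the `OpenAIEmbeddings` class is used purely for illustration; any embedding integration exposes the same interface):

```typescript
import { OpenAIEmbeddings } from "@langchain/openai";

const embeddings = new OpenAIEmbeddings();

// Embed a single query string; the result is a vector (an array of numbers).
const queryVector = await embeddings.embedQuery("What is LangChain?");

// Embed several documents at once; the result is one vector per input text.
const docVectors = await embeddings.embedDocuments([
  "LangChain is a framework for building applications with LLMs.",
  "Embeddings map text to vectors so that similar texts end up close together.",
]);

console.log(queryVector.length, docVectors.length);
```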
- -### Vector stores {#vectorstore} - - - -One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors, -and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query. -A vector store takes care of storing embedded data and performing vector search for you. - -Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before -similarity search, allowing you more control over returned documents. - -Vectorstores can be converted to the retriever interface by doing: - -```typescript -const vectorstore = new MyVectorStore(); -const retriever = vectorstore.asRetriever(); -``` - -For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vectorstores). - -### Retrievers - - - -A retriever is an interface that returns relevant documents given an unstructured query. -They are more general than a vector store. -A retriever does not need to be able to store documents, only to return (or retrieve) them. -Retrievers can be created from vector stores, but are also broad enough to include [Exa search](/docs/integrations/retrievers/exa/) (web search) and [Amazon Kendra](/docs/integrations/retrievers/kendra-retriever/). - -Retrievers accept a string query as input and return an array of `Document`s as output. - -For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers). - -### Key-value stores - -For some techniques, such as [indexing and retrieval with multiple vectors per document](/docs/how_to/multi_vector/), having some sort of key-value (KV) storage is helpful. - -LangChain includes a [`BaseStore`](https://api.js.langchain.com/classes/langchain_core.stores.BaseStore.html) interface, -which allows for storage of arbitrary data. However, LangChain components that require KV-storage accept a -more specific `BaseStore` instance that stores binary data (referred to as a `ByteStore`), and internally take care of -encoding and decoding data for their specific needs. - -This means that as a user, you only need to think about one type of store rather than different ones for different types of data. - -#### Interface - -All [`BaseStores`](https://api.js.langchain.com/classes/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows -for modifying **multiple** key-value pairs at once: - -- `mget(keys: string[]): Promise<(undefined | Uint8Array)[]>`: get the contents of multiple keys, returning `None` if the key does not exist -- `mset(keyValuePairs: [string, Uint8Array][]): Promise`: set the contents of multiple keys -- `mdelete(keys: string[]): Promise`: delete multiple keys -- `yieldKeys(prefix?: string): AsyncGenerator`: yield all keys in the store, optionally filtering by a prefix - -For key-value store implementations, see [this section](/docs/integrations/stores/). - -### Tools - - - -Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models. -Tools are needed whenever you want a model to control parts of your code or call out to external APIs. - -A tool consists of: - -1. The name of the tool. -2. A description of what the tool does. -3. A JSON schema defining the inputs to the tool. -4. A function. 
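For example, here is a minimal sketch of defining a hypothetical `multiply` tool with the `tool` helper from `@langchain/core/tools` and a Zod schema (the tool itself is invented purely for illustration):

```typescript
import { tool } from "@langchain/core/tools";
import { z } from "zod";

// The name, description, and schema below are what the model sees when deciding to call the tool.
const multiply = tool(
  async ({ a, b }) => {
    // The function is what actually runs when the tool is invoked.
    return String(a * b);
  },
  {
    name: "multiply",
    description: "Multiply two numbers together.",
    schema: z.object({
      a: z.number().describe("The first number"),
      b: z.number().describe("The second number"),
    }),
  }
);
```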
- -When a tool is bound to a model, the name, description and JSON schema are provided as context to the model. - -Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs. -Typical usage may look like the following: - -```ts -// Define a list of tools -const tools = [...]; -const llmWithTools = llm.bindTools([tool]); - -const aiMessage = await llmWithTools.invoke("do xyz..."); -// AIMessage(tool_calls=[ToolCall(...), ...], ...) -``` - -The `AIMessage` returned from the model MAY have `tool_calls` associated with it. -Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like. - -Once the tools are chosen, you will usually want to invoke them and then pass the results back to the model so that it can complete whatever task -it's performing. - -There are generally two different ways to invoke the tool and pass back the response: - -#### Invoke with just the arguments - -When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string). -Here's what this looks like: - -```ts -import { ToolMessage } from "@langchain/core/messages"; - -const toolCall = aiMessage.tool_calls[0]; // ToolCall(args={...}, id=..., ...) -const toolOutput = await tool.invoke(toolCall.args); -const toolMessage = new ToolMessage({ - content: toolOutput, - name: toolCall.name, - tool_call_id: toolCall.id, -}); -``` - -Note that the `content` field will generally be passed back to the model. -If you do not want the raw tool response to be passed to the model, but you still want to keep it around, -you can transform the tool output but also pass it as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage)) - -```ts -// Same code as above -const responseForModel = someTransformation(response); -const toolMessage = new ToolMessage({ - content: responseForModel, - tool_call_id: toolCall.id, - name: toolCall.name, - artifact: response, -}); -``` - -#### Invoke with `ToolCall` - -The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model. -When you do this, the tool will return a `ToolMessage`. -The benefits of this are that you don't have to write the logic yourself to transform the tool output into a ToolMessage. -Here's what this looks like: - -```ts -const toolCall = aiMessage.tool_calls[0]; -const toolMessage = await tool.invoke(toolCall); -``` - -If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the `ToolMessage`, you will need to have the tool return a tuple -with two items: the `content` and the `artifact`. -Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/). - -#### Best practices - -When designing tools to be used by a model, it is important to keep in mind that: - -- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models. -- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This another form of prompt engineering. -- Simple, narrowly scoped tools are easier for models to use than complex tools. - -#### Related - -For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools). - -To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/). 
-
-### Toolkits
-
-
-Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
-
-All Toolkits expose a `getTools` method which returns an array of tools.
-You can therefore do:
-
-```typescript
-// Initialize a toolkit
-const toolkit = new ExampleToolkit(...);
-
-// Get the list of tools
-const tools = toolkit.getTools();
-```
-
-### Agents
-
-By themselves, language models can't take actions - they just output text.
-A big use case for LangChain is creating **agents**.
-Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.
-The results of those actions can then be fed back into the agent, which determines whether more actions are needed, or whether it is okay to finish.
-
-[LangGraph](https://github.com/langchain-ai/langgraphjs) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents.
-Please check out that [documentation](https://langchain-ai.github.io/langgraphjs/) for a more in-depth overview of agent concepts.
-
-There is a legacy agent concept in LangChain that we are moving towards deprecating: `AgentExecutor`.
-AgentExecutor was essentially a runtime for agents.
-It was a great place to get started; however, it was not flexible enough as you started building more customized agents.
-In order to solve that we built LangGraph to be this flexible, highly-controllable runtime.
-
-If you are still using AgentExecutor, do not fear: we still have a guide on [how to use AgentExecutor](/docs/how_to/agent_executor).
-It is recommended, however, that you start to transition to [LangGraph](https://github.com/langchain-ai/langgraphjs).
-In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
-
-#### ReAct agents
-
-
-One popular architecture for building agents is [**ReAct**](https://arxiv.org/abs/2210.03629).
-ReAct combines reasoning and acting in an iterative process - in fact the name "ReAct" stands for "Reason" and "Act".
-
-The general flow looks like this:
-
-- The model will "think" about what step to take in response to an input and any previous observations.
-- The model will then choose an action from available tools (or choose to respond to the user).
-- The model will generate arguments to that tool.
-- The agent runtime (executor) will parse out the chosen tool and call it with the generated arguments.
-- The executor will return the results of the tool call back to the model as an observation.
-- This process repeats until the agent chooses to respond.
-
-There are general prompting-based implementations that do not require any model-specific features, but the most
-reliable implementations use features like [tool calling](/docs/how_to/tool_calling/) to reliably format outputs
-and reduce variance.
-
-Please see the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for more information,
-or [this how-to guide](/docs/how_to/migrate_agent/) for specific information on migrating to LangGraph.
-
-### Callbacks
-
-LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
-
-You can subscribe to these events by using the `callbacks` argument available throughout the API.
This argument is list of handler objects, which are expected to implement one or more of the methods described below in more detail. - -#### Callback Events - -| Event | Event Trigger | Associated Method | -| ---------------- | ------------------------------------------- | ---------------------- | -| Chat model start | When a chat model starts | `handleChatModelStart` | -| LLM start | When a llm starts | `handleLLMStart` | -| LLM new token | When an llm OR chat model emits a new token | `handleLLMNewToken` | -| LLM ends | When an llm OR chat model ends | `handleLLMEnd` | -| LLM errors | When an llm OR chat model errors | `handleLLMError` | -| Chain start | When a chain starts running | `handleChainStart` | -| Chain end | When a chain ends | `handleChainEnd` | -| Chain error | When a chain errors | `handleChainError` | -| Tool start | When a tool starts running | `handleToolStart` | -| Tool end | When a tool ends | `handleToolEnd` | -| Tool error | When a tool errors | `handleToolError` | -| Agent action | When an agent takes an action | `handleAgentAction` | -| Agent finish | When an agent ends | `handleAgentEnd` | -| Retriever start | When a retriever starts | `handleRetrieverStart` | -| Retriever end | When a retriever ends | `handleRetrieverEnd` | -| Retriever error | When a retriever errors | `handleRetrieverError` | -| Text | When arbitrary text is run | `handleText` | - -#### Callback handlers - -`CallbackHandlers` are objects that implement the [`CallbackHandler`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) interface, which has a method for each event that can be subscribed to. -The `CallbackManager` will call the appropriate method on each handler when the event is triggered. - -#### Passing callbacks - -The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places: - -- **Request callbacks**: Passed at the time of the request in addition to the input data. - Available on all standard `Runnable` objects. These callbacks are INHERITED by all children - of the object they are defined on. For example, `chain.invoke({foo: "bar"}, {callbacks: [handler]})`. -- **Constructor callbacks**: defined in the constructor, e.g. `new ChatAnthropic({ callbacks: [handler], tags: ["a-tag"] })`. In this case, the callbacks will be used for all calls made on that object, and will be scoped to that object only. - For example, if you initialize a chat model with constructor callbacks, then use it within a chain, the callbacks will only be invoked for calls to that model. - -:::warning -Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children -of the object. -::: - -If you're creating a custom chain or runnable, you need to remember to propagate request time -callbacks to any child objects. - -For specifics on how to use callbacks, see the [relevant how-to guides here](/docs/how_to/#callbacks). - -## Techniques - -### Streaming - - - -Individual LLM calls often run for much longer than traditional resource requests. -This compounds when you build more complex chains or agents that require multiple reasoning steps. - -Fortunately, LLMs generate output iteratively, which means it's possible to show sensible intermediate results -before the final response is ready. 
Consuming output as soon as it becomes available has therefore become a vital part of the UX -around building apps with LLMs to help alleviate latency issues, and LangChain aims to have first-class support for streaming. - -Below, we'll discuss some concepts and considerations around streaming in LangChain. - -#### `.stream()` - -Most modules in LangChain include the `.stream()` method as an ergonomic streaming interface. -`.stream()` returns an iterator, which you can consume with a [`for await...of`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of) loop. Here's an example with a chat model: - -```ts -import { ChatAnthropic } from "@langchain/anthropic"; -import { concat } from "@langchain/core/utils/stream"; -import type { AIMessageChunk } from "@langchain/core/messages"; - -const model = new ChatAnthropic({ model: "claude-3-sonnet-20240229" }); - -const stream = await model.stream("what color is the sky?"); - -let gathered: AIMessageChunk | undefined = undefined; - -for await (const chunk of stream) { - console.log(chunk); - if (gathered === undefined) { - gathered = chunk; - } else { - gathered = concat(gathered, chunk); - } -} - -console.log(gathered); -``` - -For models (or other components) that don't support streaming natively, this iterator would just yield a single chunk, but -you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode -without the need to provide additional config. - -The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.js.langchain.com/classes/langchain_core.messages.AIMessageChunk.html). -Because this method is part of [LangChain Expression Language](/docs/concepts/#langchain-expression-language), -you can handle formatting differences from different outputs using an [output parser](/docs/concepts/#output-parsers) to transform -each yielded chunk. - -You can check out [this guide](/docs/how_to/streaming/#using-stream) for more detail on how to use `.stream()`. - -#### `.streamEvents()` - - - -While the `.stream()` method is intuitive, it can only return the final generated value of your chain. This is fine for single LLM calls, -but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of -the chain alongside the final output - for example, returning sources alongside the final generation when building a chat -over documents app. - -There are ways to do this [using callbacks](/docs/concepts/#callbacks-1), or by constructing your chain in such a way that it passes intermediate -values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an -`.streamEvents()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator -which yields [various types of events](/docs/how_to/streaming/#event-reference) that you can filter and process according -to the needs of your project. 
- -Here's one small example that prints just events containing streamed chat model output: - -```ts -import { StringOutputParser } from "@langchain/core/output_parsers"; -import { ChatPromptTemplate } from "@langchain/core/prompts"; -import { ChatAnthropic } from "@langchain/anthropic"; - -const model = new ChatAnthropic({ model: "claude-3-sonnet-20240229" }); - -const prompt = ChatPromptTemplate.fromTemplate("tell me a joke about {topic}"); -const parser = new StringOutputParser(); -const chain = prompt.pipe(model).pipe(parser); - -const eventStream = await chain.streamEvents( - { topic: "parrot" }, - { version: "v2" } -); - -for await (const event of eventStream) { - const kind = event.event; - if (kind === "on_chat_model_stream") { - console.log(event); - } -} -``` - -You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components! - -See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.streamEvents()`, -or [this guide](/docs/how_to/callbacks_custom_events) for how to stream custom events from within a chain. - -#### Callbacks - -The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a -callback handler that handles the [`handleLLMNewToken`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html#handleLLMNewToken) event into LangChain components. When that component is invoked, any -[LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls -the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. a HTTP response. -You can also handle the [`handleLLMEnd`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html#handleLLMEnd) event to perform any necessary cleanup. - -You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks. - -Callbacks were the first technique for streaming introduced in LangChain. While powerful and generalizable, -they can be unwieldy for developers. For example: - -- You need to explicitly initialize and manage some aggregator or other stream to collect results. -- The execution order isn't explicitly guaranteed, and you could theoretically have a callback run after the `.invoke()` method finishes. -- Providers would often make you pass an additional parameter to stream outputs instead of returning them all at once. -- You would often ignore the result of the actual model call in favor of callback results. - -#### Tokens - -The unit that most model providers use to measure input and output is via a unit called a **token**. -Tokens are the basic units that language models read and generate when processing or producing text. -The exact definition of a token can vary depending on the specific way the model was trained - -for instance, in English, a token could be a single word like "apple", or a part of a word like "app". - -When you send a model a prompt, the words and characters in the prompt are encoded into tokens using a **tokenizer**. -The model then streams back generated output tokens, which the tokenizer decodes into human-readable text. 
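-
-If you want to experiment with tokenization yourself, one option (shown here as a rough sketch, and a separate dependency rather than part of LangChain) is the `js-tiktoken` package, which implements the BPE tokenizers used by OpenAI models:
-
-```ts
-import { getEncoding } from "js-tiktoken";
-
-// cl100k_base is the encoding used by many recent OpenAI chat models.
-const enc = getEncoding("cl100k_base");
-
-const tokens = enc.encode("LangChain is cool!");
-console.log(tokens.length); // number of tokens in the string
-```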
-
-The below example shows how OpenAI models tokenize `LangChain is cool!`:
-
-![](/img/tokenization.png)
-
-You can see that it gets split into 5 different tokens, and that the boundaries between tokens are not exactly the same as word boundaries.
-
-The reason language models use tokens rather than something more immediately intuitive like "characters"
-has to do with how they process and understand text. At a high level, language models iteratively predict their next generated output based on
-the initial input and their previous generations. Using tokens allows language models to handle linguistic
-units (like words or subwords) that carry meaning, rather than individual characters, which makes it easier for the model
-to learn and understand the structure of the language, including grammar and context.
-Furthermore, using tokens can also improve efficiency, since the model processes fewer units of text compared to character-level processing.
-
-### Function/tool calling
-
-:::info
-We use the term tool calling interchangeably with function calling. Although
-function calling is sometimes meant to refer to invocations of a single function,
-we treat all models as though they can return multiple tool or function calls in
-each message.
-:::
-
-Tool calling allows a [chat model](/docs/concepts/#chat-models) to respond to a given prompt by generating output that
-matches a user-defined schema.
-
-While the name implies that the model is performing
-some action, this is actually not the case! The model only generates the arguments to a tool, and actually running the tool (or not) is up to the user.
-One common example where you **wouldn't** want to call a function with the generated arguments
-is if you want to [extract structured output matching some schema](/docs/concepts/#structured-output)
-from unstructured text. You would give the model an "extraction" tool that takes
-parameters matching the desired schema, then treat the generated output as your final
-result.
-
-![Diagram of a tool call by a chat model](/img/tool_call.png)
-
-Tool calling is not universal, but is supported by many popular LLM providers, including [Anthropic](/docs/integrations/chat/anthropic/),
-[Cohere](/docs/integrations/chat/cohere/), [Google](/docs/integrations/chat/google_vertex_ai/),
-[Mistral](/docs/integrations/chat/mistral/), [OpenAI](/docs/integrations/chat/openai/), and even for locally-running models via [Ollama](/docs/integrations/chat/ollama/).
-
-LangChain provides a standardized interface for tool calling that is consistent across different models.
-
-The standard interface consists of:
-
-- `ChatModel.bindTools()`: a method for specifying which tools are available for a model to call. This method accepts [LangChain tools](/docs/concepts/#tools) as well as model-specific formats.
-- `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.
-
-#### Tool usage
-
-After the model generates tool calls, you can invoke the requested tools and then pass the results back to the model.
-LangChain provides the [`Tool`](/docs/concepts/#tools) abstraction to help you handle this.
-
-The general flow is this:
-
-1. Generate tool calls with a chat model in response to a query.
-2. Invoke the appropriate tools using the generated tool call as arguments.
-3. Format the result of the tool invocations as [`ToolMessages`](/docs/concepts/#toolmessage).
-4.
Pass the entire list of messages back to the model so that it can generate a final answer (or call more tools). - -![Diagram of a complete tool calling flow](/img/tool_calling_flow.png) - -This is how tool calling [agents](/docs/concepts/#agents) perform tasks and answer queries. - -Check out some more focused guides below: - -- [How to use chat models to call tools](/docs/how_to/tool_calling/) -- [How to pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model/) -- [Building an agent with LangGraph](https://langchain-ai.github.io/langgraphjs/tutorials/introduction/) - -### Structured output - -LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide -range of inputs, but for some use-cases, it can be useful to constrain the LLM's output -to a specific format or structure. This is referred to as **structured output**. - -For example, if the output is to be stored in a relational database, -it is much easier if the model generates output that adheres to a defined schema or format. -[Extracting specific information](/docs/tutorials/extraction/) from unstructured text is another -case where this is particularly useful. Most commonly, the output format will be JSON, -though other formats such as [XML](/docs/how_to/output_parser_xml/) can be useful too. Below, we'll discuss -a few ways to get structured output from models in LangChain. - -#### `.withStructuredOutput()` - -For convenience, some LangChain chat models support a [`.withStructuredOutput()`](/docs/how_to/structured_output/#the-.withstructuredoutput-method) method. -This method only requires a schema as input, and returns an object matching the requested schema. -Generally, this method is only present on models that support one of the more advanced methods described below, -and will use one of them under the hood. It takes care of importing a suitable output parser and -formatting the schema in the right format for the model. - -Here's an example: - -```ts -import { z } from "zod"; - -const joke = z.object({ - setup: z.string().describe("The setup of the joke"), - punchline: z.string().describe("The punchline to the joke"), - rating: z.number().optional().describe("How funny the joke is, from 1 to 10"), -}); - -// Can also pass in JSON schema. -// It's also beneficial to pass in an additional "name" parameter to give the -// model more context around the type of output to generate. -const structuredLlm = model.withStructuredOutput(joke); - -await structuredLlm.invoke("Tell me a joke about cats"); -``` - -``` -{ - setup: "Why don't cats play poker in the wild?", - punchline: "Too many cheetahs.", - rating: 7 -} -``` - -We recommend this method as a starting point when working with structured output: - -- It uses other model-specific features under the hood, without the need to import an output parser. -- For the models that use tool calling, no special prompting is needed. -- If multiple underlying techniques are supported, you can supply a `method` parameter to - [toggle which one is used](/docs/how_to/structured_output/#specifying-the-output-method-advanced). - -You may want or need to use other techiniques if: - -- The chat model you are using does not support tool calling. -- You are working with very complex schemas and the model is having trouble generating outputs that conform. - -For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-.withstructuredoutput-method). 
- -You can also check out [this table](/docs/integrations/chat/) for a list of models that support -`.withStructuredOutput()`. - -#### Raw prompting - -The most intuitive way to get a model to structure output is to ask nicely. -In addition to your query, you can give instructions describing what kind of output you'd like, then -parse the output using an [output parser](/docs/concepts/#output-parsers) to convert the raw -model message or string output into something more easily manipulated. - -The biggest benefit to raw prompting is its flexibility: - -- Raw prompting does not require any special model features, only sufficient reasoning capability to understand - the passed schema. -- You can prompt for any format you'd like, not just JSON. This can be useful if the model you - are using is more heavily trained on a certain type of data, such as XML or YAML. - -However, there are some drawbacks too: - -- LLMs are non-deterministic, and prompting a LLM to consistently output data in the exactly correct format - for smooth parsing can be surprisingly difficult and model-specific. -- Individual models have quirks depending on the data they were trained on, and optimizing prompts can be quite difficult. - Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions, - and still others may prefer XML. - -While features offered by model providers may increase reliability, prompting techniques remain important for tuning your -results no matter which method you choose. - -#### JSON mode - - - -Some models, such as [Mistral](/docs/integrations/chat/mistral/), [OpenAI](/docs/integrations/chat/openai/), -[Together AI](/docs/integrations/chat/togetherai/) and [Ollama](/docs/integrations/chat/ollama/), -support a feature called **JSON mode**, usually enabled via config. - -When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON. -Often they require some custom prompting, but it's usually much less burdensome than completely raw prompting and -more along the lines of, -`"you must always return JSON"`. The [output also is generally easier to parse](/docs/how_to/output_parser_json/). - -It's also generally simpler to use directly and more commonly available than tool calling, and can give -more flexibility around prompting and shaping results than tool calling. - -Here's an example: - -```ts -import { JsonOutputParser } from "@langchain/core/output_parsers"; -import { ChatPromptTemplate } from "@langchain/core/prompts"; -import { ChatOpenAI } from "@langchain/openai"; - -const model = new ChatOpenAI({ - model: "gpt-4o", - modelKwargs: { - response_format: { type: "json_object" }, - }, -}); - -const TEMPLATE = `Answer the user's question to the best of your ability. -You must always output a JSON object with an "answer" key and a "followup_question" key. - -{question}`; - -const prompt = ChatPromptTemplate.fromTemplate(TEMPLATE); - -const chain = prompt.pipe(model).pipe(new JsonOutputParser()); - -await chain.invoke({ question: "What is the powerhouse of the cell?" }); -``` - -``` -{ - answer: "The powerhouse of the cell is the mitochondrion.", - followup_question: "Would you like to learn more about the functions of mitochondria?" -} -``` - -For a full list of model providers that support JSON mode, see [this table](/docs/integrations/chat/). 
- -#### Tool calling {#structured-output-tool-calling} - -For models that support it, [tool calling](/docs/concepts/#functiontool-calling) can be very convenient for structured output. It removes the -guesswork around how best to prompt schemas in favor of a built-in model feature. - -It works by first binding the desired schema either directly or via a [LangChain tool](/docs/concepts/#tools) to a -[chat model](/docs/concepts/#chat-models) using the `.bind_tools()` method. The model will then generate an `AIMessage` containing -a `tool_calls` field containing `args` that match the desired shape. - -There are several acceptable formats you can use to bind tools to a model in LangChain. Here's one example using [Zod](https://zod.dev): - -```ts -import { z } from "zod"; -import { zodToJsonSchema } from "zod-to-json-schema"; -import { ChatOpenAI } from "@langchain/openai"; - -const toolSchema = z.object({ - answer: z.string().describe("The answer to the user's question"), - followup_question: z - .string() - .describe("A followup question the user could ask"), -}); - -const model = new ChatOpenAI({ - model: "gpt-4o", - temperature: 0, -}); - -const modelWithTools = model.bindTools([ - { - type: "function", - function: { - name: "response_formatter", - description: - "Always use this tool to structure your response to the user.", - parameters: zodToJsonSchema(toolSchema), - }, - }, -]); - -const aiMessage = await modelWithTools.invoke( - "What is the powerhouse of the cell?" -); - -aiMessage.tool_calls?.[0].args; -``` - -``` -{ - answer: 'The powerhouse of the cell is the mitochondrion.', - followup_question: 'What is the main function of the mitochondrion in the cell?' -} -``` - -Tool calling is a generally consistent way to get a model to generate structured output, and is the default technique -used for the [`.withStructuredOutput()`](/docs/concepts/#withstructuredoutput) method when a model supports it. - -The following how-to guides are good practical resources for using function/tool calling for structured output: - -- [How to return structured data from an LLM](/docs/how_to/structured_output/) -- [How to use a model to call tools](/docs/how_to/tool_calling) - -### Few-shot prompting - -One of the most effective ways to improve model performance is to give a model examples of what you want it to do. The technique of adding example inputs and expected outputs to a model prompt is known as "few-shot prompting". There are a few things to think about when doing few-shot prompting: - -1. How are examples generated? -2. How many examples are in each prompt? -3. How are examples selected at runtime? -4. How are examples formatted in the prompt? - -Here are the considerations for each. - -#### 1. Generating examples - -The first and most important step of few-shot prompting is coming up with a good dataset of examples. -Good examples should be relevant at runtime, clear, informative, and provide information that was not already known to the model. - -At a high-level, the basic ways to generate examples are: - -- Manual: a person/people generates examples they think are useful. -- Better model: a better (presumably more expensive/slower) model's responses are used as examples for a worse (presumably cheaper/faster) model. -- User feedback: users (or labelers) leave feedback on interactions with the application and examples are generated based on that feedback (for example, all interactions with positive feedback could be turned into examples). 
-- LLM feedback: same as user feedback but the process is automated by having models evaluate themselves.
-
-Which approach is best depends on your task. For tasks where a small number of core principles need to be understood really well, it can be valuable to hand-craft a few really good examples.
-For tasks where the space of correct behaviors is broader and more nuanced, it can be useful to generate many examples in a more automated fashion so that there's a higher likelihood of there being some highly relevant examples for any runtime input.
-
-**Single-turn vs. multi-turn examples**
-
-Another dimension to think about when generating examples is what the example is actually showing.
-
-The simplest types of examples just have a user input and an expected model output. These are single-turn examples.
-
-One more complex type of example is an entire conversation, usually one in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
-This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where it's useful to show common errors and spell out exactly why they're wrong and what should be done instead.
-
-#### 2. Number of examples
-
-Once we have a dataset of examples, we need to think about how many examples should be in each prompt.
-The key tradeoff is that more examples generally improve performance, but larger prompts increase costs and latency.
-And beyond some threshold, having too many examples can start to confuse the model.
-Finding the right number of examples is highly dependent on the model, the task, the quality of the examples, and your cost and latency constraints.
-Anecdotally, the better the model is the fewer examples it needs to perform well and the more quickly you hit steeply diminishing returns on adding more examples.
-But the best/only way to reliably answer this question is to run some experiments with different numbers of examples.
-
-#### 3. Selecting examples
-
-Assuming we are not adding our entire example dataset into each prompt, we need to have a way of selecting examples from our dataset based on a given input. We can do this:
-
-- Randomly
-- By (semantic or keyword-based) similarity of the inputs
-- Based on some other constraints, like token size
-
-LangChain has a number of [`ExampleSelectors`](/docs/concepts/#example-selectors) which make it easy to use any of these techniques.
-
-Generally, selecting by semantic similarity leads to the best model performance. But how important this is, again, is model- and task-specific, and is something worth experimenting with.
-
-#### 4. Formatting examples
-
-Most state-of-the-art models these days are chat models, so we'll focus on formatting examples for those. Our basic options are to insert the examples:
-
-- In the system prompt as a string
-- As their own messages
-
-If we insert our examples into the system prompt as a string, we'll need to make sure it's clear to the model where each example begins and which parts are the input versus output. Different models respond better to different syntaxes, like [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chat-markup-language), XML, TypeScript, etc.
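-
-As a rough sketch of the string-based approach (the example data and delimiters below are invented purely for illustration), this might look like:
-
-```ts
-import { ChatPromptTemplate } from "@langchain/core/prompts";
-
-// Illustrative examples only - in practice these would come from your dataset.
-const examples = [
-  { input: "happy", output: "sad" },
-  { input: "tall", output: "short" },
-];
-
-// Delimit each example clearly so the model can tell inputs from outputs.
-const exampleBlock = examples
-  .map((e) => `<example>\nInput: ${e.input}\nOutput: ${e.output}\n</example>`)
-  .join("\n");
-
-const prompt = ChatPromptTemplate.fromMessages([
-  ["system", `Give the antonym of every input.\n\n${exampleBlock}`],
-  ["human", "{input}"],
-]);
-```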
- -If we insert our examples as messages, where each example is represented as a sequence of Human, AI messages, we might want to also assign [names](/docs/concepts/#messages) to our messages like `"exampleUser"` and `"exampleAssistant"` to make it clear that these messages correspond to different actors than the latest input message. - -**Formatting tool call examples** - -One area where formatting examples as messages can be tricky is when our example outputs have tool calls. This is because different models have different constraints on what types of message sequences are allowed when any tool calls are generated. - -- Some models require that any AIMessage with tool calls be immediately followed by ToolMessages for every tool call, -- Some models additionally require that any ToolMessages be immediately followed by an AIMessage before the next HumanMessage, -- Some models require that tools are passed in to the model if there are any tool calls / ToolMessages in the chat history. - -These requirements are model-specific and should be checked for the model you are using. If your model requires ToolMessages after tool calls and/or AIMessages after ToolMessages and your examples only include expected tool calls and not the actual tool outputs, you can try adding dummy ToolMessages / AIMessages to the end of each example with generic contents to satisfy the API constraints. - -In these cases it's especially worth experimenting with inserting your examples as strings versus messages, as having dummy messages can adversely affect certain models. - -You can see a case study of how Anthropic and OpenAI respond to different few-shot prompting techniques on two different tool calling benchmarks [here](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/). - -### Retrieval - -LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information. Fine-tuning an LLM with specific facts is one way to mitigate this, but is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise). -Retrieval is the process of providing relevant information to an LLM to improve its response for a given input. Retrieval augmented generation (RAG) is the process of grounding the LLM generation (output) using the retrieved information. - -:::tip - -- See our RAG from Scratch [video series](https://youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x&feature=shared). - The code examples are in Python but is useful for a general overview of RAG concepts for visual learners. -- For a high-level guide on retrieval, see this [tutorial on RAG](/docs/tutorials/rag/). - -::: - -RAG is only as good as the retrieved documents’ relevance and quality. Fortunately, an emerging set of techniques can be employed to design and improve RAG systems. We've focused on taxonomizing and summarizing many of these techniques (see below figure) and will share some high-level strategic guidance in the following sections. -You can and should experiment with using different pieces together. You might also find [this LangSmith guide](https://docs.smith.langchain.com/how_to_guides/evaluation/evaluate_llm_application) useful for showing how to evaluate different iterations of your app. - -![](/img/rag_landscape.png) - -#### Query Translation - -First, consider the user input(s) to your RAG system. 
Ideally, a RAG system can handle a wide range of inputs, from poorly worded questions to complex multi-part queries. -**Using an LLM to review and optionally modify the input is the central idea behind query translation.** This serves as a general buffer, optimizing raw user inputs for your retrieval system. -For example, this can be as simple as extracting keywords or as complex as generating multiple sub-questions for a complex query. - -| Name | When to use | Description | -| --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Multi-query](/docs/how_to/multiple_queries/) | When you need to cover multiple perspectives of a question. | Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, return the unique documents for all queries. | -| [Decomposition (Python cookbook)](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer). | -| [Step-back (Python cookbook)](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. | -| [HyDE (Python cookbook)](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. | - -:::tip - -See our Python RAG from Scratch videos for a few different specific approaches: - -- [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared) -- [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared) -- [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared) -- [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared) - -::: - -#### Routing - -Second, consider the data sources available to your RAG system. You want to query across more than one database or across structured and unstructured data sources. **Using an LLM to review the input and route it to the appropriate data source is a simple and effective approach for querying across sources.** - -| Name | When to use | Description | -| ------------------------------------------------------------------------ | ----------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | -| [Logical routing](/docs/how_to/routing/) | When you can prompt an LLM with rules to decide where to route the input. 
| Logical routing can use an LLM to reason about the query and choose which datastore is most appropriate. |
-| [Semantic routing](/docs/how_to/routing/#routing-by-semantic-similarity) | When semantic similarity is an effective way to determine where to route the input. | Semantic routing embeds both the query and, typically, a set of prompts. It then chooses the appropriate prompt based upon similarity. |
-
-:::tip
-
-See our Python RAG from Scratch video on [routing](https://youtu.be/pfpIndq7Fi8?feature=shared).
-
-:::
-
-#### Query Construction
-
-Third, consider whether any of your data sources require specific query formats. Many structured databases use SQL. Vector stores often have specific syntax for applying keyword filters to document metadata. **Using an LLM to convert a natural language query into a query syntax is a popular and powerful approach.**
-In particular, [text-to-SQL](/docs/tutorials/sql_qa/), [text-to-Cypher](/docs/tutorials/graph/), and [query analysis for metadata filters](/docs/tutorials/query_analysis/#query-analysis) are useful ways to interact with structured, graph, and vector databases respectively.
-
-| Name | When to Use | Description |
-| --- | --- | --- |
-| [Text to SQL](/docs/tutorials/sql_qa/) | If users are asking questions that require information housed in a relational database, accessible via SQL. | This uses an LLM to transform user input into a SQL query. |
-| [Text-to-Cypher](/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher. | This uses an LLM to transform user input into a Cypher query. |
-| [Self Query](/docs/how_to/self_query/) | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
-
-:::tip
-
-See our [blog post overview](https://blog.langchain.dev/query-construction/) and RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared), the process of text-to-DSL where DSL is a domain-specific language required to interact with a given database. This converts user questions into structured queries.
-
-:::
-
-#### Indexing
-
-Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
-
-Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question.
Furthermore, LLMs are increasingly capable of processing millions of tokens. - -Two approaches can address this tension: (1) [Multi Vector](/docs/how_to/multi_vector/) retriever using an LLM to translate documents into any form (e.g., often into a summary) that is well-suited for indexing, but returns full documents to the LLM for generation. (2) [ParentDocument](/docs/how_to/parent_document_retriever/) retriever embeds document chunks, but also returns full documents. The idea is to get the best of both worlds: use concise representations (summaries or chunks) for retrieval, but use the full documents for answer generation. - -| Name | Index Type | Uses an LLM | When to Use | Description | -| --------------------------------------------------------------------- | ----------------------------- | ------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| [Vector store](/docs/how_to/vectorstore_retriever/) | Vector store | No | If you are just getting started and looking for something quick and easy. | This is the simplest method and the one that is easiest to get started with. It involves creating embeddings for each piece of text. | -| [ParentDocument](/docs/how_to/parent_document_retriever/) | Vector store + Document Store | No | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). | -| [Multi Vector](/docs/how_to/multi_vector/) | Vector store + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself. | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions. | -| [Time-Weighted Vector store](/docs/how_to/time_weighted_vectorstore/) | Vector store | No | If you have timestamps associated with your documents, and you want to retrieve the most recent ones | This fetches documents based on a combination of semantic similarity (as in normal vector retrieval) and recency (looking at timestamps of indexed documents) | - -:::tip - -- See our Python RAG from Scratch video on [indexing fundamentals](https://youtu.be/bjb_EMsTDKI?feature=shared) -- See our Python RAG from Scratch video on [multi vector retriever](https://youtu.be/gTCU9I6QqCE?feature=shared) - -::: - -Fifth, consider ways to improve the quality of your similarity search itself. Embedding models compress text into fixed-length (vector) representations that capture the semantic content of the document. This compression is useful for search / retrieval, but puts a heavy burden on that single vector representation to capture the semantic nuance / detail of the document. In some cases, irrelevant or redundant content can dilute the semantic usefulness of the embedding. - -There are some additional tricks to improve the quality of your retrieval. 
Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. Many [vector stores](docs/integrations/retrievers/supabase-hybrid/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://api.js.langchain.com/interfaces/langchain_core.vectorstores.VectorStoreInterface.html#maxMarginalRelevanceSearch), which attempts to diversify the results of a search to avoid returning similar and redundant documents. - -| Name | When to use | Description | -| ------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -| [Hybrid search](/docs/integrations/retrievers/supabase-hybrid/) | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. | -| [Maximal Marginal Relevance (MMR)](/docs/integrations/vectorstores/mongodb_atlas/#maximal-marginal-relevance) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. | - -#### Post-processing - -Sixth, consider ways to filter or rank retrieved documents. This is very useful if you are [combining documents returned from multiple sources](/docs/how_to/ensemble_retriever), since it can can down-rank less relevant documents and / or [compress similar documents](/docs/how_to/contextual_compression/#more-built-in-compressors-filters). - -| Name | Index Type | Uses an LLM | When to Use | Description | -| -------------------------------------------------------------------- | ---------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Contextual Compression](/docs/how_to/contextual_compression/) | Any | Sometimes | If you are finding that your retrieved documents contain too much irrelevant information and are distracting the LLM. | This puts a post-processing step on top of another retriever and extracts only the most relevant information from retrieved documents. This can be done with embeddings or an LLM. | -| [Ensemble](/docs/how_to/ensemble_retriever/) | Any | No | If you have multiple retrieval methods and want to try combining them. | This fetches documents from multiple retrievers and then combines them. | -| [Re-ranking](/docs/integrations/document_compressors/cohere_rerank/) | Any | Yes | If you want to rank retrieved documents based upon relevance, especially if you want to combine results from multiple retrieval methods. | Given a query and a list of documents, Rerank indexes the documents from most to least semantically relevant to the query. 
|
-
-:::tip
-
-See our Python RAG from Scratch video on [RAG-Fusion](https://youtu.be/77qELPbNgxA?feature=shared), an approach for post-processing across multiple queries: Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and combine the ranks of multiple search result lists to produce a single, unified ranking with [Reciprocal Rank Fusion (RRF)](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).
-
-:::
-
-#### Generation
-
-**Finally, consider ways to build self-correction into your RAG system.** RAG systems can suffer from low-quality retrieval (e.g., if a user question is out of the domain for the index) and / or hallucinations in generation. A naive retrieve-generate pipeline has no ability to detect or self-correct from these kinds of errors. The concept of ["flow engineering"](https://x.com/karpathy/status/1748043513156272416) has been introduced [in the context of code generation](https://arxiv.org/abs/2401.08500): iteratively build an answer to a code question with unit tests to check and self-correct errors. Several works have applied this to RAG, such as Self-RAG and Corrective-RAG. In both cases, checks for document relevance, hallucinations, and / or answer quality are performed in the RAG answer generation flow.
-
-We've found that graphs are a great way to reliably express logical flows and have implemented ideas from several of these papers [using LangGraph](https://github.com/langchain-ai/langgraphjs/tree/main/examples/rag), as shown in the figure below (red - routing, blue - fallback, green - self-correction):
-
-- **Routing:** Adaptive RAG ([paper](https://arxiv.org/abs/2403.14403)). Route questions to different retrieval approaches, as discussed above.
-- **Fallback:** Corrective RAG ([paper](https://arxiv.org/pdf/2401.15884.pdf)). Fall back to web search if docs are not relevant to the query.
-- **Self-correction:** Self-RAG ([paper](https://arxiv.org/abs/2310.11511)). Fix answers that contain hallucinations or don't address the question.
-
-![](/img/langgraph_rag.png)
-
-| Name | When to use | Description |
-| -------------- | ---------------------------------------------------------------------- | --- |
-| Self-RAG | When needing to fix answers with hallucinations or irrelevant content. | Self-RAG performs checks for document relevance, hallucinations, and answer quality during the RAG answer generation flow, iteratively building an answer and self-correcting errors. |
-| Corrective-RAG | When needing a fallback mechanism for low relevance docs. | Corrective-RAG includes a fallback (e.g., to web search) if the retrieved documents are not relevant to the query, ensuring higher quality and more relevant retrieval. |
-
-:::tip
-
-See several videos and cookbooks showcasing RAG with LangGraph:
-
-- [LangGraph Corrective RAG](https://www.youtube.com/watch?v=E2shqsYwxck)
-- [LangGraph combining Adaptive, Self-RAG, and Corrective RAG](https://www.youtube.com/watch?v=-ROS6gfYIts)
-- [Cookbooks for RAG using LangGraph.js](https://github.com/langchain-ai/langgraphjs/tree/main/examples/rag)
-
-:::
-
-### Text splitting
-
-LangChain offers many different types of `text splitters`.
-These are available in the main `langchain` package, but can be used separately in the [`@langchain/textsplitters`](https://www.npmjs.com/package/@langchain/textsplitters) package. - -Table columns: - -- **Name**: Name of the text splitter -- **Classes**: Classes that implement this text splitter -- **Splits On**: How this text splitter splits text -- **Adds Metadata**: Whether or not this text splitter adds metadata about where each chunk came from. -- **Description**: Description of the splitter, including recommendation on when to use it. - -| Name | Classes | Splits On | Adds Metadata | Description | -| --------- | ----------------------------------------------------------------------- | ------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Recursive | [RecursiveCharacterTextSplitter](/docs/how_to/recursive_text_splitter/) | A list of user defined characters | | Recursively splits text. This splitting is trying to keep related pieces of text next to each other. This is the `recommended way` to start splitting text. | -| Code | [many languages](/docs/how_to/code_splitter/) | Code (Python, JS) specific characters | | Splits text based on characters specific to coding languages. 15 different languages are available to choose from. | -| Token | [many classes](/docs/how_to/split_by_token/) | Tokens | | Splits text on tokens. There exist a few different ways to measure tokens. | -| Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. | - -### Evaluation - - - -Evaluation is the process of assessing the performance and effectiveness of your LLM-powered applications. -It involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose. -This process is vital for building reliable applications. - -![](/img/langsmith_evaluate.png) - -[LangSmith](https://docs.smith.langchain.com/) helps with this process in a few ways: - -- It makes it easier to create and curate datasets via its tracing and annotation features -- It provides an evaluation framework that helps you define metrics and run your app against your dataset -- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/Code - -To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation). - -### Tracing - - - -A trace is essentially a series of steps that your application takes to go from input to output. -Traces contain individual steps called `runs`. These can be individual calls from a model, retriever, -tool, or sub-chains. -Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues. - -For a deeper dive, check out [this LangSmith conceptual guide](https://docs.smith.langchain.com/concepts/tracing). - -### Generative UI - -LangChain.js provides a few templates and examples showing off generative UI, -and other ways of streaming data from the server to the client, specifically in React/Next.js. - -You can find the template for generative UI in the official [LangChain.js Next.js template](https://github.com/langchain-ai/langchain-nextjs-template/blob/main/app/generative_ui/README.md). 
- -For streaming agentic responses and intermediate steps, you can find the [template and documentation here](https://github.com/langchain-ai/langchain-nextjs-template/blob/main/app/ai_sdk/agent/README.md). - -And finally, streaming tool calls and structured output can be found [here](https://github.com/langchain-ai/langchain-nextjs-template/blob/main/app/ai_sdk/tools/README.md). diff --git a/docs/core_docs/docs/concepts/agents.mdx b/docs/core_docs/docs/concepts/agents.mdx new file mode 100644 index 000000000000..770bca57ff63 --- /dev/null +++ b/docs/core_docs/docs/concepts/agents.mdx @@ -0,0 +1,24 @@ +# Agents + +By themselves, language models can't take actions - they just output text. Agents are systems that take a high-level task and use an LLM as a reasoning engine to decide what actions to take and execute those actions. + +[LangGraph](/docs/concepts/architecture#langgraph) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents. We recommend that you use LangGraph for building agents. + +Please see the following resources for more information: + +- LangGraph docs on [common agent architectures](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/) +- [Pre-built agents in LangGraph](https://langchain-ai.github.io/langgraphjs/reference/functions/langgraph_prebuilt.createReactAgent.html) + +## Legacy agent concept: AgentExecutor + +LangChain previously introduced the `AgentExecutor` as a runtime for agents. +While it served as an excellent starting point, its limitations became apparent when dealing with more sophisticated and customized agents. +As a result, we're gradually phasing out `AgentExecutor` in favor of more flexible solutions in LangGraph. + +### Transitioning from AgentExecutor to langgraph + +If you're currently using `AgentExecutor`, don't worry! We've prepared resources to help you: + +1. For those who still need to use `AgentExecutor`, we offer a comprehensive guide on [how to use AgentExecutor](/docs/how_to/agent_executor). + +2. However, we strongly recommend transitioning to LangGraph for improved flexibility and control. To facilitate this transition, we've created a detailed [migration guide](/docs/how_to/migrate_agent) to help you move from `AgentExecutor` to LangGraph seamlessly. diff --git a/docs/core_docs/docs/concepts/architecture.mdx b/docs/core_docs/docs/concepts/architecture.mdx new file mode 100644 index 000000000000..85465bcf5bde --- /dev/null +++ b/docs/core_docs/docs/concepts/architecture.mdx @@ -0,0 +1,64 @@ +import ThemedImage from "@theme/ThemedImage"; +import useBaseUrl from "@docusaurus/useBaseUrl"; + +# Architecture + +LangChain is a framework that consists of a number of packages. + + + +## @langchain/core + +This package contains base abstractions for different components and ways to compose them together. +The interfaces for core components like chat models, vector stores, tools and more are defined here. +No third-party integrations are defined here. +The dependencies are very lightweight. + +## langchain + +The main `langchain` package contains chains and retrieval strategies that make up an application's cognitive architecture. +These are NOT third-party integrations. +All chains, agents, and retrieval strategies here are NOT specific to any one integration, but rather generic across all integrations. + +## Integration packages + +Popular integrations have their own packages (e.g. 
`@langchain/openai`, `@langchain/anthropic`, etc) so that they can be properly versioned and appropriately lightweight.
+
+For more information, see:
+
+- A list of [integration packages](/docs/integrations/platforms/)
+- The [API Reference](https://api.js.langchain.com/), where you can find detailed information about each integration package.
+
+## @langchain/community
+
+This package contains third-party integrations that are maintained by the LangChain community.
+Key integration packages are separated out (see above).
+This contains integrations for various components (chat models, vector stores, tools, etc).
+All dependencies in this package are optional to keep the package as lightweight as possible.
+
+## @langchain/langgraph
+
+`@langchain/langgraph` is an extension of `langchain` aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
+
+LangGraph exposes high-level interfaces for creating common types of agents, as well as a low-level API for composing custom flows.
+
+:::info[Further reading]
+
+- See our LangGraph overview [here](https://langchain-ai.github.io/langgraphjs/concepts/high_level/#core-principles).
+- See our LangGraph Academy Course [here](https://academy.langchain.com/courses/intro-to-langgraph).
+
+:::
+
+## LangSmith
+
+A developer platform that lets you debug, test, evaluate, and monitor LLM applications.
+
+For more information, see the [LangSmith documentation](https://docs.smith.langchain.com).
diff --git a/docs/core_docs/docs/concepts/callbacks.mdx b/docs/core_docs/docs/concepts/callbacks.mdx
new file mode 100644
index 000000000000..777a3c804f52
--- /dev/null
+++ b/docs/core_docs/docs/concepts/callbacks.mdx
@@ -0,0 +1,59 @@
+# Callbacks
+
+:::note Prerequisites
+
+- [Runnable interface](/docs/concepts/runnables)
+
+:::
+
+LangChain provides a callback system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
+
+You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.
+
+## Callback events
+
+| Event            | Event Trigger                               | Associated Method      |
+| ---------------- | ------------------------------------------- | ---------------------- |
+| Chat model start | When a chat model starts                    | `handleChatModelStart` |
+| LLM start        | When an LLM starts                          | `handleLLMStart`       |
+| LLM new token    | When an LLM or chat model emits a new token | `handleLLMNewToken`    |
+| LLM ends         | When an LLM or chat model ends              | `handleLLMEnd`         |
+| LLM errors       | When an LLM or chat model errors            | `handleLLMError`       |
+| Chain start      | When a chain starts running                 | `handleChainStart`     |
+| Chain end        | When a chain ends                           | `handleChainEnd`       |
+| Chain error      | When a chain errors                         | `handleChainError`     |
+| Tool start       | When a tool starts running                  | `handleToolStart`      |
+| Tool end         | When a tool ends                            | `handleToolEnd`        |
+| Tool error       | When a tool errors                          | `handleToolError`      |
+| Retriever start  | When a retriever starts                     | `handleRetrieverStart` |
+| Retriever end    | When a retriever ends                       | `handleRetrieverEnd`   |
+| Retriever error  | When a retriever errors                     | `handleRetrieverError` |
+
+## Callback handlers
+
+- Callback handlers implement the [BaseCallbackHandler](https://api.js.langchain.com/classes/_langchain_core.callbacks_base.BaseCallbackHandler.html) interface.
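+
+For illustration, here is a minimal sketch of a handler and how it can be passed at request time. It assumes the `ChatOpenAI` integration from `@langchain/openai` and an illustrative model name; any chat model (or other runnable) accepts the same `callbacks` option:
+
+```typescript
+import { ChatOpenAI } from "@langchain/openai";
+
+// A handler can be a plain object that implements only the methods you need.
+const loggingHandler = {
+  handleChatModelStart: async () => {
+    console.log("Chat model started");
+  },
+  handleLLMNewToken: async (token: string) => {
+    // Emitted only when the model streams tokens.
+    console.log(`New token: ${token}`);
+  },
+  handleLLMEnd: async () => {
+    console.log("Chat model finished");
+  },
+};
+
+const model = new ChatOpenAI({ model: "gpt-4o-mini" }); // illustrative model name
+await model.invoke("Hello!", { callbacks: [loggingHandler] });
+```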
+ +During run-time LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://api.js.langchain.com/classes/_langchain_core.callbacks_manager.BaseCallbackManager.html)) which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered. + +## Passing callbacks + +The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places: + +- **Request time callbacks**: Passed at the time of the request in addition to the input data. + Available on all standard `Runnable` objects. These callbacks are INHERITED by all children + of the object they are defined on. For example, `await chain.invoke({ number: 25 }, { callbacks: [handler] })`. +- **Constructor callbacks**: `const chain = new TheNameOfSomeChain({ callbacks: [handler] })`. These callbacks + are passed as arguments to the constructor of the object. The callbacks are scoped + only to the object they are defined on, and are **not** inherited by any children of the object. + +:::warning + +Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children +of the object. + +::: + +If you're creating a custom chain or runnable, you need to remember to propagate request time +callbacks to any child objects. + +For specifics on how to use callbacks, see the [relevant how-to guides here](/docs/how_to/#callbacks). diff --git a/docs/core_docs/docs/concepts/chat_history.mdx b/docs/core_docs/docs/concepts/chat_history.mdx new file mode 100644 index 000000000000..dd3b1adef75b --- /dev/null +++ b/docs/core_docs/docs/concepts/chat_history.mdx @@ -0,0 +1,49 @@ +# Chat history + +:::info Prerequisites + +- [Messages](/docs/concepts/messages) +- [Chat models](/docs/concepts/chat_models) +- [Tool calling](/docs/concepts/tool_calling) + +::: + +Chat history is a record of the conversation between the user and the chat model. It is used to maintain context and state throughout the conversation. The chat history is sequence of [messages](/docs/concepts/messages), each of which is associated with a specific [role](/docs/concepts/messages#role), such as "user", "assistant", "system", or "tool". + +## Conversation patterns + +![Conversation patterns](/img/conversation_patterns.png) + +Most conversations start with a **system message** that sets the context for the conversation. This is followed by a **user message** containing the user's input, and then an **assistant message** containing the model's response. + +The **assistant** may respond directly to the user or if configured with tools request that a [tool](/docs/concepts/tool_calling) be invoked to perform a specific task. + +So a full conversation often involves a combination of two patterns of alternating messages: + +1. The **user** and the **assistant** representing a back-and-forth conversation. +2. The **assistant** and **tool messages** representing an ["agentic" workflow](/docs/concepts/agents) where the assistant is invoking tools to perform specific tasks. + +## Managing chat history + +Since chat models have a maximum limit on input size, it's important to manage chat history and trim it as needed to avoid exceeding the [context window](/docs/concepts/chat_models#context_window). + +While processing chat history, it's essential to preserve a correct conversation structure. 
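+
+As a rough sketch of what trimming can look like (this assumes the `trimMessages` helper from `@langchain/core/messages` and uses a simple message-count "token" counter purely for illustration), see below; the guidelines that follow apply to the trimmed history as well:
+
+```typescript
+import {
+  AIMessage,
+  HumanMessage,
+  SystemMessage,
+  trimMessages,
+} from "@langchain/core/messages";
+
+const history = [
+  new SystemMessage("You are a helpful assistant."),
+  new HumanMessage("Hi, I'm Bob."),
+  new AIMessage("Hello Bob! How can I help you today?"),
+  new HumanMessage("Tell me a joke."),
+];
+
+// Keep the most recent messages; here each message counts as one "token"
+// for simplicity. Real applications usually count model tokens instead.
+const trimmed = await trimMessages(history, {
+  maxTokens: 3,
+  strategy: "last",
+  tokenCounter: (msgs) => msgs.length,
+  includeSystem: true, // keep the system message if one is present
+});
+```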
+ +Key guidelines for managing chat history: + +- The conversation should follow one of these structures: + - The first message is either a "user" message or a "system" message, followed by a "user" and then an "assistant" message. + - The last message should be either a "user" message or a "tool" message containing the result of a tool call. +- When using [tool calling](/docs/concepts/tool_calling), a "tool" message should only follow an "assistant" message that requested the tool invocation. + +:::tip + +Understanding correct conversation structure is essential for being able to properly implement +[memory](https://langchain-ai.github.io/langgraphjs/concepts/memory/) in chat models. + +::: + +## Related resources + +- [How to trim messages](/docs/how_to/trim_messages/) +- [Memory guide](https://langchain-ai.github.io/langgraphjs/concepts/memory/) for information on implementing short-term and long-term memory in chat models using [LangGraph](https://langchain-ai.github.io/langgraphjs/). diff --git a/docs/core_docs/docs/concepts/chat_models.mdx b/docs/core_docs/docs/concepts/chat_models.mdx new file mode 100644 index 000000000000..62ade12dad08 --- /dev/null +++ b/docs/core_docs/docs/concepts/chat_models.mdx @@ -0,0 +1,153 @@ +# Chat models + +## Overview + +Large Language Models (LLMs) are advanced machine learning models that excel in a wide range of language-related tasks such as text generation, translation, summarization, question answering, and more, without needing task-specific tuning for every scenario. + +Modern LLMs are typically accessed through a chat model interface that takes a list of [messages](/docs/concepts/messages) as input and returns a [message](/docs/concepts/messages) as output. + +The newest generation of chat models offer additional capabilities: + +- [Tool calling](/docs/concepts/tool_calling): Many popular chat models offer a native [tool calling](/docs/concepts/tool_calling) API. This API allows developers to build rich applications that enable AI to interact with external services, APIs, and databases. Tool calling can also be used to extract structured information from unstructured data and perform various other tasks. +- [Structured output](/docs/concepts/structured_outputs): A technique to make a chat model respond in a structured format, such as JSON that matches a given schema. +- [Multimodality](/docs/concepts/multimodality): The ability to work with data other than text; for example, images, audio, and video. + +## Features + +LangChain provides a consistent interface for working with chat models from different providers while offering additional features for monitoring, debugging, and optimizing the performance of applications that use LLMs. + +- Integrations with many chat model providers (e.g., Anthropic, OpenAI, Ollama, Microsoft Azure, Google Vertex, Amazon Bedrock, Hugging Face, Cohere, Groq). Please see [chat model integrations](/docs/integrations/chat/) for an up-to-date list of supported models. +- Use either LangChain's [messages](/docs/concepts/messages) format or OpenAI format. +- Standard [tool calling API](/docs/concepts/tool_calling): standard interface for binding tools to models, accessing tool call requests made by models, and sending tool results back to the model. +- Standard API for structuring outputs (/docs/concepts/structured_outputs) via the `withStructuredOutput` method. +- Integration with [LangSmith](https://docs.smith.langchain.com) for monitoring and debugging production-grade applications based on LLMs. 
+- Additional features like standardized [token usage](/docs/concepts/messages#token_usage), [rate limiting](#rate-limiting), [caching](#cache) and more. + +## Integrations + +LangChain has many chat model integrations that allow you to use a wide variety of models from different providers. + +These integrations are one of two types: + +1. **Official models**: These are models that are officially supported by LangChain and/or model provider. You can find these models in the `@langchain/` packages. +2. **Community models**: There are models that are mostly contributed and supported by the community. You can find these models in the `@langchain/community` package. + +LangChain chat models are named with a convention that prefixes "Chat" to their class names (e.g., `ChatOllama`, `ChatAnthropic`, `ChatOpenAI`, etc.). + +Please review the [chat model integrations](/docs/integrations/chat/) for a list of supported models. + +:::note +Models that do **not** include the prefix "Chat" in their name or include "LLM" as a suffix in their name typically refer to older models that do not follow the chat model interface and instead use an interface that takes a string as input and returns a string as output. +::: + +## Interface + +LangChain chat models implement the [BaseChatModel](https://api.js.langchain.com/classes/_langchain_core.language_models_chat_models.BaseChatModel.html) interface. Because BaseChatModel also implements the [Runnable Interface](/docs/concepts/runnables), chat models support a [standard streaming interface](/docs/concepts/streaming), optimized [batching](/docs/concepts/runnables#batch), and more. Please see the [Runnable Interface](/docs/concepts/runnables) for more details. + +Many of the key methods of chat models operate on [messages](/docs/concepts/messages) as input and return messages as output. + +Chat models offer a standard set of parameters that can be used to configure the model. These parameters are typically used to control the behavior of the model, such as the temperature of the output, the maximum number of tokens in the response, and the maximum time to wait for a response. Please see the [standard parameters](#standard-parameters) section for more details. + +:::note +In documentation, we will often use the terms "LLM" and "Chat Model" interchangeably. This is because most modern LLMs are exposed to users via a chat model interface. + +However, LangChain also has implementations of older LLMs that do not follow the chat model interface and instead use an interface that takes a string as input and returns a string as output. These models are typically named without the "Chat" prefix (e.g., `Ollama`, `Anthropic`, `OpenAI`, etc.). +These models implement the [BaseLLM](https://api.js.langchain.com/classes/_langchain_core.language_models_llms.BaseLLM.html) interface and may be named with the "LLM" suffix (e.g., `OpenAILLM`, etc.). Generally, users should not use these models. +::: + +### Key methods + +The key methods of a chat model are: + +1. **invoke**: The primary method for interacting with a chat model. It takes a list of [messages](/docs/concepts/messages) as input and returns a list of messages as output. +2. **stream**: A method that allows you to stream the output of a chat model as it is generated. +3. **batch**: A method that allows you to batch multiple requests to a chat model together for more efficient processing. +4. **bindTools**: A method that allows you to bind a tool to a chat model for use in the model's execution context. +5. 
**withStructuredOutput**: A wrapper around the `invoke` method for models that natively support [structured output](/docs/concepts/structured_outputs). + +Other important methods can be found in the [BaseChatModel API Reference](https://api.js.langchain.com/classes/_langchain_core.language_models_chat_models.BaseChatModel.html). + +### Inputs and outputs + +Modern LLMs are typically accessed through a chat model interface that takes [messages](/docs/concepts/messages) as input and returns [messages](/docs/concepts/messages) as output. Messages are typically associated with a role (e.g., "system", "human", "assistant") and one or more content blocks that contain text or potentially multimodal data (e.g., images, audio, video). + +LangChain supports two message formats to interact with chat models: + +1. **LangChain Message Format**: LangChain's own message format, which is used by default and is used internally by LangChain. +2. **OpenAI's Message Format**: OpenAI's message format. + +### Standard parameters + +Many chat models have standardized parameters that can be used to configure the model: + +| Parameter | Description | +| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | The name or identifier of the specific AI model you want to use (e.g., `"gpt-3.5-turbo"` or `"gpt-4"`). | +| `temperature` | Controls the randomness of the model's output. A higher value (e.g., 1.0) makes responses more creative, while a lower value (e.g., 0.1) makes them more deterministic and focused. | +| `timeout` | The maximum time (in seconds) to wait for a response from the model before canceling the request. Ensures the request doesn’t hang indefinitely. | +| `maxTokens` | Limits the total number of tokens (words and punctuation) in the response. This controls how long the output can be. | +| `stop` | Specifies stop sequences that indicate when the model should stop generating tokens. For example, you might use specific strings to signal the end of a response. | +| `maxRetries` | The maximum number of attempts the system will make to resend a request if it fails due to issues like network timeouts or rate limits. | +| `apiKey` | The API key required for authenticating with the model provider. This is usually issued when you sign up for access to the model. | +| `baseUrl` | The URL of the API endpoint where requests are sent. This is typically provided by the model's provider and is necessary for directing your requests. | + +Some important things to note: + +- Standard parameters only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so max_tokens can't be supported on these. +- Standard params are currently only enforced on integrations that have their own integration packages (e.g. `@langchain/openai`, `@langchain/anthropic`, etc.), they're not enforced on models in `@langchain/community`. + +ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel head to the [API reference](https://api.js.langchain.com/) for that model. + +## Tool calling + +Chat models can call [tools](/docs/concepts/tools) to perform tasks such as fetching data from a database, making API requests, or running custom code. 
Please +see the [tool calling](/docs/concepts/tool_calling) guide for more information. + +## Structured outputs + +Chat models can be requested to respond in a particular format (e.g., JSON or matching a particular schema). This feature is extremely +useful for information extraction tasks. Please read more about +the technique in the [structured outputs](/docs/concepts/structured_outputs) guide. + +## Multimodality + +Large Language Models (LLMs) are not limited to processing text. They can also be used to process other types of data, such as images, audio, and video. This is known as [multimodality](/docs/concepts/multimodality). + +Currently, only some LLMs support multimodal inputs, and almost none support multimodal outputs. Please consult the specific model documentation for details. + +## Context window + +A chat model's context window refers to the maximum size of the input sequence the model can process at one time. While the context windows of modern LLMs are quite large, they still present a limitation that developers must keep in mind when working with chat models. + +If the input exceeds the context window, the model may not be able to process the entire input and could raise an error. In conversational applications, this is especially important because the context window determines how much information the model can "remember" throughout a conversation. Developers often need to manage the input within the context window to maintain a coherent dialogue without exceeding the limit. For more details on handling memory in conversations, refer to the [memory](https://langchain-ai.github.io/langgraphjs/concepts/memory/). + +The size of the input is measured in [tokens](/docs/concepts/tokens) which are the unit of processing that the model uses. + +## Advanced topics + +### Caching + +Chat model APIs can be slow, so a natural question is whether to cache the results of previous conversations. Theoretically, caching can help improve performance by reducing the number of requests made to the model provider. In practice, caching chat model responses is a complex problem and should be approached with caution. + +The reason is that getting a cache hit is unlikely after the first or second interaction in a conversation if relying on caching the **exact** inputs into the model. For example, how likely do you think that multiple conversations start with the exact same message? What about the exact same three messages? + +An alternative approach is to use semantic caching, where you cache responses based on the meaning of the input rather than the exact input itself. This can be effective in some situations, but not in others. + +A semantic cache introduces a dependency on another model on the critical path of your application (e.g., the semantic cache may rely on an [embedding model](/docs/concepts/embedding_models) to convert text to a vector representation), and it's not guaranteed to capture the meaning of the input accurately. + +However, there might be situations where caching chat model responses is beneficial. For example, if you have a chat model that is used to answer frequently asked questions, caching responses can help reduce the load on the model provider and improve response times. + +Please see the [how to cache chat model responses](/docs/how_to/#chat-model-caching) guide for more details. + +## Related resources + +- How-to guides on using chat models: [how-to guides](/docs/how_to/#chat-models). 
+- List of supported chat models: [chat model integrations](/docs/integrations/chat/).
+
+### Conceptual guides
+
+- [Messages](/docs/concepts/messages)
+- [Tool calling](/docs/concepts/tool_calling)
+- [Multimodality](/docs/concepts/multimodality)
+- [Structured outputs](/docs/concepts/structured_outputs)
+- [Tokens](/docs/concepts/tokens)
diff --git a/docs/core_docs/docs/concepts/document_loaders.mdx b/docs/core_docs/docs/concepts/document_loaders.mdx
new file mode 100644
index 000000000000..efd20f38fd6d
--- /dev/null
+++ b/docs/core_docs/docs/concepts/document_loaders.mdx
@@ -0,0 +1,40 @@
+# Document loaders
+
+:::info[Prerequisites]
+
+- [Document loaders API reference](/docs/how_to/#document-loaders)
+
+:::
+
+Document loaders are designed to load document objects. LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc.
+
+## Integrations
+
+You can find available integrations on the [Document loaders integrations page](/docs/integrations/document_loaders/).
+
+## Interface
+
+Document loaders implement the [BaseLoader interface](https://api.js.langchain.com/classes/_langchain_core.document_loaders_base.BaseDocumentLoader.html).
+
+Each DocumentLoader has its own specific parameters, but they can all be invoked in the same way with the `.load` method.
+
+Here's a simple example:
+
+```typescript
+import { CSVLoader } from "@langchain/community/document_loaders/fs/csv";
+
+const loader = new CSVLoader(
+  ... // <-- Integration specific parameters here
+);
+const data = await loader.load();
+```
+
+## Related resources
+
+Please see the following resources for more information:
+
+- [How-to guides for document loaders](/docs/how_to/#document-loaders)
+- [Document API reference](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html)
+- [Document loaders integrations](/docs/integrations/document_loaders/)
diff --git a/docs/core_docs/docs/concepts/embedding_models.mdx b/docs/core_docs/docs/concepts/embedding_models.mdx
new file mode 100644
index 000000000000..5927af636491
--- /dev/null
+++ b/docs/core_docs/docs/concepts/embedding_models.mdx
@@ -0,0 +1,133 @@
+# Embedding models
+
+:::info[Prerequisites]
+
+- [Documents](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html)
+
+:::
+
+:::info[Note]
+
+This conceptual overview focuses on text-based embedding models.
+
+Embedding models can also be [multimodal](/docs/concepts/multimodality) though such models are not currently supported by LangChain.
+
+:::
+
+Imagine being able to capture the essence of any text - a tweet, document, or book - in a single, compact representation.
+This is the power of embedding models, which lie at the heart of many retrieval systems.
+Embedding models transform human language into a format that machines can understand and compare with speed and accuracy.
+These models take text as input and produce a fixed-length array of numbers, a numerical fingerprint of the text's semantic meaning.
+Embeddings allow search systems to find relevant documents not just based on keyword matches, but on semantic understanding.
+
+## Key concepts
+
+![Conceptual Overview](/img/embeddings_concept.png)
+
+(1) **Embed text as a vector**: Embeddings transform text into a numerical vector representation.
+
+(2) **Measure similarity**: Embedding vectors can be compared using simple mathematical operations.
+ +## Embedding + +### Historical context + +The landscape of embedding models has evolved significantly over the years. +A pivotal moment came in 2018 when Google introduced [BERT (Bidirectional Encoder Representations from Transformers)](https://www.nvidia.com/en-us/glossary/bert/). +BERT applied transformer models to embed text as a simple vector representation, which lead to unprecedented performance across various NLP tasks. +However, BERT wasn't optimized for generating sentence embeddings efficiently. +This limitation spurred the creation of [SBERT (Sentence-BERT)](https://www.sbert.net/examples/training/sts/README.html), which adapted the BERT architecture to generate semantically rich sentence embeddings, easily comparable via similarity metrics like cosine similarity, dramatically reduced the computational overhead for tasks like finding similar sentences. +Today, the embedding model ecosystem is diverse, with numerous providers offering their own implementations. +To navigate this variety, researchers and practitioners often turn to benchmarks like the Massive Text Embedding Benchmark (MTEB) [here](https://huggingface.co/blog/mteb) for objective comparisons. + +:::info[Further reading] + +- See the [seminal BERT paper](https://arxiv.org/abs/1810.04805). +- See Cameron Wolfe's [excellent review](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2) of embedding models. +- See the [Massive Text Embedding Benchmark (MTEB)](https://huggingface.co/blog/mteb) leaderboard for a comprehensive overview of embedding models. + +::: + +### Interface + +LangChain provides a universal interface for working with them, providing standard methods for common operations. +This common interface simplifies interaction with various embedding providers through two central methods: + +- `embedDocuments`: For embedding multiple texts (documents) +- `embedQuery`: For embedding a single text (query) + +This distinction is important, as some providers employ different embedding strategies for documents (which are to be searched) versus queries (the search input itself). +To illustrate, here's a practical example using LangChain's `.embedDocuments` method to embed a list of strings: + +```typescript +import { OpenAIEmbeddings } from "@langchain/openai"; +const embeddingsModel = new OpenAIEmbeddings(); +const embeddings = await embeddingsModel.embedDocuments([ + "Hi there!", + "Oh, hello!", + "What's your name?", + "My friends call me World", + "Hello World!", +]); + +console.log(`(${embeddings.length}, ${embeddings[0].length})`); +// (5, 1536) +``` + +For convenience, you can also use the `embedQuery` method to embed a single text: + +```typescript +const queryEmbedding = await embeddingsModel.embedQuery( + "What is the meaning of life?" +); +``` + +:::info[Further reading] + +- See the full list of [LangChain embedding model integrations](/docs/integrations/text_embedding/). +- See these [how-to guides](/docs/how_to/embed_text) for working with embedding models. + +::: + +### Integrations + +LangChain offers many embedding model integrations which you can find [on the embedding models](/docs/integrations/text_embedding/) integrations page. + +## Measure similarity + +Each embedding is essentially a set of coordinates, often in a high-dimensional space. +In this space, the position of each point (embedding) reflects the meaning of its corresponding text. 
+Just as similar words might be close to each other in a thesaurus, similar concepts end up close to each other in this embedding space. +This allows for intuitive comparisons between different pieces of text. +By reducing text to these numerical representations, we can use simple mathematical operations to quickly measure how alike two pieces of text are, regardless of their original length or structure. +Some common similarity metrics include: + +- **Cosine Similarity**: Measures the cosine of the angle between two vectors. +- **Euclidean Distance**: Measures the straight-line distance between two points. +- **Dot Product**: Measures the projection of one vector onto another. + +The choice of similarity metric should be chosen based on the model. +As an example, [OpenAI suggests cosine similarity for their embeddings](https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use), which can be easily implemented: + +```typescript +function cosineSimilarity(vec1: number[], vec2: number[]): number { + const dotProduct = vec1.reduce((sum, val, i) => sum + val * vec2[i], 0); + const norm1 = Math.sqrt(vec1.reduce((sum, val) => sum + val * val, 0)); + const norm2 = Math.sqrt(vec2.reduce((sum, val) => sum + val * val, 0)); + return dotProduct / (norm1 * norm2); +} + +const similarity = cosineSimilarity(queryResult, documentResult); +console.log("Cosine Similarity:", similarity); +``` + +:::info[Further reading] + +- See Simon Willison's [nice blog post and video](https://simonwillison.net/2023/Oct/23/embeddings/) on embeddings and similarity metrics. +- See [this documentation](https://developers.google.com/machine-learning/clustering/dnn-clustering/supervised-similarity) from Google on similarity metrics to consider with embeddings. +- See Pinecone's [blog post](https://www.pinecone.io/learn/vector-similarity/) on similarity metrics. +- See OpenAI's [FAQ](https://platform.openai.com/docs/guides/embeddings/faq) on what similarity metric to use with OpenAI embeddings. + +::: diff --git a/docs/core_docs/docs/concepts/evaluation.mdx b/docs/core_docs/docs/concepts/evaluation.mdx new file mode 100644 index 000000000000..ff80647edd08 --- /dev/null +++ b/docs/core_docs/docs/concepts/evaluation.mdx @@ -0,0 +1,17 @@ +# Evaluation + + + +Evaluation is the process of assessing the performance and effectiveness of your LLM-powered applications. +It involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose. +This process is vital for building reliable applications. + +![](/img/langsmith_evaluate.png) + +[LangSmith](https://docs.smith.langchain.com/) helps with this process in a few ways: + +- It makes it easier to create and curate datasets via its tracing and annotation features +- It provides an evaluation framework that helps you define metrics and run your app against your dataset +- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/Code + +To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation). 
diff --git a/docs/core_docs/docs/concepts/example_selectors.mdx b/docs/core_docs/docs/concepts/example_selectors.mdx new file mode 100644 index 000000000000..89e2bf5931e3 --- /dev/null +++ b/docs/core_docs/docs/concepts/example_selectors.mdx @@ -0,0 +1,23 @@ +# Example selectors + +:::note Prerequisites + +- [Chat models](/docs/concepts/chat_models/) +- [Few-shot prompting](/docs/concepts/few_shot_prompting/) + +::: + +## Overview + +One common prompting technique for achieving better performance is to include examples as part of the prompt. This is known as [few-shot prompting](/docs/concepts/few_shot_prompting). + +This gives the [language model](/docs/concepts/chat_models/) concrete examples of how it should behave. +Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them. + +**Example Selectors** are classes responsible for selecting and then formatting examples into prompts. + +> See the [API reference for more information.](https://v03.api.js.langchain.com/classes/_langchain_core.example_selectors.BaseExampleSelector.html) + +## Related resources + +- [Example selector how-to guides](/docs/how_to/#example-selectors) diff --git a/docs/core_docs/docs/concepts/few_shot_prompting.mdx b/docs/core_docs/docs/concepts/few_shot_prompting.mdx new file mode 100644 index 000000000000..67f61caa8e59 --- /dev/null +++ b/docs/core_docs/docs/concepts/few_shot_prompting.mdx @@ -0,0 +1,90 @@ +# Few-shot prompting + +:::note Prerequisites + +- [Chat models](/docs/concepts/chat_models/) + +::: + +## Overview + +One of the most effective ways to improve model performance is to give a model examples of +what you want it to do. The technique of adding example inputs and expected outputs +to a model prompt is known as "few-shot prompting". The technique is based on the +[Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) paper. +There are a few things to think about when doing few-shot prompting: + +1. How are examples generated? +2. How many examples are in each prompt? +3. How are examples selected at runtime? +4. How are examples formatted in the prompt? + +Here are the considerations for each. + +## 1. Generating examples + +The first and most important step of few-shot prompting is coming up with a good dataset of examples. Good examples should be relevant at runtime, clear, informative, and provide information that was not already known to the model. + +At a high-level, the basic ways to generate examples are: + +- Manual: a person/people generates examples they think are useful. +- Better model: a better (presumably more expensive/slower) model's responses are used as examples for a worse (presumably cheaper/faster) model. +- User feedback: users (or labelers) leave feedback on interactions with the application and examples are generated based on that feedback (for example, all interactions with positive feedback could be turned into examples). +- LLM feedback: same as user feedback but the process is automated by having models evaluate themselves. + +Which approach is best depends on your task. For tasks where a small number core principles need to be understood really well, it can be valuable hand-craft a few really good examples. +For tasks where the space of correct behaviors is broader and more nuanced, it can be useful to generate many examples in a more automated fashion so that there's a higher likelihood of there being some highly relevant examples for any runtime input. + +**Single-turn v.s. 
multi-turn examples**
+
+Another dimension to think about when generating examples is what the example is actually showing.
+
+The simplest types of examples just have a user input and an expected model output. These are single-turn examples.
+
+A more complex type of example is one where the example is an entire conversation, usually one in which a model initially responds incorrectly and a user then tells the model how to correct its answer.
+This is called a multi-turn example. Multi-turn examples can be useful for more nuanced tasks where it's useful to show common errors and spell out exactly why they're wrong and what should be done instead.
+
+## 2. Number of examples
+
+Once we have a dataset of examples, we need to think about how many examples should be in each prompt.
+The key tradeoff is that more examples generally improve performance, but larger prompts increase costs and latency.
+And beyond some threshold having too many examples can start to confuse the model.
+Finding the right number of examples is highly dependent on the model, the task, the quality of the examples, and your cost and latency constraints.
+Anecdotally, the better the model is, the fewer examples it needs to perform well and the more quickly you hit steeply diminishing returns on adding more examples.
+But the best/only way to reliably answer this question is to run some experiments with different numbers of examples.
+
+## 3. Selecting examples
+
+Assuming we are not adding our entire example dataset into each prompt, we need to have a way of selecting examples from our dataset based on a given input. We can do this:
+
+- Randomly
+- By (semantic or keyword-based) similarity of the inputs
+- Based on some other constraints, like token size
+
+LangChain has a number of [`ExampleSelectors`](/docs/concepts/example_selectors) which make it easy to use any of these techniques.
+
+Generally, selecting by semantic similarity leads to the best model performance. But how important this is varies by model and task, and is something worth experimenting with.
+
+## 4. Formatting examples
+
+Most state-of-the-art models these days are chat models, so we'll focus on formatting examples for those. Our basic options are to insert the examples:
+
+- In the system prompt as a string
+- As their own messages
+
+If we insert our examples into the system prompt as a string, we'll need to make sure it's clear to the model where each example begins and which parts are the input versus output. Different models respond better to different syntaxes, like [ChatML](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chat-markup-language), XML, TypeScript, etc.
+
+If we insert our examples as messages, where each example is represented as a sequence of Human and AI messages, we might want to also assign [names](/docs/concepts/messages) to our messages like `"example_user"` and `"example_assistant"` to make it clear that these messages correspond to different actors than the latest input message.
+
+**Formatting tool call examples**
+
+One area where formatting examples as messages can be tricky is when our example outputs have tool calls. This is because different models have different constraints on what types of message sequences are allowed when any tool calls are generated.
+ +- Some models require that any `AIMessage` with tool calls be immediately followed by `ToolMessage`s for every tool call, +- Some models additionally require that any `ToolMessage`s be immediately followed by an `AIMessage` before the next `HumanMessage`, +- Some models require that tools are passed in to the model if there are any tool calls / `ToolMessage`s in the chat history. + +These requirements are model-specific and should be checked for the model you are using. If your model requires `ToolMessage`s after tool calls and/or `AIMessage`s after `ToolMessage`s and your examples only include expected tool calls and not the actual tool outputs, you can try adding dummy `ToolMessage`s / `AIMessage`s to the end of each example with generic contents to satisfy the API constraints. +In these cases it's especially worth experimenting with inserting your examples as strings versus messages, as having dummy messages can adversely affect certain models. + +You can see a case study of how Anthropic and OpenAI respond to different few-shot prompting techniques on two different tool calling benchmarks [here](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/). diff --git a/docs/core_docs/docs/concepts/index.mdx b/docs/core_docs/docs/concepts/index.mdx new file mode 100644 index 000000000000..d01291666ab4 --- /dev/null +++ b/docs/core_docs/docs/concepts/index.mdx @@ -0,0 +1,84 @@ +# Conceptual guide + +This guide provides explanations of the key concepts behind the LangChain framework and AI applications more broadly. + +We recommend that you go through at least one of the [Tutorials](/docs/tutorials) before diving into the conceptual guide. This will provide practical context that will make it easier to understand the concepts discussed here. + +The conceptual guide does not cover step-by-step instructions or specific implementation examples — those are found in the [How-to guides](/docs/how_to/) and [Tutorials](/docs/tutorials). For detailed reference material, please see the [API reference](https://api.js.langchain.com/). + +## High level + +- **[Why LangChain?](/docs/concepts/why_langchain)**: Overview of the value that LangChain provides. +- **[Architecture](/docs/concepts/architecture)**: How packages are organized in the LangChain ecosystem. + +## Concepts + +- **[Chat models](/docs/concepts/chat_models)**: LLMs exposed via a chat API that process sequences of messages as input and output a message. +- **[Messages](/docs/concepts/messages)**: The unit of communication in chat models, used to represent model input and output. +- **[Chat history](/docs/concepts/chat_history)**: A conversation represented as a sequence of messages, alternating between user messages and model responses. +- **[Tools](/docs/concepts/tools)**: A function with an associated schema defining the function's name, description, and the arguments it accepts. +- **[Tool calling](/docs/concepts/tool_calling)**: A type of chat model API that accepts tool schemas, along with messages, as input and returns invocations of those tools as part of the output message. +- **[Structured output](/docs/concepts/structured_outputs)**: A technique to make a chat model respond in a structured format, such as JSON that matches a given schema. +- **[Memory](https://langchain-ai.github.io/langgraphjs/concepts/memory/)**: Information about a conversation that is persisted so that it can be used in future conversations. 
+- **[Multimodality](/docs/concepts/multimodality)**: The ability to work with data that comes in different forms, such as text, audio, images, and video. +- **[Runnable interface](/docs/concepts/runnables)**: The base abstraction that many LangChain components and the LangChain Expression Language are built on. +- **[LangChain Expression Language (LCEL)](/docs/concepts/lcel)**: A syntax for orchestrating LangChain components. Most useful for simpler applications. +- **[Document loaders](/docs/concepts/document_loaders)**: Load a source as a list of documents. +- **[Retrieval](/docs/concepts/retrieval)**: Information retrieval systems can retrieve structured or unstructured data from a datasource in response to a query. +- **[Text splitters](/docs/concepts/text_splitters)**: Split long text into smaller chunks that can be individually indexed to enable granular retrieval. +- **[Embedding models](/docs/concepts/embedding_models)**: Models that represent data such as text or images in a vector space. +- **[Vector stores](/docs/concepts/vectorstores)**: Storage of and efficient search over vectors and associated metadata. +- **[Retriever](/docs/concepts/retrievers)**: A component that returns relevant documents from a knowledge base in response to a query. +- **[Retrieval Augmented Generation (RAG)](/docs/concepts/rag)**: A technique that enhances language models by combining them with external knowledge bases. +- **[Agents](/docs/concepts/agents)**: Use a [language model](/docs/concepts/chat_models) to choose a sequence of actions to take. Agents can interact with external resources via [tool](/docs/concepts/tools). +- **[Prompt templates](/docs/concepts/prompt_templates)**: Component for factoring out the static parts of a model "prompt" (usually a sequence of messages). Useful for serializing, versioning, and reusing these static parts. +- **[Output parsers](/docs/concepts/output_parsers)**: Responsible for taking the output of a model and transforming it into a more suitable format for downstream tasks. Output parsers were primarily useful prior to the general availability of [tool calling](/docs/concepts/tool_calling) and [structured outputs](/docs/concepts/structured_outputs). +- **[Few-shot prompting](/docs/concepts/few_shot_prompting)**: A technique for improving model performance by providing a few examples of the task to perform in the prompt. +- **[Example selectors](/docs/concepts/example_selectors)**: Used to select the most relevant examples from a dataset based on a given input. Example selectors are used in few-shot prompting to select examples for a prompt. +- **[Callbacks](/docs/concepts/callbacks)**: Callbacks enable the execution of custom auxiliary code in built-in components. Callbacks are used to stream outputs from LLMs in LangChain, trace the intermediate steps of an application, and more. +- **[Tracing](/docs/concepts/tracing)**: The process of recording the steps that an application takes to go from input to output. Tracing is essential for debugging and diagnosing issues in complex applications. +- **[Evaluation](/docs/concepts/evaluation)**: The process of assessing the performance and effectiveness of AI applications. This involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose. This process is vital for building reliable applications. 
+
+## Glossary
+
+- **[AIMessageChunk](/docs/concepts/messages#aimessagechunk)**: A partial response from an AI message. Used when streaming responses from a chat model.
+- **[AIMessage](/docs/concepts/messages#aimessage)**: Represents a complete response from an AI model.
+- **[streamEvents](/docs/concepts/streaming#streamevents)**: Stream granular information from [LCEL](/docs/concepts/lcel) chains.
+- **[StructuredTool](/docs/concepts/tools#structuredtool)**: The base class for all tools in LangChain.
+- **[batch](/docs/concepts/runnables)**: Use to execute a Runnable on a batch of inputs.
+- **[bindTools](/docs/concepts/chat_models#bind-tools)**: Allows models to interact with tools.
+- **[Caching](/docs/concepts/chat_models#caching)**: Storing results to avoid redundant calls to a chat model.
+- **[Context window](/docs/concepts/chat_models#context-window)**: The maximum size of input a chat model can process.
+- **[Conversation patterns](/docs/concepts/chat_history#conversation-patterns)**: Common patterns in chat interactions.
+- **[Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html)**: LangChain's representation of a document.
+- **[Embedding models](/docs/concepts/multimodality#embedding-models)**: Models that generate vector embeddings for various data types.
+- **[HumanMessage](/docs/concepts/messages#humanmessage)**: Represents a message from a human user.
+- **[input and output types](/docs/concepts/runnables#input-and-output-types)**: Types used for input and output in Runnables.
+- **[Integration packages](/docs/concepts/architecture#partner-packages)**: Third-party packages that integrate with LangChain.
+- **[invoke](/docs/concepts/runnables)**: A standard method to invoke a Runnable.
+- **[JSON mode](/docs/concepts/structured_outputs#json-mode)**: Returning responses in JSON format.
+- **[@langchain/community](/docs/concepts/architecture#langchain-community)**: Community-driven components for LangChain.
+- **[@langchain/core](/docs/concepts/architecture#langchain-core)**: Core LangChain package. Includes base interfaces and in-memory implementations.
+- **[langchain](/docs/concepts/architecture#langchain)**: A package for higher-level components (e.g., some pre-built chains).
+- **[@langchain/langgraph](/docs/concepts/architecture#langgraph)**: Powerful orchestration layer for LangChain. Use to build complex pipelines and workflows.
+- **[Managing chat history](/docs/concepts/chat_history#managing-chat-history)**: Techniques to maintain and manage the chat history.
+- **[OpenAI format](/docs/concepts/messages#openai-format)**: OpenAI's message format for chat models.
+- **[Propagation of RunnableConfig](/docs/concepts/runnables#propagation-of-runnableconfig)**: Propagating configuration through Runnables.
+- **[RemoveMessage](/docs/concepts/messages#removemessage)**: An abstraction used to remove a message from chat history, used primarily in LangGraph.
+- **[role](/docs/concepts/messages#role)**: Represents the role (e.g., user, assistant) of a chat message.
+- **[RunnableConfig](/docs/concepts/runnables#runnableconfig)**: Use to pass run-time information to Runnables (e.g., `runName`, `runId`, `tags`, `metadata`, `maxConcurrency`, `recursionLimit`, `configurable`).
+- **[Standard parameters for chat models](/docs/concepts/chat_models#standard-parameters)**: Parameters such as API key, `temperature`, and `maxTokens`.
+- **[stream](/docs/concepts/streaming)**: Use to stream output from a Runnable or a graph.
+- **[Tokenization](/docs/concepts/tokens)**: The process of converting data into tokens and vice versa. +- **[Tokens](/docs/concepts/tokens)**: The basic unit that a language model reads, processes, and generates under the hood. +- **[Tool artifacts](/docs/concepts/tools#tool-artifacts)**: Add artifacts to the output of a tool that will not be sent to the model, but will be available for downstream processing. +- **[Tool binding](/docs/concepts/tool_calling#tool-binding)**: Binding tools to models. +- **[`tool`](/docs/concepts/tools)**: Function for creating tools in LangChain. +- **[Toolkits](/docs/concepts/tools#toolkits)**: A collection of tools that can be used together. +- **[ToolMessage](/docs/concepts/messages#toolmessage)**: Represents a message that contains the results of a tool execution. +- **[Vector stores](/docs/concepts/vectorstores)**: Datastores specialized for storing and efficiently searching vector embeddings. +- **[withStructuredOutput](/docs/concepts/structured_outputs/#structured-output-method)**: A helper method for chat models that natively support [tool calling](/docs/concepts/tool_calling) to get structured output matching a given schema specified via Zod, JSON schema or a function. + +import RedirectAnchors from "@theme/RedirectAnchors"; + + diff --git a/docs/core_docs/docs/concepts/key_value_stores.mdx b/docs/core_docs/docs/concepts/key_value_stores.mdx new file mode 100644 index 000000000000..fa280d948839 --- /dev/null +++ b/docs/core_docs/docs/concepts/key_value_stores.mdx @@ -0,0 +1,38 @@ +# Key-value stores + +## Overview + +LangChain provides a key-value store interface for storing and retrieving data. + +LangChain includes a [`BaseStore`](https://api.js.langchain.com/classes/_langchain_core.stores.BaseStore.html) interface, +which allows for storage of arbitrary data. However, LangChain components that require KV-storage accept a +more specific `BaseStore` instance that stores binary data (referred to as a `ByteStore`), and internally take care of +encoding and decoding data for their specific needs. + +This means that as a user, you only need to think about one type of store rather than different ones for different types of data. + +## Usage + +The key-value store interface in LangChain is used primarily for: + +1. Caching [embeddings](/docs/concepts/embedding_models) via [CachedBackedEmbeddings](https://api.js.langchain.com/classes/langchain.embeddings_cache_backed.CacheBackedEmbeddings.html) to avoid recomputing embeddings for repeated queries or when re-indexing content. + +2. As a simple [Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) persistence layer in some retrievers. + +Please see these how-to guides for more information: + +- [How to cache embeddings guide](/docs/how_to/caching_embeddings/). +- [How to retriever using multiple vectors per document](/docs/how_to/custom_retriever/). + +## Interface + +All [`BaseStore`](https://api.js.langchain.com/classes/_langchain_core.stores.BaseStore.html)s support the following interface. 
Note that the interface allows for modifying **multiple** key-value pairs at once: + +- `mget(keys: string[]): Promise<(Uint8Array | undefined)[]>`: get the contents of multiple keys, returning `undefined` if the key does not exist +- `mset(keyValuePairs: [string, Uint8Array][]): Promise`: set the contents of multiple keys +- `mdelete(keys: string[]): Promise`: delete multiple keys +- `yieldKeys(prefix?: string): AsyncIterator`: yield all keys in the store, optionally filtering by a prefix + +## Integrations + +Please reference the [stores integration page](/docs/integrations/stores/) for a list of available key-value store integrations. diff --git a/docs/core_docs/docs/concepts/lcel.mdx b/docs/core_docs/docs/concepts/lcel.mdx new file mode 100644 index 000000000000..f6c94bcefb11 --- /dev/null +++ b/docs/core_docs/docs/concepts/lcel.mdx @@ -0,0 +1,163 @@ +# LangChain Expression Language (LCEL) + +:::info Prerequisites + +- [Runnable Interface](/docs/concepts/runnables) + +::: + +The **L**ang**C**hain **E**xpression **L**anguage (LCEL) takes a [declarative](https://en.wikipedia.org/wiki/Declarative_programming) approach to building new [Runnables](/docs/concepts/runnables) from existing Runnables. + +This means that you describe what you want to happen, rather than how you want it to happen, allowing LangChain to optimize the run-time execution of the chains. + +We often refer to a `Runnable` created using LCEL as a "chain". It's important to remember that a "chain" is `Runnable` and it implements the full [Runnable Interface](/docs/concepts/runnables). + +:::note + +- The [LCEL cheatsheet](/docs/how_to/lcel_cheatsheet/) shows common patterns that involve the Runnable interface and LCEL expressions. +- Please see the following list of [how-to guides](/docs/how_to/#langchain-expression-language-lcel) that cover common tasks with LCEL. +- A list of built-in `Runnables` can be found in the [LangChain Core API Reference](https://api.js.langchain.com/modules/_langchain_core.runnables.html). Many of these Runnables are useful when composing custom "chains" in LangChain using LCEL. + +::: + +## Benefits of LCEL + +LangChain optimizes the run-time execution of chains built with LCEL in a number of ways: + +- **Optimize parallel execution**: Run Runnables in parallel using [RunnableParallel](#RunnableParallel) or run multiple inputs through a given chain in parallel using the [Runnable Batch API](/docs/concepts/runnables#batch). Parallel execution can significantly reduce the latency as processing can be done in parallel instead of sequentially. +- **Simplify streaming**: LCEL chains can be streamed, allowing for incremental output as the chain is executed. LangChain can optimize the streaming of the output to minimize the time-to-first-token(time elapsed until the first chunk of output from a [chat model](/docs/concepts/chat_models) or [llm](/docs/concepts/text_llms) comes out). + +Other benefits include: + +- [**Seamless LangSmith tracing**](https://docs.smith.langchain.com) + As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step. + With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability. +- **Standard API**: Because all chains are built using the Runnable interface, they can be used in the same way as any other Runnable. 
+- [**Deployable with LangServe**](/docs/concepts/architecture#langserve): Chains built with LCEL can be deployed using for production use. + +## Should I use LCEL? + +LCEL is an [orchestration solution]() -- it allows LangChain to handle run-time execution of chains in an optimized way. + +While we have seen users run chains with hundreds of steps in production, we generally recommend using LCEL for simpler orchestration tasks. When the application requires complex state management, branching, cycles or multiple agents, we recommend that users take advantage of [LangGraph](/docs/concepts/architecture#langgraph). + +In LangGraph, users define graphs that specify the flow of the application. This allows users to keep using LCEL within individual nodes when LCEL is needed, while making it easy to define complex orchestration logic that is more readable and maintainable. + +Here are some guidelines: + +- If you are making a single LLM call, you don't need LCEL; instead call the underlying [chat model](/docs/concepts/chat_models) directly. +- If you have a simple chain (e.g., prompt + llm + parser, simple retrieval set up etc.), LCEL is a reasonable fit, if you're taking advantage of the LCEL benefits. +- If you're building a complex chain (e.g., with branching, cycles, multiple agents, etc.) use [LangGraph](/docs/concepts/architecture#langgraph) instead. Remember that you can always use LCEL within individual nodes in LangGraph. + +## Composition Primitives + +`LCEL` chains are built by composing existing `Runnables` together. The two main composition primitives are [RunnableSequence](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableSequence.html) and [RunnableParallel](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableParallel.html). + +Many other composition primitives (e.g., [RunnableAssign](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableAssign.html)) can be thought of as variations of these two primitives. + +:::note +You can find a list of all composition primitives in the [LangChain Core API Reference](https://api.js.langchain.com/modules/_langchain_core.runnables.html). +::: + +### RunnableSequence + +`RunnableSequence` is a composition primitive that allows you "chain" multiple runnables sequentially, with the output of one runnable serving as the input to the next. + +```typescript +import { RunnableSequence } from "@langchain/core/runnables"; +const chain = new RunnableSequence({ + first: runnable1, + // Optional, use if you have more than two runnables + // middle: [...], + last: runnable2, +}); +``` + +Invoking the `chain` with some input: + +```typescript +const finalOutput = await chain.invoke(someInput); +``` + +corresponds to the following: + +```typescript +const output1 = await runnable1.invoke(someInput); +const finalOutput = await runnable2.invoke(output1); +``` + +:::note +`runnable1` and `runnable2` are placeholders for any `Runnable` that you want to chain together. +::: + +### RunnableParallel + +`RunnableParallel` is a composition primitive that allows you to run multiple runnables concurrently, with the same input provided to each. 
+
+```typescript
+import { RunnableParallel } from "@langchain/core/runnables";
+const chain = new RunnableParallel({
+  key1: runnable1,
+  key2: runnable2,
+});
+```
+
+Invoking the `chain` with some input:
+
+```typescript
+const finalOutput = await chain.invoke(someInput);
+```
+
+Will yield a `finalOutput` object with the same keys as the object passed to `RunnableParallel`, but with the values replaced by the output of the corresponding runnable.
+
+```typescript
+{
+  key1: await runnable1.invoke(someInput),
+  key2: await runnable2.invoke(someInput),
+}
+```
+
+Recall that the runnables are executed in parallel, so while the result is the same as the object construction shown above, the execution time is much faster.
+
+## Composition Syntax
+
+The usage of `RunnableSequence` and `RunnableParallel` is so common that we created a shorthand syntax for using them. This helps to make the code more readable and concise.
+
+### The `pipe` method
+
+You can `pipe` runnables together using the `.pipe(runnable)` method.
+
+```typescript
+const chain = runnable1.pipe(runnable2);
+```
+
+is equivalent to:
+
+```typescript
+const chain = new RunnableSequence({
+  first: runnable1,
+  last: runnable2,
+});
+```
+
+#### RunnableLambda functions
+
+You can turn generic TypeScript functions into runnables using the `RunnableLambda` class.
+
+```typescript
+import { RunnableLambda } from "@langchain/core/runnables";
+
+const someFunc = RunnableLambda.from((input) => {
+  return input;
+});
+
+const chain = someFunc.pipe(runnable1);
+```
+
+## Legacy chains
+
+LCEL aims to provide consistency around behavior and customization over legacy subclassed chains such as `LLMChain` and `ConversationalRetrievalChain`. Many of these legacy chains hide important details like prompts, and as a wider variety of viable models emerge, customization has become more and more important.
+
+For guides on how to do specific tasks with LCEL, check out [the relevant how-to guides](/docs/how_to/#langchain-expression-language-lcel).
diff --git a/docs/core_docs/docs/concepts/messages.mdx b/docs/core_docs/docs/concepts/messages.mdx
new file mode 100644
index 000000000000..183c68bf1715
--- /dev/null
+++ b/docs/core_docs/docs/concepts/messages.mdx
@@ -0,0 +1,265 @@
+# Messages
+
+:::info Prerequisites
+
+- [Chat Models](/docs/concepts/chat_models)
+
+:::
+
+## Overview
+
+Messages are the unit of communication in [chat models](/docs/concepts/chat_models). They are used to represent the input and output of a chat model, as well as any additional context or metadata that may be associated with a conversation.
+
+Each message has a **role** (e.g., "user", "assistant"), **content** (e.g., text, multimodal data), and additional metadata that can vary depending on the chat model provider.
+
+LangChain provides a unified message format that can be used across chat models, allowing users to work with different chat models without worrying about the specific details of the message format used by each model provider.
+
+## What is inside a message?
+
+A message typically consists of the following pieces of information:
+
+- **Role**: The role of the message (e.g., "user", "assistant").
+- **Content**: The content of the message (e.g., text, multimodal data).
+- Additional metadata: id, name, [token usage](/docs/concepts/tokens), and other model-specific metadata.
+
+### Role
+
+Roles are used to distinguish between different types of messages in a conversation and help the chat model understand how to respond to a given sequence of messages.
+
+| **Role**              | **Description**                                                                                                                                                                                                  |
+| --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **system**            | Used to tell the chat model how to behave and provide additional context. Not supported by all chat model providers.                                                                                            |
+| **user**              | Represents input from a user interacting with the model, usually in the form of text or other interactive input.                                                                                                |
+| **assistant**         | Represents a response from the model, which can include text or a request to invoke tools.                                                                                                                      |
+| **tool**              | A message used to pass the results of a tool invocation back to the model after external data or processing has been retrieved. Used with chat models that support [tool calling](/docs/concepts/tool_calling). |
+| **function** (legacy) | This is a legacy role, corresponding to OpenAI's legacy function-calling API. The **tool** role should be used instead.                                                                                         |
+
+### Content
+
+The content of a message is either text or an array of objects representing [multimodal data](/docs/concepts/multimodality) (e.g., images, audio, video). The exact format of the content can vary between different chat model providers.
+
+Currently, most chat models support text as the primary content type, with some models also supporting multimodal data. However, support for multimodal data is still limited across most chat model providers.
+
+For more information see:
+
+- [HumanMessage](#humanmessage) -- for content in the input from the user.
+- [AIMessage](#aimessage) -- for content in the response from the model.
+- [Multimodality](/docs/concepts/multimodality) -- for more information on multimodal content.
+
+### Other Message Data
+
+Depending on the chat model provider, messages can include other data such as:
+
+- **ID**: An optional unique identifier for the message.
+- **Name**: An optional `name` property which makes it possible to differentiate between different entities/speakers with the same role. Not all models support this!
+- **Metadata**: Additional information about the message, such as timestamps, token usage, etc.
+- **Tool Calls**: A request made by the model to call one or more tools. See [tool calling](/docs/concepts/tool_calling) for more information.
+
+## Conversation Structure
+
+The sequence of messages passed into a chat model should follow a specific structure to ensure that the chat model can generate a valid response.
+
+For example, a typical conversation structure might look like this:
+
+1. **User Message**: "Hello, how are you?"
+2. **Assistant Message**: "I'm doing well, thank you for asking."
+3. **User Message**: "Can you tell me a joke?"
+4. **Assistant Message**: "Sure! Why did the scarecrow win an award? Because he was outstanding in his field!"
+
+Please read the [chat history](/docs/concepts/chat_history) guide for more information on managing chat history and ensuring that the conversation structure is correct.
+
+## LangChain Messages
+
+LangChain provides a unified message format that can be used across all chat models, allowing users to work with different chat models without worrying about the specific details of the message format used by each model provider.
+
+LangChain messages are classes that subclass from a [BaseMessage](https://api.js.langchain.com/classes/_langchain_core.messages.BaseMessage.html).
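+
+For example, a short conversation can be expressed as a list of message objects and passed directly to a chat model. This is a minimal sketch: `model` is assumed to be any chat model integration (for example, `ChatOpenAI`) instantiated elsewhere.
+
+```typescript
+import { SystemMessage, HumanMessage } from "@langchain/core/messages";
+
+// `model` is assumed to be any chat model integration, e.g. ChatOpenAI.
+const response = await model.invoke([
+  new SystemMessage("You are a helpful assistant."),
+  new HumanMessage("What is the capital of France?"),
+]);
+
+// The result is an AIMessage produced by the model.
+console.log(response.content);
+```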
+ +The five main message types are: + +- [SystemMessage](#systemmessage): corresponds to **system** role +- [HumanMessage](#humanmessage): corresponds to **user** role +- [AIMessage](#aimessage): corresponds to **assistant** role +- [AIMessageChunk](#aimessagechunk): corresponds to **assistant** role, used for [streaming](/docs/concepts/streaming) responses +- [ToolMessage](#toolmessage): corresponds to **tool** role + +Other important messages include: + +- [RemoveMessage](#removemessage) -- does not correspond to any role. This is an abstraction, mostly used in [LangGraph](/docs/concepts/architecture#langgraph) to manage chat history. +- **Legacy** [FunctionMessage](#legacy-functionmessage): corresponds to the **function** role in OpenAI's **legacy** function-calling API. + +You can find more information about **messages** in the [API Reference](https://api.js.langchain.com/modules/_langchain_core.messages.html). + +### SystemMessage + +A `SystemMessage` is used to prime the behavior of the AI model and provide additional context, such as instructing the model to adopt a specific persona or setting the tone of the conversation (e.g., "This is a conversation about cooking"). + +Different chat providers may support system message in one of the following ways: + +- **Through a "system" message role**: In this case, a system message is included as part of the message sequence with the role explicitly set as "system." +- **Through a separate API parameter for system instructions**: Instead of being included as a message, system instructions are passed via a dedicated API parameter. +- **No support for system messages**: Some models do not support system messages at all. + +Most major chat model providers support system instructions via either a chat message or a separate API parameter. LangChain will automatically adapt based on the provider’s capabilities. If the provider supports a separate API parameter for system instructions, LangChain will extract the content of a system message and pass it through that parameter. + +If no system message is supported by the provider, in most cases LangChain will attempt to incorporate the system message's content into a HumanMessage or raise an exception if that is not possible. However, this behavior is not yet consistently enforced across all implementations, and if using a less popular implementation of a chat model (e.g., an implementation from the `@langchain/community` package) it is recommended to check the specific documentation for that model. + +### HumanMessage + +The `HumanMessage` corresponds to the **"user"** role. A human message represents input from a user interacting with the model. + +#### Text Content + +Most chat models expect the user input to be in the form of text. + +```typescript +import { HumanMessage } from "@langchain/core/messages"; + +await model.invoke([new HumanMessage("Hello, how are you?")]); +``` + +:::tip +When invoking a chat model with a string as input, LangChain will automatically convert the string into a `HumanMessage` object. This is mostly useful for quick testing. + +```typescript +await model.invoke("Hello, how are you?"); +``` + +::: + +#### Multi-modal Content + +Some chat models accept multimodal inputs, such as images, audio, video, or files like PDFs. + +Please see the [multimodality](/docs/concepts/multimodality) guide for more information. + +### AIMessage + +`AIMessage` is used to represent a message with the role **"assistant"**. 
This is the response from the model, which can include text or a request to invoke tools. It could also include other media types like images, audio, or video -- though this is still uncommon at the moment. + +```typescript +import { HumanMessage } from "@langchain/core/messages"; + +const aiMessage = await model.invoke([new HumanMessage("Tell me a joke")]); +console.log(aiMessage); +``` + +```text +AIMessage({ + content: "Why did the chicken cross the road?\n\nTo get to the other side!", + tool_calls: [], + response_metadata: { ... }, + usage_metadata: { ... }, +}) +``` + +An `AIMessage` has the following attributes. The attributes which are **standardized** are the ones that LangChain attempts to standardize across different chat model providers. **raw** fields are specific to the model provider and may vary. + +| Attribute | Standardized/Raw | Description | +| -------------------- | :--------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | Raw | Usually a string, but can be a list of content blocks. See [content](#content) for details. | +| `tool_calls` | Standardized | Tool calls associated with the message. See [tool calling](/docs/concepts/tool_calling) for details. | +| `invalid_tool_calls` | Standardized | Tool calls with parsing errors associated with the message. See [tool calling](/docs/concepts/tool_calling) for details. | +| `usage_metadata` | Standardized | Usage metadata for a message, such as [token counts](/docs/concepts/tokens). See [Usage Metadata API Reference](https://api.js.langchain.com/types/_langchain_core.messages.UsageMetadata.html). | +| `id` | Standardized | An optional unique identifier for the message, ideally provided by the provider/model that created the message. | +| `response_metadata` | Raw | Response metadata, e.g., response headers, logprobs, token counts. | + +#### content + +The **content** property of an `AIMessage` represents the response generated by the chat model. + +The content is either: + +- **text** -- the norm for virtually all chat models. +- A **array of objects** -- Each object represents a content block and is associated with a `type`. + - Used by Anthropic for surfacing agent thought process when doing [tool calling](/docs/concepts/tool_calling). + - Used by OpenAI for audio outputs. Please see [multi-modal content](/docs/concepts/multimodality) for more information. + +:::important +The **content** property is **not** standardized across different chat model providers, mostly because there are +still few examples to generalize from. +::: + +### AIMessageChunk + +It is common to [stream](/docs/concepts/streaming) responses for the chat model as they are being generated, so the user can see the response in real-time instead of waiting for the entire response to be generated before displaying it. + +It is returned from the `stream`, and `streamEvents` methods of the chat model. + +For example, + +```typescript +for await (const chunk of model.stream([ + new HumanMessage("what color is the sky?"), +])) { + console.log(chunk); +} +``` + +`AIMessageChunk` follows nearly the same structure as `AIMessage`, but uses a different [ToolCallChunk](https://api.js.langchain.com/types/_langchain_core.messages_tool.ToolCallChunk.html) +to be able to stream tool calling in a standardized manner. 
+ +#### Aggregating + +`MessageChunks` have a `concat` method you can use, or you can import it. This is useful when you want to display the final response to the user. + +```typescript +const aiMessage = chunk1.concat(chunk2).concat(chunk3).concat(...); +``` + +or + +```typescript +import { concat } from "@langchain/core/utils/stream"; +const aiMessage = concat(chunk1, chunk2); +``` + +### ToolMessage + +This represents a message with role "tool", which contains the result of [calling a tool](/docs/concepts/tool_calling). In addition to `role` and `content`, this message has: + +- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result. +- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model. + +Please see [tool calling](/docs/concepts/tool_calling) for more information. + +### RemoveMessage + +This is a special message type that does not correspond to any roles. It is used +for managing chat history in [LangGraph](/docs/concepts/architecture#langgraph). + +Please see the following for more information on how to use the `RemoveMessage`: + +- [Memory conceptual guide](https://langchain-ai.github.io/langgraphjs/concepts/memory/) +- [How to delete messages](https://langchain-ai.github.io/langgraphjs/how-tos/delete-messages/) + +### (Legacy) FunctionMessage + +This is a legacy message type, corresponding to OpenAI's legacy function-calling API. `ToolMessage` should be used instead to correspond to the updated tool-calling API. + +## OpenAI Format + +### Inputs + +Chat models also accept OpenAI's format as **inputs** to chat models: + +```typescript +await chatModel.invoke([ + { + role: "user", + content: "Hello, how are you?", + }, + { + role: "assistant", + content: "I'm doing well, thank you for asking.", + }, + { + role: "user", + content: "Can you tell me a joke?", + }, +]); +``` + +### Outputs + +At the moment, the output of the model will be in terms of LangChain messages, so you will need to convert the output to the OpenAI format if you +need OpenAI format for the output as well. diff --git a/docs/core_docs/docs/concepts/multimodality.mdx b/docs/core_docs/docs/concepts/multimodality.mdx new file mode 100644 index 000000000000..8a0f9d082175 --- /dev/null +++ b/docs/core_docs/docs/concepts/multimodality.mdx @@ -0,0 +1,94 @@ +# Multimodality + +## Overview + +**Multimodality** refers to the ability to work with data that comes in different forms, such as text, audio, images, and video. Multimodality can appear in various components, allowing models and systems to handle and process a mix of these data types seamlessly. + +- **Chat Models**: These could, in theory, accept and generate multimodal inputs and outputs, handling a variety of data types like text, images, audio, and video. +- **Embedding Models**: Embedding Models can represent multimodal content, embedding various forms of data—such as text, images, and audio—into vector spaces. +- **Vector Stores**: Vector stores could search over embeddings that represent multimodal data, enabling retrieval across different types of information. + +## Multimodality in chat models + +:::info Pre-requisites + +- [Chat models](/docs/concepts/chat_models) +- [Messages](/docs/concepts/messages) + +::: + +Multimodal support is still relatively new and less common, model providers have not yet standardized on the "best" way to define the API. 
As such, LangChain's multimodal abstractions are lightweight and flexible, designed to accommodate different model providers' APIs and interaction patterns, but are **not** standardized across models. + +### How to use multimodal models + +- Use the [chat model integration table](/docs/integrations/chat/) to identify which models support multimodality. +- Reference the [relevant how-to guides](/docs/how_to/#multimodal) for specific examples of how to use multimodal models. + +### What kind of multimodality is supported? + +#### Inputs + +Some models can accept multimodal inputs, such as images, audio, video, or files. The types of multimodal inputs supported depend on the model provider. For instance, [Google's Gemini](/docs/integrations/chat/google_generativeai/) supports documents like PDFs as inputs. + +Most chat models that support **multimodal inputs** also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations. + +The gist of passing multimodal inputs to a chat model is to use content blocks that specify a type and corresponding data. For example, to pass an image to a chat model: + +```typescript +import { HumanMessage } from "@langchain/core/messages"; + +const message = new HumanMessage({ + content: [ + { type: "text", text: "describe the weather in this image" }, + { type: "image_url", image_url: { url: image_url } }, + ], +}); +const response = await model.invoke([message]); +``` + +:::caution +The exact format of the content blocks may vary depending on the model provider. Please refer to the chat model's +integration documentation for the correct format. Find the integration in the [chat model integration table](/docs/integrations/chat/). +::: + +#### Outputs + +Virtually no popular chat models support multimodal outputs at the time of writing (October 2024). + +The only exception is OpenAI's chat model ([gpt-4o-audio-preview](/docs/integrations/chat/openai/)), which can generate audio outputs. + +Multimodal outputs will appear as part of the [AIMessage](/docs/concepts/messages/#aimessage) response object. + +Please see the [ChatOpenAI](/docs/integrations/chat/openai/) for more information on how to use multimodal outputs. + +#### Tools + +Currently, no chat model is designed to work **directly** with multimodal data in a [tool call request](/docs/concepts/tool_calling) or [ToolMessage](/docs/concepts/tool_calling) result. + +However, a chat model can easily interact with multimodal data by invoking tools with references (e.g., a URL) to the multimodal data, rather than the data itself. For example, any model capable of [tool calling](/docs/concepts/tool_calling) can be equipped with tools to download and process images, audio, or video. + +## Multimodality in embedding models + +:::info Prerequisites + +- [Embedding Models](/docs/concepts/embedding_models) + +::: + +**Embeddings** are vector representations of data used for tasks like similarity search and retrieval. + +The current [embedding interface](https://api.js.langchain.com/classes/_langchain_core.embeddings.Embeddings.html) used in LangChain is optimized entirely for text-based data, and will **not** work with multimodal data. + +As use cases involving multimodal search and retrieval tasks become more common, we expect to expand the embedding interface to accommodate other data types like images, audio, and video. 
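+
+For reference, here is a minimal sketch of that text-only interface; `OpenAIEmbeddings` is used purely as an example integration, and any other text embedding integration exposes the same `embedQuery` / `embedDocuments` methods.
+
+```typescript
+import { OpenAIEmbeddings } from "@langchain/openai";
+
+// The current interface accepts text only; multimodal content (e.g., an image)
+// would first need to be described or transcribed as text before embedding.
+const embeddings = new OpenAIEmbeddings();
+const vector = await embeddings.embedQuery("A photo of a cat on a skateboard");
+console.log(vector.length); // dimensionality of the embedding vector
+```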
+ +## Multimodality in vector stores + +:::info Prerequisites + +- [Vector stores](/docs/concepts/vectorstores) + +::: + +Vector stores are databases for storing and retrieving embeddings, which are typically used in search and retrieval tasks. Similar to embeddings, vector stores are currently optimized for text-based data. + +As use cases involving multimodal search and retrieval tasks become more common, we expect to expand the vector store interface to accommodate other data types like images, audio, and video. diff --git a/docs/core_docs/docs/concepts/output_parsers.mdx b/docs/core_docs/docs/concepts/output_parsers.mdx new file mode 100644 index 000000000000..41babb8a3d40 --- /dev/null +++ b/docs/core_docs/docs/concepts/output_parsers.mdx @@ -0,0 +1,36 @@ +# Output parsers + + + +:::note + +The information here refers to parsers that take a text output from a model try to parse it into a more structured representation. +More and more models are supporting function (or tool) calling, which handles this automatically. +It is recommended to use function/tool calling rather than output parsing. +See documentation for that [here](/docs/concepts/tool_calling). + +::: + +Output parsers are responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks. +Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs. + +LangChain has lots of different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information: + +- **Name**: The name of the output parser +- **Supports Streaming**: Whether the output parser supports streaming. +- **Has Format Instructions**: Whether the output parser has format instructions. This is generally available except when (a) the desired schema is not specified in the prompt but rather in other parameters (like OpenAI function calling), or (b) when the OutputParser wraps another OutputParser. +- **Calls LLM**: Whether this output parser itself calls an LLM. This is usually only done by output parsers that attempt to correct misformatted output. +- **Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific kwargs. +- **Output Type**: The output type of the object returned by the parser. +- **Description**: Our commentary on this output parser and when to use it. + +| Name | Supports Streaming | Has Format Instructions | Calls LLM | Input Type | Output Type | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------ | ----------------------- | --------- | --------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [JSON](https://api.js.langchain.com/classes/_langchain_core.output_parsers.JsonOutputParser.html) | ✅ | ✅ | | `string` \| `Message` | JSON object | Returns a JSON object as specified. Probably the most reliable output parser for getting structured data that does NOT use function calling. | +| [XML](https://api.js.langchain.com/classes/_langchain_core.output_parsers.XMLOutputParser.html) | ✅ | ✅ | | `string` \| `Message` | `object` | Returns a object of tags. Use when XML output is needed. 
Use with models that are good at writing XML (like Anthropic's). | +| [CSV](https://api.js.langchain.com/classes/langchain.output_parsers.CommaSeparatedListOutputParser.html) | ✅ | ✅ | | `string` \| `Message` | `Array` | Returns a list of comma separated values. | +| [OutputFixing](https://api.js.langchain.com/classes/langchain.output_parsers.OutputFixingParser.html) | | | ✅ | `string` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the error message and the bad output to an LLM and ask it to fix the output. | +| [Datetime](https://api.js.langchain.com/classes/langchain.output_parsers.DatetimeOutputParser.html) | | ✅ | | `string` \| `Message` | `Date` | Parses response into a datetime string. | +| [Structured](https://api.js.langchain.com/classes/_langchain_core.output_parsers.StructuredOutputParser.html) | | ✅ | | `string` \| `Message` | `Record` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. | + +For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers). diff --git a/docs/core_docs/docs/concepts/prompt_templates.mdx b/docs/core_docs/docs/concepts/prompt_templates.mdx new file mode 100644 index 000000000000..0065467439c9 --- /dev/null +++ b/docs/core_docs/docs/concepts/prompt_templates.mdx @@ -0,0 +1,126 @@ +# Prompt Templates + +Prompt templates help to translate user input and parameters into instructions for a language model. +This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output. + +Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in. + +Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or a list of messages. +The reason this PromptValue exists is to make it easy to switch between strings and messages. + +There are a few different types of prompt templates: + +## String PromptTemplates + +These prompt templates are used to format a single string, and generally are used for simpler inputs. +For example, a common way to construct and use a PromptTemplate is as follows: + +```typescript +import { PromptTemplate } from "@langchain/core/prompts"; + +const promptTemplate = PromptTemplate.fromTemplate( + "Tell me a joke about {topic}" +); + +await promptTemplate.invoke({ topic: "cats" }); +``` + +```text +StringPromptValue { + value: 'Tell me a joke about cats' +} +``` + +## ChatPromptTemplates + +These prompt templates are used to format a list of messages. These "templates" consist of a list of templates themselves. 
+For example, a common way to construct and use a ChatPromptTemplate is as follows: + +```typescript +import { ChatPromptTemplate } from "@langchain/core/prompts"; + +const promptTemplate = ChatPromptTemplate.fromMessages([ + ["system", "You are a helpful assistant"], + ["user", "Tell me a joke about {topic}"], +]); + +await promptTemplate.invoke({ topic: "cats" }); +``` + +```text +ChatPromptValue { + messages: [ + SystemMessage { + "content": "You are a helpful assistant", + "additional_kwargs": {}, + "response_metadata": {} + }, + HumanMessage { + "content": "Tell me a joke about cats", + "additional_kwargs": {}, + "response_metadata": {} + } + ] +} +``` + +In the above example, this ChatPromptTemplate will construct two messages when called. +The first is a system message, that has no variables to format. +The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in. + +## MessagesPlaceholder + + + +This prompt template is responsible for adding a list of messages in a particular place. +In the above ChatPromptTemplate, we saw how we could format two messages, each one a string. +But what if we wanted the user to pass in a list of messages that we would slot into a particular spot? +This is how you use MessagesPlaceholder. + +```typescript +import { + ChatPromptTemplate, + MessagesPlaceholder, +} from "@langchain/core/prompts"; +import { HumanMessage } from "@langchain/core/messages"; + +const promptTemplate = ChatPromptTemplate.fromMessages([ + ["system", "You are a helpful assistant"], + new MessagesPlaceholder("msgs"), +]); + +await promptTemplate.invoke({ msgs: [new HumanMessage("hi!")] }); +``` + +```text +ChatPromptValue { + messages: [ + SystemMessage { + "content": "You are a helpful assistant", + "additional_kwargs": {}, + "response_metadata": {} + }, + HumanMessage { + "content": "hi!", + "additional_kwargs": {}, + "response_metadata": {} + } + ] +} +``` + +This will produce a list of two messages, the first one being a system message, and the second one being the HumanMessage we passed in. +If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in). +This is useful for letting a list of messages be slotted into a particular spot. + +An alternative way to accomplish the same thing without using the `MessagesPlaceholder` class explicitly is: + +```typescript +const promptTemplate = ChatPromptTemplate.fromMessages([ + ["system", "You are a helpful assistant"], + // highlight-next-line + ["placeholder", "{msgs}"], // <-- This is the changed part +]); +``` + +For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates). diff --git a/docs/core_docs/docs/concepts/rag.mdx b/docs/core_docs/docs/concepts/rag.mdx new file mode 100644 index 000000000000..381c06efce14 --- /dev/null +++ b/docs/core_docs/docs/concepts/rag.mdx @@ -0,0 +1,109 @@ +# Retrieval augmented generation (rag) + +:::info[Prerequisites] + +- [Retrieval](/docs/concepts/retrieval/) + +::: + +## Overview + +Retrieval Augmented Generation (RAG) is a powerful technique that enhances [language models](/docs/concepts/chat_models/) by combining them with external knowledge bases. +RAG addresses [a key limitation of models](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise): models rely on fixed training datasets, which can lead to outdated or incomplete information. 
+When given a query, RAG systems first search a knowledge base for relevant information.
+The system then incorporates this retrieved information into the model's prompt.
+The model uses the provided context to generate a response to the query.
+By bridging the gap between vast language models and dynamic, targeted information retrieval, RAG is a powerful technique for building more capable and reliable AI systems.
+
+## Key concepts
+
+![Conceptual Overview](/img/rag_concepts.png)
+
+(1) **Retrieval system**: Retrieve relevant information from a knowledge base.
+
+(2) **Adding external knowledge**: Pass retrieved information to a model.
+
+## Retrieval system
+
+Models have internal knowledge that is often fixed, or at least not updated frequently due to the high cost of training.
+This limits their ability to answer questions about current events, or to provide specific domain knowledge.
+To address this, there are various knowledge injection techniques like [fine-tuning](https://hamel.dev/blog/posts/fine_tuning_valuable.html) or continued pre-training.
+Both are [costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise) and often [poorly suited](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) for factual retrieval.
+Using a retrieval system offers several advantages:
+
+- **Up-to-date information**: RAG can access and utilize the latest data, keeping responses current.
+- **Domain-specific expertise**: With domain-specific knowledge bases, RAG can provide answers in specific domains.
+- **Reduced hallucination**: Grounding responses in retrieved facts helps minimize false or invented information.
+- **Cost-effective knowledge integration**: RAG offers a more efficient alternative to expensive model fine-tuning.
+
+:::info[Further reading]
+
+See our conceptual guide on [retrieval](/docs/concepts/retrieval/).
+
+:::
+
+## Adding external knowledge
+
+With a retrieval system in place, we need to pass knowledge from this system to the model.
+A RAG pipeline typically achieves this by following these steps:
+
+- Receive an input query.
+- Use the retrieval system to search for relevant information based on the query.
+- Incorporate the retrieved information into the prompt sent to the LLM.
+- Generate a response that leverages the retrieved context.
+
+As an example, here's a simple RAG workflow that passes information from a [retriever](/docs/concepts/retrievers/) to a [chat model](/docs/concepts/chat_models/):
+
+```typescript
+import { ChatOpenAI } from "@langchain/openai";
+
+// Define a system prompt that tells the model how to use the retrieved context
+const systemPrompt = `You are an assistant for question-answering tasks.
+Use the following pieces of retrieved context to answer the question.
+If you don't know the answer, just say that you don't know.
+Use three sentences maximum and keep the answer concise.
+Context: {context}:`; + +// Define a question +const question = + "What are the main components of an LLM-powered autonomous agent system?"; + +// Retrieve relevant documents +const docs = await retriever.invoke(question); + +// Combine the documents into a single string +const docsText = docs.map((d) => d.pageContent).join(""); + +// Populate the system prompt with the retrieved context +const systemPromptFmt = systemPrompt.replace("{context}", docsText); + +// Create a model +const model = new ChatOpenAI({ + model: "gpt-4o", + temperature: 0, +}); + +// Generate a response +const questions = await model.invoke([ + { + role: "system", + content: systemPromptFmt, + }, + { + role: "user", + content: question, + }, +]); +``` + +:::info[Further reading] + +RAG a deep area with many possible optimization and design choices: + +- See [this excellent blog](https://cameronrwolfe.substack.com/p/a-practitioners-guide-to-retrieval?utm_source=profile&utm_medium=reader2) from Cameron Wolfe for a comprehensive overview and history of RAG. +- See our [RAG how-to guides](/docs/how_to/#qa-with-rag). +- See our RAG [tutorials](/docs/tutorials/#working-with-external-knowledge). +- See our RAG from Scratch course, with [code](https://github.com/langchain-ai/rag-from-scratch) and [video playlist](https://www.youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x). +- Also, see our RAG from Scratch course [on Freecodecamp](https://youtu.be/sVcwVQRHIc8?feature=shared). + +::: diff --git a/docs/core_docs/docs/concepts/retrieval.mdx b/docs/core_docs/docs/concepts/retrieval.mdx new file mode 100644 index 000000000000..a040c2c82668 --- /dev/null +++ b/docs/core_docs/docs/concepts/retrieval.mdx @@ -0,0 +1,246 @@ +# Retrieval + +:::info[Prerequisites] + +- [Retrievers](/docs/concepts/retrievers/) +- [Vector stores](/docs/concepts/vectorstores/) +- [Embeddings](/docs/concepts/embedding_models/) +- [Text splitters](/docs/concepts/text_splitters/) + +::: + +:::danger[Security] + +Some of the concepts reviewed here utilize models to generate queries (e.g., for SQL or graph databases). +There are inherent risks in doing this. +Make sure that your database connection permissions are scoped as narrowly as possible for your application's needs. +This will mitigate, though not eliminate, the risks of building a model-driven system capable of querying databases. +For more on general security best practices, see our [security guide](/docs/security/). + +::: + +## Overview + +Retrieval systems are fundamental to many AI applications, efficiently identifying relevant information from large datasets. +These systems accommodate various data formats: + +- Unstructured text (e.g., documents) is often stored in vector stores or lexical search indexes. +- Structured data is typically housed in relational or graph databases with defined schemas. + +Despite this diversity in data formats, modern AI applications increasingly aim to make all types of data accessible through natural language interfaces. +Models play a crucial role in this process by translating natural language queries into formats compatible with the underlying search index or database. +This translation enables more intuitive and flexible interactions with complex data structures. + +## Key concepts + +![Retrieval](/img/retrieval_concept.png) + +(1) **Query analysis**: A process where models transform or construct search queries to optimize retrieval. + +(2) **Information retrieval**: Search queries are used to fetch information from various retrieval systems. 
+ +## Query analysis + +While users typically prefer to interact with retrieval systems using natural language, retrieval systems can specific query syntax or benefit from particular keywords. +Query analysis serves as a bridge between raw user input and optimized search queries. Some common applications of query analysis include: + +1. **Query Re-writing**: Queries can be re-written or expanded to improve semantic or lexical searches. +2. **Query Construction**: Search indexes may require structured queries (e.g., SQL for databases). + +Query analysis employs models to transform or construct optimized search queries from raw user input. + +### Query re-writing + +Retrieval systems should ideally handle a wide spectrum of user inputs, from simple and poorly worded queries to complex, multi-faceted questions. +To achieve this versatility, a popular approach is to use models to transform raw user queries into more effective search queries. +This transformation can range from simple keyword extraction to sophisticated query expansion and reformulation. +Here are some key benefits of using models for query analysis in unstructured data retrieval: + +1. **Query Clarification**: Models can rephrase ambiguous or poorly worded queries for clarity. +2. **Semantic Understanding**: They can capture the intent behind a query, going beyond literal keyword matching. +3. **Query Expansion**: Models can generate related terms or concepts to broaden the search scope. +4. **Complex Query Handling**: They can break down multi-part questions into simpler sub-queries. + +Various techniques have been developed to leverage models for query re-writing, including: + +| Name | When to use | Description | +| --------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer). | +| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. [Paper](https://arxiv.org/pdf/2310.06117). | +| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. [Paper](https://arxiv.org/abs/2212.10496). 
| + +As an example, query decomposition can simply be accomplished using prompting and a structured output that enforces a list of sub-questions. +These can then be run sequentially or in parallel on a downstream retrieval system. + +```typescript +import { z } from "zod"; +import { ChatOpenAI } from "@langchain/openai"; +import { SystemMessage, HumanMessage } from "@langchain/core/messages"; + +// Define a zod object for the structured output +const Questions = z.object({ + questions: z + .array(z.string()) + .describe("A list of sub-questions related to the input query."), +}); + +// Create an instance of the model and enforce the output structure +const model = new ChatOpenAI({ modelName: "gpt-4", temperature: 0 }); +const structuredModel = model.withStructuredOutput(Questions); + +// Define the system prompt +const system = `You are a helpful assistant that generates multiple sub-questions related to an input question. +The goal is to break down the input into a set of sub-problems / sub-questions that can be answers in isolation.`; + +// Pass the question to the model +const question = + "What are the main components of an LLM-powered autonomous agent system?"; +const questions = await structuredModel.invoke([ + new SystemMessage(system), + new HumanMessage(question), +]); +``` + +:::tip + +See our RAG from Scratch videos for a few different specific approaches: + +- [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared) +- [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared) +- [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared) +- [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared) + +::: + +### Query construction + +Query analysis also can focus on translating natural language queries into specialized query languages or filters. +This translation is crucial for effectively interacting with various types of databases that house structured or semi-structured data. + +1. **Structured Data examples**: For relational and graph databases, Domain-Specific Languages (DSLs) are used to query data. + + - **Text-to-SQL**: [Converts natural language to SQL](https://paperswithcode.com/task/text-to-sql) for relational databases. + - **Text-to-Cypher**: [Converts natural language to Cypher](https://neo4j.com/labs/neodash/2.4/user-guide/extensions/natural-language-queries/) for graph databases. + +2. **Semi-structured Data examples**: For vectorstores, queries can combine semantic search with metadata filtering. + - **Natural Language to Metadata Filters**: Converts user queries into [appropriate metadata filters](https://docs.pinecone.io/guides/data/filter-with-metadata). + +These approaches leverage models to bridge the gap between user intent and the specific query requirements of different data storage systems. Here are some popular techniques: + +| Name | When to Use | Description | +| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Self Query](/docs/how_to/self_query/) | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. 
| This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
+| [Text to SQL](/docs/tutorials/sql_qa/)   | If users are asking questions that require information housed in a relational database, accessible via SQL.                           | This uses an LLM to transform user input into a SQL query.                                                                                                                                                                                            |
+| [Text-to-Cypher](/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher.                             | This uses an LLM to transform user input into a Cypher query.                                                                                                                                                                                         |
+
+As an example, here is how to use the `SelfQueryRetriever` to convert natural language queries into metadata filters.
+
+```typescript
+import { SelfQueryRetriever } from "langchain/retrievers/self_query";
+import { AttributeInfo } from "langchain/chains/query_constructor";
+import { ChatOpenAI } from "@langchain/openai";
+
+const attributeInfo: AttributeInfo[] = schemaForMetadata;
+const documentContents = "Brief summary of a movie";
+const llm = new ChatOpenAI({ temperature: 0 });
+const retriever = SelfQueryRetriever.fromLLM({
+  llm,
+  vectorStore,
+  documentContents,
+  attributeInfo,
+});
+```
+
+:::info[Further reading]
+
+- See our tutorials on [text-to-SQL](/docs/tutorials/sql_qa/), [text-to-Cypher](/docs/tutorials/graph/), and [query analysis for metadata filters](/docs/tutorials/query_analysis/).
+- See our [blog post overview](https://blog.langchain.dev/query-construction/).
+- See our RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared).
+
+:::
+
+## Information retrieval
+
+### Common retrieval systems
+
+#### Lexical search indexes
+
+Many search engines are based upon matching words in a query to the words in each document.
+This approach is called lexical retrieval, using search [algorithms that are typically based upon word frequencies](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2).
+The intuition is simple: if a word appears frequently both in the user’s query and in a particular document, then this document might be a good match.
+
+The particular data structure used to implement this is often an [_inverted index_](https://www.geeksforgeeks.org/inverted-index/).
+This type of index contains a list of words and a mapping of each word to a list of locations at which it occurs in various documents.
+Using this data structure, it is possible to efficiently match the words in search queries to the documents in which they appear.
+[BM25](https://en.wikipedia.org/wiki/Okapi_BM25#:~:text=BM25%20is%20a%20bag%2Dof,slightly%20different%20components%20and%20parameters.) and [TF-IDF](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) are [two popular lexical search algorithms](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2).
+
+:::info[Further reading]
+
+- See the [BM25](/docs/integrations/retrievers/bm25/) retriever integration.
+
+:::
+
+#### Vector indexes
+
+Vector indexes are an alternative way to index and store unstructured data.
+See our conceptual guide on [vectorstores](/docs/concepts/vectorstores/) for a detailed overview.
+In short, rather than using word frequencies, vectorstores use an [embedding model](/docs/concepts/embedding_models/) to compress documents into high-dimensional vector representations.
+This allows for efficient similarity search over embedding vectors using simple mathematical operations like cosine similarity. + +:::info[Further reading] + +- See our [how-to guide](/docs/how_to/vectorstore_retriever/) for more details on working with vectorstores. +- See our [list of vectorstore integrations](/docs/integrations/vectorstores/). +- See Cameron Wolfe's [blog post](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2) on the basics of vector search. + +::: + +#### Relational databases + +Relational databases are a fundamental type of structured data storage used in many applications. +They organize data into tables with predefined schemas, where each table represents an entity or relationship. +Data is stored in rows (records) and columns (attributes), allowing for efficient querying and manipulation through SQL (Structured Query Language). +Relational databases excel at maintaining data integrity, supporting complex queries, and handling relationships between different data entities. + +:::info[Further reading] + +- See our [tutorial](/docs/tutorials/sql_qa/) for working with SQL databases. + +::: + +#### Graph databases + +Graph databases are a specialized type of database designed to store and manage highly interconnected data. +Unlike traditional relational databases, graph databases use a flexible structure consisting of nodes (entities), edges (relationships), and properties. +This structure allows for efficient representation and querying of complex, interconnected data. +Graph databases store data in a graph structure, with nodes, edges, and properties. +They are particularly useful for storing and querying complex relationships between data points, such as social networks, supply-chain management, fraud detection, and recommendation services + +:::info[Further reading] + +- See our [tutorial](/docs/tutorials/graph/) for working with graph databases. +- See Neo4j's [starter kit for LangChain](https://neo4j.com/developer-blog/langchain-neo4j-starter-kit/). + +::: + +### Retriever + +LangChain provides a unified interface for interacting with various retrieval systems through the [retriever](/docs/concepts/retrievers/) concept. The interface is straightforward: + +1. Input: A query (string) +2. Output: A list of documents (standardized LangChain [Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) objects) + +You can create a retriever using any of the retrieval systems mentioned earlier. The query analysis techniques we discussed are particularly useful here, as they enable natural language interfaces for databases that typically require structured query languages. +For example, you can build a retriever for a SQL database using text-to-SQL conversion. This allows a natural language query (string) to be transformed into a SQL query behind the scenes. +Regardless of the underlying retrieval system, all retrievers in LangChain share a common interface. You can use them with the simple `invoke` method: + +```typescript +const docs = await retriever.invoke(query); +``` + +:::info[Further reading] + +- See our [conceptual guide on retrievers](/docs/concepts/retrievers/). +- See our [how-to guide](/docs/how_to/#retrievers) on working with retrievers. 
+
+:::
diff --git a/docs/core_docs/docs/concepts/retrievers.mdx b/docs/core_docs/docs/concepts/retrievers.mdx
new file mode 100644
index 000000000000..0d07c67a11c0
--- /dev/null
+++ b/docs/core_docs/docs/concepts/retrievers.mdx
@@ -0,0 +1,143 @@
+# Retrievers
+
+
+
+:::info[Prerequisites]
+
+- [Vector stores](/docs/concepts/vectorstores/)
+- [Embeddings](/docs/concepts/embedding_models/)
+- [Text splitters](/docs/concepts/text_splitters/)
+
+:::
+
+## Overview
+
+Many different types of retrieval systems exist, including vectorstores, graph databases, and relational databases.
+With the rise in popularity of large language models, retrieval systems have become an important component in AI applications (e.g., [RAG](/docs/concepts/rag/)).
+Because of their importance and variability, LangChain provides a uniform interface for interacting with different types of retrieval systems.
+The LangChain [retriever](/docs/concepts/retrievers/) interface is straightforward:
+
+1. Input: A query (string)
+2. Output: A list of documents (standardized LangChain [Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) objects)
+
+## Key concept
+
+![Retriever](/img/retriever_concept.png)
+
+All retrievers implement a simple interface for retrieving documents using natural language queries.
+
+## Interface
+
+The only requirement for a retriever is the ability to accept a query and return documents.
+In particular, [LangChain's retriever class](https://api.js.langchain.com/classes/_langchain_core.retrievers.BaseRetriever.html) only requires that the `_getRelevantDocuments` method is implemented, which takes a `query: string` and returns a list of [Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) objects that are most relevant to the query.
+The underlying logic used to get relevant documents is specified by the retriever and can be whatever is most useful for the application.
+
+A LangChain retriever is a [runnable](/docs/how_to/lcel_cheatsheet/), which is a standard interface for LangChain components.
+This means that it has a few common methods, including `invoke`, that are used to interact with it. A retriever can be invoked with a query:
+
+```typescript
+const docs = await retriever.invoke(query);
+```
+
+Retrievers return a list of [Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) objects, which have two attributes:
+
+- `pageContent`: The content of this document. Currently, this is a string.
+- `metadata`: Arbitrary metadata associated with this document (e.g., document id, file name, source, etc).
+
+:::info[Further reading]
+
+- See our [how-to guide](/docs/how_to/custom_retriever/) on building your own custom retriever.
+
+:::
+
+## Common types
+
+Despite the flexibility of the retriever interface, a few common types of retrieval systems are frequently used.
+
+### Search APIs
+
+It's important to note that retrievers don't need to actually _store_ documents.
+For example, we can build retrievers on top of search APIs that simply return search results!
+
+### Relational or graph database
+
+Retrievers can be built on top of relational or graph databases.
+In these cases, [query analysis](/docs/concepts/retrieval/) techniques that construct a structured query from natural language are critical.
+For example, you can build a retriever for a SQL database using text-to-SQL conversion. This allows a natural language query (string) to be transformed into a SQL query behind the scenes.
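+
+As a rough sketch of what this can look like, the custom retriever below wraps a hypothetical `sqlChain` runnable (assumed to turn a natural language question into a SQL query and return matching rows); the `_getRelevantDocuments` method described above is the only piece that has to be implemented.
+
+```typescript
+import { BaseRetriever } from "@langchain/core/retrievers";
+import { Document } from "@langchain/core/documents";
+
+// Hypothetical text-to-SQL chain: takes a question, returns rows from the database.
+declare const sqlChain: {
+  invoke: (input: { question: string }) => Promise<Record<string, unknown>[]>;
+};
+
+class SqlRetriever extends BaseRetriever {
+  lc_namespace = ["myapp", "retrievers"];
+
+  async _getRelevantDocuments(query: string): Promise<Document[]> {
+    const rows = await sqlChain.invoke({ question: query });
+    // Wrap each returned row in a standard LangChain Document.
+    return rows.map(
+      (row) =>
+        new Document({
+          pageContent: JSON.stringify(row),
+          metadata: { source: "sql" },
+        })
+    );
+  }
+}
+
+const retriever = new SqlRetriever({});
+const docs = await retriever.invoke("Which customers signed up last week?");
+```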
+
+:::info[Further reading]
+
+- See our [tutorial](/docs/tutorials/sql_qa/) for context on how to build a retriever using a SQL database and text-to-SQL.
+- See our [tutorial](/docs/tutorials/graph/) for context on how to build a retriever using a graph database and text-to-Cypher.
+
+:::
+
+### Lexical search
+
+As discussed in our conceptual review of [retrieval](/docs/concepts/retrieval/), many search engines are based upon matching words in a query to the words in each document.
+[BM25](https://en.wikipedia.org/wiki/Okapi_BM25#:~:text=BM25%20is%20a%20bag%2Dof,slightly%20different%20components%20and%20parameters.) and [TF-IDF](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) are [two popular lexical search algorithms](https://cameronrwolfe.substack.com/p/the-basics-of-ai-powered-vector-search?utm_source=profile&utm_medium=reader2).
+LangChain has retrievers for many popular lexical search algorithms / engines.
+
+:::info[Further reading]
+
+- See the [BM25](/docs/integrations/retrievers/bm25/) retriever integration.
+
+:::
+
+### Vector store
+
+[Vector stores](/docs/concepts/vectorstores/) are a powerful and efficient way to index and retrieve unstructured data.
+A vectorstore can be used as a retriever by calling the `asRetriever()` method.
+
+```typescript
+const vectorstore = new MyVectorStore();
+const retriever = vectorstore.asRetriever();
+```
+
+## Advanced retrieval patterns
+
+### Ensemble
+
+Because the retriever interface is so simple, returning a list of `Document` objects given a search query, it is possible to combine multiple retrievers using ensembling.
+This is particularly useful when you have multiple retrievers that are good at finding different types of relevant documents.
+It is easy to create an [ensemble retriever](/docs/how_to/ensemble_retriever/) that combines multiple retrievers with linear weighted scores:
+
+```typescript
+// Initialize the ensemble retriever
+const ensembleRetriever = new EnsembleRetriever({
+  retrievers: [bm25Retriever, vectorStoreRetriever],
+  weights: [0.5, 0.5],
+});
+```
+
+When ensembling, how do we combine search results from many retrievers?
+This motivates the concept of re-ranking, which takes the output of multiple retrievers and combines them using a more sophisticated algorithm such as [Reciprocal Rank Fusion (RRF)](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf).
+
+### Source document retention
+
+Many retrievers utilize some kind of index to make documents easily searchable.
+The process of indexing can include a transformation step (e.g., vectorstores often use document splitting).
+Whatever transformation is used, it can be very useful to retain a link between the _transformed document_ and the original, giving the retriever the ability to return the _original_ document.
+
+![Retrieval with full docs](/img/retriever_full_docs.png)
+
+This is particularly useful in AI applications, because it ensures no loss in document context for the model.
+For example, you may use a small chunk size for indexing documents in a vectorstore.
+If you return _only_ the chunks as the retrieval result, then the model will have lost the original document context for the chunks.
+
+LangChain has two different retrievers that can be used to address this challenge.
+The [Multi-Vector](/docs/how_to/multi_vector/) retriever allows the user to use any document transformation (e.g., use an LLM to write a summary of the document) for indexing while retaining linkage to the source document.
+The [ParentDocument](/docs/how_to/parent_document_retriever/) retriever links document chunks from a text-splitter transformation for indexing while retaining linkage to the source document. + +| Name | Index Type | Uses an LLM | When to Use | Description | +| --------------------------------------------------------- | ----------------------------- | ------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [ParentDocument](/docs/how_to/parent_document_retriever/) | Vector store + Document Store | No | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). | +| [Multi Vector](/docs/how_to/multi_vector/) | Vector store + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself. | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions. | + +:::info[Further reading] + +- See our [how-to guide](/docs/how_to/parent_document_retriever/) on using the ParentDocument retriever. +- See our [how-to guide](/docs/how_to/multi_vector/) on using the MultiVector retriever. +- See our RAG from Scratch video on the [multi vector retriever](https://youtu.be/gTCU9I6QqCE?feature=shared). + +::: diff --git a/docs/core_docs/docs/concepts/runnables.mdx b/docs/core_docs/docs/concepts/runnables.mdx new file mode 100644 index 000000000000..e6a72b668d8c --- /dev/null +++ b/docs/core_docs/docs/concepts/runnables.mdx @@ -0,0 +1,248 @@ +# Runnable interface + +The Runnable interface is foundational for working with LangChain components, and it's implemented across many of them, such as [language models](/docs/concepts/chat_models), [output parsers](/docs/concepts/output_parsers), [retrievers](/docs/concepts/retrievers), [compiled LangGraph graphs](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#compiling-your-graph) and more. + +This guide covers the main concepts and methods of the Runnable interface, which allows developers to interact with various LangChain components in a consistent and predictable manner. + +:::info Related Resources + +- The ["Runnable" Interface API Reference](https://api.js.langchain.com/classes/_langchain_core.runnables.Runnable.html) provides a detailed overview of the Runnable interface and its methods. +- A list of built-in `Runnables` can be found in the [LangChain Core API Reference](https://api.js.langchain.com/modules/_langchain_core.runnables.html). Many of these Runnables are useful when composing custom "chains" in LangChain using the [LangChain Expression Language (LCEL)](/docs/concepts/lcel). 
+
+:::
+
+## Overview of runnable interface
+
+The Runnable interface defines a standard set of methods that allow a Runnable component to be:
+
+- [Invoked](/docs/how_to/lcel_cheatsheet/#invoke-a-runnable): A single input is transformed into an output.
+- [Batched](/docs/how_to/lcel_cheatsheet/#batch-a-runnable/): Multiple inputs are efficiently transformed into outputs.
+- [Streamed](/docs/how_to/lcel_cheatsheet/#stream-a-runnable): Outputs are streamed as they are produced.
+- Inspected: Schematic information about the Runnable's input, output, and configuration can be accessed.
+- Composed: Multiple Runnables can be composed to work together using [the LangChain Expression Language (LCEL)](/docs/concepts/lcel) to create complex pipelines.
+
+Please review the [LCEL Cheatsheet](/docs/how_to/lcel_cheatsheet) for some common patterns that involve the Runnable interface and LCEL expressions.
+
+### Optimized parallel execution (batch)
+
+LangChain Runnables offer a built-in `batch` API that allows you to process multiple inputs in parallel.
+
+Using this method can significantly improve performance when needing to process multiple independent inputs, as the
+processing can be done in parallel instead of sequentially.
+
+The batching method is:
+
+- `batch`: Process multiple inputs in parallel, returning results in the same order as the inputs.
+
+The default implementation of `batch` executes the `invoke` method in parallel.
+
+Some Runnables may provide their own implementations of `batch` that are optimized for their specific use case (e.g.,
+rely on a `batch` API provided by a model provider).
+
+:::tip
+When processing a large number of inputs using `batch`, users may want to control the maximum number of parallel calls. This can be done by setting the `maxConcurrency` attribute in the `RunnableConfig` object. See the [RunnableConfig](/docs/concepts/runnables#RunnableConfig) section for more information.
+:::
+
+## Streaming APIs
+
+
+
+Streaming is critical in making applications based on LLMs feel responsive to end-users.
+
+Runnables expose the following three streaming APIs:
+
+1. [`stream`](https://api.js.langchain.com/classes/_langchain_core.runnables.Runnable.html#stream): yields the output of a Runnable as it is generated.
+2. [`streamEvents`](https://v03.api.js.langchain.com/classes/_langchain_core.runnables.Runnable.html#streamEvents): a more advanced streaming API that allows streaming intermediate steps and final output
+3. **legacy** `streamLog`: a legacy streaming API that streams intermediate steps and final output
+
+Please refer to the [Streaming Conceptual Guide](/docs/concepts/streaming) for more details on how to stream in LangChain.
+
+## Input and output types
+
+Every `Runnable` is characterized by an input and output type. These input and output types can be any TypeScript object, and are defined by the Runnable itself.
+
+Runnable methods that result in the execution of the Runnable (e.g., `invoke`, `batch`, `stream`, `streamEvents`) work with these input and output types, as illustrated in the sketch after this list.
+
+- `invoke`: Accepts an input and returns an output.
+- `batch`: Accepts a list of inputs and returns a list of outputs.
+- `stream`: Accepts an input and returns a generator that yields outputs.
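+
+As a minimal sketch of these methods in use (assuming the `@langchain/openai` package is installed; the model name here is only an example), a chat model can be invoked, batched, and streamed like so:
+
+```typescript
+import { ChatOpenAI } from "@langchain/openai";
+
+const model = new ChatOpenAI({ model: "gpt-4o-mini" });
+
+// invoke: a single input produces a single output
+const reply = await model.invoke("Tell me a joke about bears");
+
+// batch: multiple independent inputs are processed in parallel
+const replies = await model.batch(["Hello!", "What is 2 + 2?"]);
+
+// stream: chunks are yielded as they are produced
+for await (const chunk of await model.stream("Write a haiku about the sea")) {
+  console.log(chunk.content);
+}
+```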
+ +The **input type** and **output type** vary by component: + +| Component | Input Type | Output Type | +| ------------ | ---------------------------------------------------- | --------------------- | +| Prompt | `object` | `PromptValue` | +| ChatModel | a `string`, list of chat messages or a `PromptValue` | `ChatMessage` | +| LLM | a `string`, list of chat messages or a `PromptValue` | `string` | +| OutputParser | the output of an LLM or ChatModel | Depends on the parser | +| Retriever | a `string` | List of `Document`s | +| Tool | a `string` or `object`, depending on the tool | Depends on the tool | + +Please refer to the individual component documentation for more information on the input and output types and how to use them. + +## RunnableConfig + +Any of the methods that are used to execute the runnable (e.g., `invoke`, `batch`, `stream`, `streamEvents`) accept a second argument called +`RunnableConfig` ([API Reference](https://api.js.langchain.com/interfaces/_langchain_core.runnables.RunnableConfig.html)). This argument is an object that contains configuration for the Runnable that will be used +at run time during the execution of the runnable. + +A `RunnableConfig` can have any of the following properties defined: + +| Attribute | Description | +| ---------------- | ------------------------------------------------------------------------------------------ | +| `runName` | Name used for the given Runnable (not inherited). | +| `runId` | Unique identifier for this call. sub-calls will get their own unique run ids. | +| `tags` | Tags for this call and any sub-calls. | +| `metadata` | Metadata for this call and any sub-calls. | +| `callbacks` | Callbacks for this call and any sub-calls. | +| `maxConcurrency` | Maximum number of parallel calls to make (e.g., used by batch). | +| `recursionLimit` | Maximum number of times a call can recurse (e.g., used by Runnables that return Runnables) | +| `configurable` | Runtime values for configurable attributes of the Runnable. | + +Passing `config` to the `invoke` method is done like so: + +```typescript +await someRunnable.invoke(someInput, { + runName: "myRun", + tags: ["tag1", "tag2"], + metadata: { key: "value" }, +}); +``` + +### Propagation of RunnableConfig + +Many `Runnables` are composed of other Runnables, and it is important that the `RunnableConfig` is propagated to all sub-calls made by the Runnable. This allows providing run time configuration values to the parent Runnable that are inherited by all sub-calls. + +If this were not the case, it would be impossible to set and propagate [callbacks](/docs/concepts/callbacks) or other configuration values like `tags` and `metadata` which +are expected to be inherited by all sub-calls. + +There are two main patterns by which new `Runnables` are created: + +1. Declaratively using [LangChain Expression Language (LCEL)](/docs/concepts/lcel): + + ```typescript + const chain = prompt.pipe(chatModel).pipe(outputParser); + ``` + +2. Using a [custom Runnable](#custom-runnables) (e.g., `RunnableLambda`) or using the `tool` function: + + ```typescript + const foo = (input) => { + // Note that .invoke() is used directly here + // highlight-next-line + return barRunnable.invoke(input); + }; + const fooRunnable = RunnableLambda.from(foo); + ``` + +LangChain will try to propagate `RunnableConfig` automatically for both of the patterns. 
+
+Propagating the `RunnableConfig` manually is done like so:
+
+```typescript
+// Note the config argument
+// highlight-next-line
+const foo = (input, config) => {
+  return barRunnable.invoke(input, config);
+};
+const fooRunnable = RunnableLambda.from(foo);
+```
+
+### Setting custom run name, tags, and metadata
+
+The `runName`, `tags`, and `metadata` attributes of the `RunnableConfig` object can be used to set custom values for the run name, tags, and metadata for a given Runnable.
+
+The `runName` is a string that can be used to set a custom name for the run. This name will be used in logs and other places to identify the run. It is not inherited by sub-calls.
+
+The `tags` and `metadata` attributes are arrays and objects, respectively, that can be used to set custom tags and metadata for the run. These values are inherited by sub-calls.
+
+Using these attributes can be useful for tracking and debugging runs, as they will be surfaced in [LangSmith](https://docs.smith.langchain.com/) as trace attributes that you can
+filter and search on.
+
+The attributes will also be propagated to [callbacks](/docs/concepts/callbacks), and will appear in streaming APIs like [streamEvents](/docs/concepts/streaming) as part of each event in the stream.
+
+:::note Related
+
+- [How-to trace with LangChain](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain)
+
+:::
+
+### Setting run id
+
+:::note
+This is an advanced feature that is unnecessary for most users.
+:::
+
+You may need to set a custom `runId` for a given run, in case you want
+to reference it later or correlate it with other systems.
+
+The `runId` MUST be a valid UUID string and **unique** for each run. It is used to identify
+the parent run; sub-calls will get their own unique run ids automatically.
+
+To set a custom `runId`, you can pass it as a key-value pair in the `config` object when invoking the Runnable:
+
+```typescript
+import { v4 as uuidv4 } from "uuid";
+
+const runId = uuidv4();
+
+await someRunnable.invoke(someInput, {
+  runId,
+});
+
+// Do something with the runId
+```
+
+### Setting recursion limit
+
+:::note
+This is an advanced feature that is unnecessary for most users.
+:::
+
+Some Runnables may return other Runnables, which can lead to infinite recursion if not handled properly. To prevent this, you can set a `recursionLimit` in the `RunnableConfig` object. This will limit the number of times a Runnable can recurse.
+
+### Setting max concurrency
+
+If using the `batch` method, you can set the `maxConcurrency` attribute in the `RunnableConfig` object to control the maximum number of parallel calls to make. This can be useful when you want to limit the number of parallel calls to prevent overloading a server or API.
+
+### Setting configurable
+
+The `configurable` field is used to pass runtime values for configurable attributes of the Runnable.
+
+It is used frequently in [LangGraph](/docs/concepts/architecture#langgraph) with
+[LangGraph Persistence](https://langchain-ai.github.io/langgraphjs/concepts/persistence/)
+and [memory](https://langchain-ai.github.io/langgraphjs/concepts/memory/).
+
+It is used for a similar purpose in [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html) to specify
+a `sessionId` to keep track of conversation history.
+
+### Setting callbacks
+
+Use this option to configure [callbacks](/docs/concepts/callbacks) for the runnable at
+runtime.
The callbacks will be passed to all sub-calls made by the runnable. + +```typescript +await someRunnable.invoke(someInput, { + callbacks: [SomeCallbackHandler(), AnotherCallbackHandler()], +}); +``` + +Please read the [Callbacks Conceptual Guide](/docs/concepts/callbacks) for more information on how to use callbacks in LangChain. + +## Creating a runnable from a function + +You may need to create a custom Runnable that runs arbitrary logic. This is especially +useful if using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to compose +multiple Runnables and you need to add custom processing logic in one of the steps. + +There are two ways to create a custom Runnable from a function: + +- `RunnableLambda`: Use this simple transformations where streaming is not required. +- `RunnableGenerator`: use this for more complex transformations when streaming is needed. + +See the [How to run custom functions](/docs/how_to/functions) guide for more information on how to use `RunnableLambda` and `RunnableGenerator`. + +:::important +Users should not try to subclass Runnables to create a new custom Runnable. It is +much more complex and error-prone than simply using `RunnableLambda` or `RunnableGenerator`. +::: diff --git a/docs/core_docs/docs/concepts/streaming.mdx b/docs/core_docs/docs/concepts/streaming.mdx new file mode 100644 index 000000000000..a7e1f2a21a46 --- /dev/null +++ b/docs/core_docs/docs/concepts/streaming.mdx @@ -0,0 +1,189 @@ +# Streaming + +:::info Prerequisites + +- [Runnable Interface](/docs/concepts/runnables) +- [Chat Models](/docs/concepts/chat_models) + +::: + +**Streaming** is crucial for enhancing the responsiveness of applications built on [LLMs](/docs/concepts/chat_models). By displaying output progressively, even before a complete response is ready, streaming significantly improves user experience (UX), particularly when dealing with the latency of LLMs. + +## Overview + +Generating full responses from [LLMs](/docs/concepts/chat_models) often incurs a delay of several seconds, which becomes more noticeable in complex applications with multiple model calls. Fortunately, LLMs generate responses iteratively, allowing for intermediate results to be displayed as they are produced. By streaming these intermediate outputs, LangChain enables smoother UX in LLM-powered apps and offers built-in support for streaming at the core of its design. + +In this guide, we'll discuss streaming in LLM applications and explore how LangChain's streaming APIs facilitate real-time output from various components in your application. + +## What to stream in LLM applications + +In applications involving LLMs, several types of data can be streamed to improve user experience by reducing perceived latency and increasing transparency. These include: + +### 1. Streaming LLM outputs + +The most common and critical data to stream is the output generated by the LLM itself. LLMs often take time to generate full responses, and by streaming the output in real-time, users can see partial results as they are produced. This provides immediate feedback and helps reduce the wait time for users. + +### 2. Streaming pipeline or workflow progress + +Beyond just streaming LLM output, it’s useful to stream progress through more complex workflows or pipelines, giving users a sense of how the application is progressing overall. This could include: + +- **In LangGraph Workflows:** + With [LangGraph](/docs/concepts/architecture#langgraph), workflows are composed of nodes and edges that represent various steps. 
Streaming here involves tracking changes to the **graph state** as individual **nodes** request updates. This allows for more granular monitoring of which node in the workflow is currently active, giving real-time updates about the status of the workflow as it progresses through different stages.
+
+- **In LCEL Pipelines:**
+  Streaming updates from an [LCEL](/docs/concepts/lcel) pipeline involves capturing progress from individual **sub-runnables**. For example, as different steps or components of the pipeline execute, you can stream which sub-runnable is currently running, providing real-time insight into the overall pipeline's progress.
+
+Streaming pipeline or workflow progress is essential in providing users with a clear picture of where the application is in the execution process.
+
+### 3. Streaming custom data
+
+In some cases, you may need to stream **custom data** that goes beyond the information provided by the pipeline or workflow structure. This custom information is injected within a specific step in the workflow, whether that step is a tool or a LangGraph node. For example, you could stream updates about what a tool is doing in real-time or the progress through a LangGraph node. This granular data, which is emitted directly from within the step, provides more detailed insights into the execution of the workflow and is especially useful in complex processes where more visibility is needed.
+
+## Streaming APIs
+
+LangChain provides two main APIs for streaming output in real-time. These APIs are supported by any component that implements the [Runnable Interface](/docs/concepts/runnables), including [LLMs](/docs/concepts/chat_models), [compiled LangGraph graphs](https://langchain-ai.github.io/langgraphjs/concepts/low_level/), and any Runnable generated with [LCEL](/docs/concepts/lcel).
+
+1. [`stream`](https://api.js.langchain.com/classes/_langchain_core.runnables.Runnable.html#stream): Use this to stream outputs from individual Runnables (e.g., a chat model) as they are generated, or to stream any workflow created with LangGraph.
+2. [`streamEvents`](https://api.js.langchain.com/classes/_langchain_core.runnables.Runnable.html#streamEvents): Use this API to get access to custom events and intermediate outputs from LLM applications built entirely with [LCEL](/docs/concepts/lcel). Note that this API is available, but not needed when working with LangGraph.
+
+:::note
+In addition, there is a **legacy** [streamLog](https://api.js.langchain.com/classes/_langchain_core.runnables.Runnable.html#streamLog) API. This API is not recommended for new projects, as it is more complex and less feature-rich than the other streaming APIs.
+:::
+
+### `stream()`
+
+The `stream()` method returns an async iterator that yields chunks of output as they are produced. You can use a `for await` loop to process each chunk in real-time. For example, when using an LLM, this allows the output to be streamed incrementally as it is generated, reducing the wait time for users.
+
+The type of chunk yielded by the `stream()` method depends on the component being streamed. For example, when streaming from an [LLM](/docs/concepts/chat_models) each chunk will be an [`AIMessageChunk`](/docs/concepts/messages#aimessagechunk); however, for other components, the chunk may be different.
+
+The `stream()` method yields these chunks as they are produced.
For example, + +```typescript +for await (const chunk in await component.stream(someInput)) { + // IMPORTANT: Keep the processing of each chunk as efficient as possible. + // While you're processing the current chunk, the upstream component is + // waiting to produce the next one. For example, if working with LangGraph, + // graph execution is paused while the current chunk is being processed. + // In extreme cases, this could even result in timeouts (e.g., when llm outputs are + // streamed from an API that has a timeout). + console.log(chunk) +} +``` + +#### Usage with chat models + +When using `stream()` with chat models, the output is streamed as [`AIMessageChunks`](/docs/concepts/messages#aimessagechunk) as it is generated by the LLM. This allows you to present or process the LLM's output incrementally as it's being produced, which is particularly useful in interactive applications or interfaces. + +#### Usage with LangGraph + +[LangGraph](/docs/concepts/architecture#langgraph) compiled graphs are [Runnables](/docs/concepts/runnables) and support the standard streaming APIs. + +When using the _stream_ and methods with LangGraph, you can **one or more** [streaming mode](https://langchain-ai.github.io/langgraphjs/reference/classes/langgraph_pregel.Pregel.html#streamMode) which allow you to control the type of output that is streamed. The available streaming modes are: + +- **"values"**: Emit all values of the [state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/) for each step. +- **"updates"**: Emit only the node name(s) and updates that were returned by the node(s) after each step. +- **"debug"**: Emit debug events for each step. +- **"messages"**: Emit LLM [messages](/docs/concepts/messages) [token-by-token](/docs/concepts/tokens). + +For more information, please see: + +- [LangGraph streaming conceptual guide](https://langchain-ai.github.io/langgraphjs/concepts/streaming/) for more information on how to stream when working with LangGraph. +- [LangGraph streaming how-to guides](https://langchain-ai.github.io/langgraphjs/how-tos/#streaming) for specific examples of streaming in LangGraph. + +#### Usage with LCEL + +If you compose multiple Runnables using [LangChain’s Expression Language (LCEL)](/docs/concepts/lcel), the `stream()` methods will, by convention, stream the output of the last step in the chain. This allows the final processed result to be streamed incrementally. **LCEL** tries to optimize streaming latency in pipelines such that the streaming results from the last step are available as soon as possible. + +### `streamEvents` + + + +:::tip +Use the `streamEvents` API to access custom data and intermediate outputs from LLM applications built entirely with [LCEL](/docs/concepts/lcel). + +While this API is available for use with [LangGraph](/docs/concepts/architecture#langgraph) as well, it is usually not necessary when working with LangGraph, as the `stream` methods provide comprehensive streaming capabilities for LangGraph graphs. +::: + +For chains constructed using **LCEL**, the `.stream()` method only streams the output of the final step from te chain. This might be sufficient for some applications, but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of the chain alongside the final output. For example, you may want to return sources alongside the final generation when building a chat-over-documents app. 
+
+There are ways to do this [using callbacks](/docs/concepts/callbacks), or by constructing your chain in such a way that it passes intermediate
+values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
+`.streamEvents()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator
+which yields [various types of events](/docs/how_to/streaming/#event-reference) that you can filter and process according
+to the needs of your project.
+
+Here's one small example that prints just events containing streamed chat model output:
+
+```typescript
+import { StringOutputParser } from "@langchain/core/output_parsers";
+import { ChatPromptTemplate } from "@langchain/core/prompts";
+import { ChatAnthropic } from "@langchain/anthropic";
+
+const model = new ChatAnthropic({ model: "claude-3-sonnet-20240229" });
+
+const prompt = ChatPromptTemplate.fromTemplate("tell me a joke about {topic}");
+const parser = new StringOutputParser();
+const chain = prompt.pipe(model).pipe(parser);
+
+for await (const event of await chain.streamEvents(
+  { topic: "parrot" },
+  { version: "v2" }
+)) {
+  if (event.event === "on_chat_model_stream") {
+    console.log(event);
+  }
+}
+```
+
+You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components!
+
+See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.streamEvents()`, including a table listing available events.
+
+## Writing custom data to the stream
+
+To write custom data to the stream, you will need to choose one of the following methods based on the component you are working with:
+
+1. [`dispatchCustomEvent`](https://api.js.langchain.com/functions/_langchain_core.callbacks_dispatch.dispatchCustomEvent.html#) can be used to write custom data that will be surfaced through the **streamEvents** API. See [how to dispatch custom callback events](/docs/how_to/callbacks_custom_events/#stream-events-api) for more information.
+
+## "Auto-Streaming" Chat Models
+
+LangChain simplifies streaming from [chat models](/docs/concepts/chat_models) by automatically enabling streaming mode in certain cases, even when you're not explicitly calling the streaming methods. This is particularly useful when you use the non-streaming `invoke` method but still want to stream the entire application, including intermediate results from the chat model.
+
+### How It Works
+
+When you call the `invoke` method on a chat model, LangChain will automatically switch to streaming mode if it detects that you are trying to stream the overall application.
+
+Under the hood, it'll have `invoke` use the `stream` method to generate its output. The result of the invocation will be the same as far as the code that was using `invoke` is concerned; however, while the chat model is being streamed, LangChain will take care of invoking `on_llm_new_token` events in LangChain's [callback system](/docs/concepts/callbacks). These callback events
+allow LangGraph's `stream` and `streamEvents` to surface the chat model's output in real-time.
+
+Example:
+
+```typescript
+const node = async (state) => {
+  ...
+  // The code below uses the invoke method, but LangChain will
+  // automatically switch to streaming mode
+  // when it detects that the overall
+  // application is being streamed.
+  const aiMessage = await model.invoke(state.messages);
+  ...
+ + for await (const chunk of await compiledGraph.stream(..., { streamMode: "messages" })) { + // ... do something + } +} +``` + +## Related Resources + +Please see the following how-to guides for specific examples of streaming in LangChain: + +- [LangGraph conceptual guide on streaming](https://langchain-ai.github.io/langgraphjs/concepts/streaming/) +- [LangGraph streaming how-to guides](https://langchain-ai.github.io/langgraphjs/how-tos/#streaming) +- [How to stream runnables](/docs/how_to/streaming/): This how-to guide goes over common streaming patterns with LangChain components (e.g., chat models) and with [LCEL](/docs/concepts/lcel). +- [How to stream chat models](/docs/how_to/chat_streaming/) +- [How to stream tool calls](/docs/how_to/tool_streaming/) + +For writing custom data to the stream, please see the following resources: + +- If using LCEL, see [how to dispatch custom callback events](/docs/how_to/callbacks_custom_events/#stream-events-api). diff --git a/docs/core_docs/docs/concepts/structured_outputs.mdx b/docs/core_docs/docs/concepts/structured_outputs.mdx new file mode 100644 index 000000000000..173fa2fbcc9b --- /dev/null +++ b/docs/core_docs/docs/concepts/structured_outputs.mdx @@ -0,0 +1,169 @@ +# Structured outputs + +## Overview + +For many applications, such as chatbots, models need to respond to users directly in natural language. +However, there are scenarios where we need models to output in a _structured format_. +For example, we might want to store the model output in a database and ensure that the output conforms to the database schema. +This need motivates the concept of structured output, where models can be instructed to respond with a particular output structure. + +![Structured output](/img/structured_output.png) + +## Key concepts + +**(1) Schema definition:** The output structure is represented as a schema, which can be defined in several ways. +**(2) Returning structured output:** The model is given this schema, and is instructed to return output that conforms to it. + +## Recommended usage + +This pseudo-code illustrates the recommended workflow when using structured output. +LangChain provides a method, [`withStructuredOutput()`](/docs/how_to/structured_output/#the-.withstructuredoutput-method), that automates the process of binding the schema to the [model](/docs/concepts/chat_models/) and parsing the output. +This helper function is available for all model providers that support structured output. + +```typescript +// Define schema +const schema = { foo: "bar" }; +// Bind schema to model +const modelWithStructure = model.withStructuredOutput(schema); +// Invoke the model to produce structured output that matches the schema +const structuredOutput = await modelWithStructure.invoke(userInput); +``` + +## Schema definition + +The central concept is that the output structure of model responses needs to be represented in some way. +While types of objects you can use depend on the model you're working with, there are common types of objects that are typically allowed or recommended for structured output in TypeScript. 
+ +The simplest and most common format for structured output is a Zod schema definition: + +```typescript +import { z } from "zod"; + +const ResponseFormatter = z.object({ + answer: z.string().describe("The answer to the user's question"), + followup_question: z + .string() + .describe("A followup question the user could ask"), +}); +``` + +You can also define a JSONSchema object, which is what Zod schemas are converted to internally before being sent to the model provider: + +```json +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://example.com/product.schema.json", + "title": "ResponseFormatter", + "type": "object", + "properties": { + "answer": { + "description": "The answer to the user's question", + "type": "string" + }, + "followup_question": { + "description": "A followup question the user could ask", + "type": "string" + } + }, + "required": ["answer", "followup_question"] +} +``` + +## Returning structured output + +With a schema defined, we need a way to instruct the model to use it. +While one approach is to include this schema in the prompt and _ask nicely_ for the model to use it, this is not recommended. +Several more powerful methods that utilizes native features in the model provider's API are available. + +### Using tool calling + +Many [model providers support](/docs/integrations/chat/) tool calling, a concept discussed in more detail in our [tool calling guide](/docs/concepts/tool_calling/). +In short, tool calling involves binding a tool to a model and, when appropriate, the model can _decide_ to call this tool and ensure its response conforms to the tool's schema. +With this in mind, the central concept is straightforward: _simply bind our schema to a model as a tool!_ +Here is an example using the `ResponseFormatter` schema defined above: + +```typescript +import { ChatOpenAI } from "@langchain/openai"; + +const model = new ChatOpenAI({ + modelName: "gpt-4", + temperature: 0, +}); + +// Bind ResponseFormatter schema as a tool to the model +const modelWithTools = model.bindTools([ResponseFormatter]); + +// Invoke the model +const aiMsg = await modelWithTools.invoke( + "What is the powerhouse of the cell?" +); +``` + +### JSON mode + +In addition to tool calling, some model providers support a feature called `JSON mode`. +This supports JSON schema definition as input and enforces the model to produce a conforming JSON output. +You can find a table of model providers that support JSON mode [here](/docs/integrations/chat/). +Here is an example of how to use JSON mode with OpenAI: + +```typescript +import { ChatOpenAI } from "@langchain/openai"; + +const model = new ChatOpenAI({ + model: "gpt-4", +}).bind({ + response_format: { type: "json_object" }, +}); + +const aiMsg = await model.invoke( + "Return a JSON object with key 'random_nums' and a value of 10 random numbers in [0-99]" +); +console.log(aiMsg.content); +// Output: { +// "random_nums": [23, 47, 89, 15, 34, 76, 58, 3, 62, 91] +// } +``` + +One important point to flag: the model _still_ returns a string, which needs to be parsed into a JSON object. +This can, of course, simply use the `json` library or a JSON output parser if you need more advanced functionality. +See this [how-to guide on the JSON output parser](/docs/how_to/output_parser_json) for more details. 
+ +```typescript +import json +const jsonObject = JSON.parse(aiMsg.content) +// {'random_ints': [23, 47, 89, 15, 34, 76, 58, 3, 62, 91]} +``` + +## Structured output method + +There are a few challenges when producing structured output with the above methods: + +(1) If using tool calling, tool call arguments needs to be parsed from an object back to the original schema. + +(2) In addition, the model needs to be instructed to _always_ use the tool when we want to enforce structured output, which is a provider specific setting. + +(3) If using JSON mode, the output needs to be parsed into a JSON object. + +With these challenges in mind, LangChain provides a helper function (`withStructuredOutput()`) to streamline the process. + +![Diagram of with structured output](/img/with_structured_output.png) + +This both binds the schema to the model as a tool and parses the output to the specified output schema. + +```typescript +// Bind the schema to the model +const modelWithStructure = model.withStructuredOutput(ResponseFormatter); +// Invoke the model +const structuredOutput = await modelWithStructure.invoke( + "What is the powerhouse of the cell?" +); +// Get back the object +console.log(structuredOutput); +// { answer: "The powerhouse of the cell is the mitochondrion. Mitochondria are organelles that generate most of the cell's supply of adenosine triphosphate (ATP), which is used as a source of chemical energy.", followup_question: "What is the function of ATP in the cell?" } +``` + +:::info[Further reading] + +For more details on usage, see our [how-to guide](/docs/how_to/structured_output/#the-.withstructuredoutput-method). + +::: diff --git a/docs/core_docs/docs/concepts/t.ipynb b/docs/core_docs/docs/concepts/t.ipynb new file mode 100644 index 000000000000..7b63222f889e --- /dev/null +++ b/docs/core_docs/docs/concepts/t.ipynb @@ -0,0 +1,81 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ChatPromptValue {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " messages: [\n", + " SystemMessage {\n", + " \"content\": \"You are a helpful assistant\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"hi!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + " ]\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'prompt_values' ],\n", + " messages: [\n", + " SystemMessage {\n", + " \"content\": \"You are a helpful assistant\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"hi!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "import {\n", + " ChatPromptTemplate,\n", + " MessagesPlaceholder,\n", + "} from \"@langchain/core/prompts\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const promptTemplate = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " new MessagesPlaceholder(\"msgs\"),\n", + "]);\n", + "\n", + "await promptTemplate.invoke({ msgs: [new HumanMessage(\"hi!\")] });" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + 
"file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/concepts/text_llms.mdx b/docs/core_docs/docs/concepts/text_llms.mdx new file mode 100644 index 000000000000..78f42143cc88 --- /dev/null +++ b/docs/core_docs/docs/concepts/text_llms.mdx @@ -0,0 +1,10 @@ +# String-in, string-out llms + +:::tip +You are probably looking for the [Chat Model Concept Guide](/docs/concepts/chat_models) page for more information. +::: + +LangChain has implementations for older language models that take a string as input and return a string as output. These models are typically named without the "Chat" prefix (e.g., `Ollama`, `Anthropic`, `OpenAI`, etc.), and may include the "LLM" suffix (e.g., `OpenAILLM`, etc.). These models implement the [`BaseLLM`](https://api.js.langchain.com/classes/_langchain_core.language_models_llms.BaseLLM.html) interface. + +Users should be using almost exclusively the newer [Chat Models](/docs/concepts/chat_models) as most +model providers have adopted a chat like interface for interacting with language models. diff --git a/docs/core_docs/docs/concepts/text_splitters.mdx b/docs/core_docs/docs/concepts/text_splitters.mdx new file mode 100644 index 000000000000..b668fc96e07b --- /dev/null +++ b/docs/core_docs/docs/concepts/text_splitters.mdx @@ -0,0 +1,145 @@ +# Text splitters + + + +:::info[Prerequisites] + +- [Documents](/docs/concepts/retrievers/#interface) +- Tokenization(/docs/concepts/tokens) + +::: + +## Overview + +Document splitting is often a crucial preprocessing step for many applications. +It involves breaking down large texts into smaller, manageable chunks. +This process offers several benefits, such as ensuring consistent processing of varying document lengths, overcoming input size limitations of models, and improving the quality of text representations used in retrieval systems. +There are several strategies for splitting documents, each with its own advantages. + +## Key concepts + +![Conceptual Overview](/img/text_splitters.png) + +Text splitters split documents into smaller chunks for use in downstream applications. + +## Why split documents? + +There are several reasons to split documents: + +- **Handling non-uniform document lengths**: Real-world document collections often contain texts of varying sizes. Splitting ensures consistent processing across all documents. +- **Overcoming model limitations**: Many embedding models and language models have maximum input size constraints. Splitting allows us to process documents that would otherwise exceed these limits. +- **Improving representation quality**: For longer documents, the quality of embeddings or other representations may degrade as they try to capture too much information. Splitting can lead to more focused and accurate representations of each section. +- **Enhancing retrieval precision**: In information retrieval systems, splitting can improve the granularity of search results, allowing for more precise matching of queries to relevant document sections. +- **Optimizing computational resources**: Working with smaller chunks of text can be more memory-efficient and allow for better parallelization of processing tasks. + +Now, the next question is _how_ to split the documents into chunks! There are several strategies, each with its own advantages. 
+ +:::info[Further reading] + +- See Greg Kamradt's [chunkviz](https://chunkviz.up.railway.app/) to visualize different splitting strategies discussed below. + +::: + +## Approaches + +### Length-based + +The most intuitive strategy is to split documents based on their length. This simple yet effective approach ensures that each chunk doesn't exceed a specified size limit. +Key benefits of length-based splitting: + +- Straightforward implementation +- Consistent chunk sizes +- Easily adaptable to different model requirements + +Types of length-based splitting: + +- **Token-based**: Splits text based on the number of tokens, which is useful when working with language models. +- **Character-based**: Splits text based on the number of characters, which can be more consistent across different types of text. + +Example implementation using LangChain's `CharacterTextSplitter` with character based splitting: + +```typescript +import { CharacterTextSplitter } from "@langchain/textsplitters"; +const textSplitter = new CharacterTextSplitter({ + chunkSize: 100, + chunkOverlap: 0, +}); +const texts = await textSplitter.splitText(document); +``` + +:::info[Further reading] + +- See the how-to guide for [token-based](/docs/how_to/split_by_token/) splitting. +- See the how-to guide for [character-based](/docs/how_to/character_text_splitter/) splitting. + +::: + +### Text-structured based + +Text is naturally organized into hierarchical units such as paragraphs, sentences, and words. +We can leverage this inherent structure to inform our splitting strategy, creating split that maintain natural language flow, maintain semantic coherence within split, and adapts to varying levels of text granularity. +LangChain's [`RecursiveCharacterTextSplitter`](/docs/how_to/recursive_text_splitter/) implements this concept: + +- The `RecursiveCharacterTextSplitter` attempts to keep larger units (e.g., paragraphs) intact. +- If a unit exceeds the chunk size, it moves to the next level (e.g., sentences). +- This process continues down to the word level if necessary. + +Here is example usage: + +```typescript +import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; + +const textSplitter = new RecursiveCharacterTextSplitter({ + chunkSize: 100, + chunkOverlap: 0, +}); +const texts = await textSplitter.splitText(document); +``` + +:::info[Further reading] + +- See the how-to guide for [recursive text splitting](/docs/how_to/recursive_text_splitter/). + +::: + +### Document-structured based + +Some documents have an inherent structure, such as HTML, Markdown, or JSON files. +In these cases, it's beneficial to split the document based on its structure, as it often naturally groups semantically related text. +Key benefits of structure-based splitting: + +- Preserves the logical organization of the document +- Maintains context within each chunk +- Can be more effective for downstream tasks like retrieval or summarization + +Examples of structure-based splitting: + +- **Markdown**: Split based on headers (e.g., #, ##, ###) +- **HTML**: Split using tags +- **JSON**: Split by object or array elements +- **Code**: Split by functions, classes, or logical blocks + +:::info[Further reading] + +- See the how-to guide for [Code splitting](/docs/how_to/code_splitter/). + +::: + +### Semantic meaning based + +Unlike the previous methods, semantic-based splitting actually considers the _content_ of the text. 
+While other approaches use document or text structure as proxies for semantic meaning, this method directly analyzes the text's semantics. +There are several ways to implement this, but conceptually the approach is split text when there are significant changes in text _meaning_. +As an example, we can use a sliding window approach to generate embeddings, and compare the embeddings to find significant differences: + +- Start with the first few sentences and generate an embedding. +- Move to the next group of sentences and generate another embedding (e.g., using a sliding window approach). +- Compare the embeddings to find significant differences, which indicate potential "break points" between semantic sections. + +This technique helps create chunks that are more semantically coherent, potentially improving the quality of downstream tasks like retrieval or summarization. + +:::info[Further reading] + +- See Greg Kamradt's [notebook](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) showcasing semantic splitting. + +::: diff --git a/docs/core_docs/docs/concepts/tokens.mdx b/docs/core_docs/docs/concepts/tokens.mdx new file mode 100644 index 000000000000..bac37f5ef0ab --- /dev/null +++ b/docs/core_docs/docs/concepts/tokens.mdx @@ -0,0 +1,58 @@ +# Tokens + +Modern large language models (LLMs) are typically based on a transformer architecture that processes a sequence of units known as tokens. Tokens are the fundamental elements that models use to break down input and generate output. In this section, we'll discuss what tokens are and how they are used by language models. + +## What is a token? + +A **token** is the basic unit that a language model reads, processes, and generates. These units can vary based on how the model provider defines them, but in general, they could represent: + +- A whole word (e.g., "apple"), +- A part of a word (e.g., "app"), +- Or other linguistic components such as punctuation or spaces. + +The way the model tokenizes the input depends on its **tokenizer algorithm**, which converts the input into tokens. Similarly, the model’s output comes as a stream of tokens, which is then decoded back into human-readable text. + +## How tokens work in language models + +The reason language models use tokens is tied to how they understand and predict language. Rather than processing characters or entire sentences directly, language models focus on **tokens**, which represent meaningful linguistic units. Here's how the process works: + +1. **Input Tokenization**: When you provide a model with a prompt (e.g., "LangChain is cool!"), the tokenizer algorithm splits the text into tokens. For example, the sentence could be tokenized into parts like `["Lang", "Chain", " is", " cool", "!"]`. Note that token boundaries don’t always align with word boundaries. + ![](/img/tokenization.png) + +2. **Processing**: The transformer architecture behind these models processes tokens sequentially to predict the next token in a sentence. It does this by analyzing the relationships between tokens, capturing context and meaning from the input. +3. **Output Generation**: The model generates new tokens one by one. These output tokens are then decoded back into human-readable text. + +Using tokens instead of raw characters allows the model to focus on linguistically meaningful units, which helps it capture grammar, structure, and context more effectively. 
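+
+As an illustrative sketch of tokenization in practice (this uses the third-party `js-tiktoken` package, which is an assumption and not part of LangChain), you can inspect how a string is split into tokens and count them:
+
+```typescript
+import { getEncoding } from "js-tiktoken";
+
+// cl100k_base is the encoding used by many recent OpenAI models
+const enc = getEncoding("cl100k_base");
+
+const tokens = enc.encode("LangChain is cool!");
+console.log(tokens.length); // number of tokens, not characters
+console.log(enc.decode(tokens)); // "LangChain is cool!"
+```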
+ +## Tokens don’t have to be text + +Although tokens are most commonly used to represent text, they don’t have to be limited to textual data. Tokens can also serve as abstract representations of **multi-modal data**, such as: + +- **Images**, +- **Audio**, +- **Video**, +- And other types of data. + +At the time of writing, virtually no models support **multi-modal output**, and only a few models can handle **multi-modal inputs** (e.g., text combined with images or audio). However, as advancements in AI continue, we expect **multi-modality** to become much more common. This would allow models to process and generate a broader range of media, significantly expanding the scope of what tokens can represent and how models can interact with diverse types of data. + +:::note +In principle, **anything that can be represented as a sequence of tokens** could be modeled in a similar way. For example, **DNA sequences**—which are composed of a series of nucleotides (A, T, C, G)—can be tokenized and modeled to capture patterns, make predictions, or generate sequences. This flexibility allows transformer-based models to handle diverse types of sequential data, further broadening their potential applications across various domains, including bioinformatics, signal processing, and other fields that involve structured or unstructured sequences. +::: + +Please see the [multimodality](/docs/concepts/multimodality) section for more information on multi-modal inputs and outputs. + +## Why not use characters? + +Using tokens instead of individual characters makes models both more efficient and better at understanding context and grammar. Tokens represent meaningful units, like whole words or parts of words, allowing models to capture language structure more effectively than by processing raw characters. Token-level processing also reduces the number of units the model has to handle, leading to faster computation. + +In contrast, character-level processing would require handling a much larger sequence of input, making it harder for the model to learn relationships and context. Tokens enable models to focus on linguistic meaning, making them more accurate and efficient in generating responses. + +## How tokens correspond to text + +Please see this post from [OpenAI](https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them) for more details on how tokens are counted and how they correspond to text. + +According to the OpenAI post, the approximate token counts for English text are as follows: + +- 1 token ~= 4 chars in English +- 1 token ~= ¾ words +- 100 tokens ~= 75 words diff --git a/docs/core_docs/docs/concepts/tool_calling.mdx b/docs/core_docs/docs/concepts/tool_calling.mdx new file mode 100644 index 000000000000..929212275ba7 --- /dev/null +++ b/docs/core_docs/docs/concepts/tool_calling.mdx @@ -0,0 +1,185 @@ +# Tool calling + +:::info[Prerequisites] + +- [Tools](/docs/concepts/tools) +- [Chat Models](/docs/concepts/chat_models) + +::: + +## Overview + +Many AI applications interact directly with humans. In these cases, it is appropriate for models to respond in natural language. +But what about cases where we want a model to also interact _directly_ with systems, such as databases or an API? +These systems often have a particular input schema; for example, APIs frequently have a required payload structure. +This need motivates the concept of _tool calling_. 
You can use [tool calling](https://platform.openai.com/docs/guides/function-calling/example-use-cases) to request model responses that match a particular schema. + +:::info +You will sometimes hear the term `function calling`. We use this term interchangeably with `tool calling`. +::: + +![Conceptual overview of tool calling](/img/tool_calling_concept.png) + +## Key concepts + +**(1) Tool Creation:** Use the [tool](https://api.js.langchain.com/functions/_langchain_core.tools.tool-1.html) function to create a [tool](/docs/concepts/tools). A tool is an association between a function and its schema. +**(2) Tool Binding:** The tool needs to be connected to a model that supports tool calling. This gives the model awareness of the tool and the associated input schema required by the tool. +**(3) Tool Calling:** When appropriate, the model can decide to call a tool and ensure its response conforms to the tool's input schema. +**(4) Tool Execution:** The tool can be executed using the arguments provided by the model. + +![Conceptual parts of tool calling](/img/tool_calling_components.png) + +## Recommended usage + +This pseudo-code illustrates the recommended workflow for using tool calling. +Created tools are passed to `.bindTools()` method as a list. +This model can be called, as usual. If a tool call is made, model's response will contain the tool call arguments. +The tool call arguments can be passed directly to the tool. + +```typescript +// Tool creation +const tools = [myTool]; +// Tool binding +const modelWithTools = model.bindTools(tools); +// Tool calling +const response = await modelWithTools.invoke(userInput); +``` + +## Tool creation + +The recommended way to create a tool is using the `tool` function. + +```typescript +import { tool } from "@langchain/core/tools"; + +const multiply = tool( + ({ a, b }: { a: number; b: number }): number => { + /** + * Multiply a and b. + */ + return a * b; + }, + { + name: "multiply", + description: "Multiply two numbers", + schema: z.object({ + a: z.number(), + b: z.number(), + }), + } +); +``` + +:::info[Further reading] + +- See our conceptual guide on [tools](/docs/concepts/tools/) for more details. +- See our [model integrations](/docs/integrations/chat/) that support tool calling. +- See our [how-to guide](/docs/how_to/tool_calling/) on tool calling. + +::: + +For tool calling that does not require a function to execute, you can also define just the tool schema: + +```typescript +const multiplyTool = { + name: "multiply", + description: "Multiply two numbers", + schema: z.object({ + a: z.number(), + b: z.number(), + }), +}; +``` + +## Tool binding + +[Many](https://platform.openai.com/docs/guides/function-calling) [model providers](https://platform.openai.com/docs/guides/function-calling) support tool calling. + +:::tip +See our [model integration page](/docs/integrations/chat/) for a list of providers that support tool calling. +::: + +The central concept to understand is that LangChain provides a standardized interface for connecting tools to models. +The `.bindTools()` method can be used to specify which tools are available for a model to call. + +```typescript +const modelWithTools = model.bindTools([toolsList]); +``` + +As a specific example, let's take a function `multiply` and bind it as a tool to a model that supports tool calling. + +```typescript +const multiply = tool( + ({ a, b }: { a: number; b: number }): number => { + /** + * Multiply a and b. 
+ * + * @param a - first number + * @param b - second number + * @returns The product of a and b + */ + return a * b; + }, + { + name: "multiply", + description: "Multiply two numbers", + schema: z.object({ + a: z.number(), + b: z.number(), + }), + } +); + +const llmWithTools = toolCallingModel.bindTools([multiply]); +``` + +## Tool calling + +![Diagram of a tool call by a model](/img/tool_call_example.png) + +A key principle of tool calling is that the model decides when to use a tool based on the input's relevance. The model doesn't always need to call a tool. +For example, given an unrelated input, the model would not call the tool: + +```typescript +const result = await llmWithTools.invoke("Hello world!"); +``` + +The result would be an `AIMessage` containing the model's response in natural language (e.g., "Hello!"). +However, if we pass an input _relevant to the tool_, the model should choose to call it: + +```typescript +const result = await llmWithTools.invoke("What is 2 multiplied by 3?"); +``` + +As before, the output `result` will be an `AIMessage`. +But, if the tool was called, `result` will have a `tool_calls` attribute. +This attribute includes everything needed to execute the tool, including the tool name and input arguments: + +``` +result.tool_calls +{'name': 'multiply', 'args': {'a': 2, 'b': 3}, 'id': 'xxx', 'type': 'tool_call'} +``` + +For more details on usage, see our [how-to guides](/docs/how_to/#tools)! + +## Tool execution + +[Tools](/docs/concepts/tools/) implement the [Runnable](/docs/concepts/runnables/) interface, which means that they can be invoked (e.g., `tool.invoke(args)`) directly. + +[LangGraph](https://langchain-ai.github.io/langgraphjs/) offers pre-built components (e.g., [`ToolNode`](https://langchain-ai.github.io/langgraphjs/reference/classes/langgraph_prebuilt.ToolNode.html)) that will often invoke the tool in behalf of the user. + +:::info[Further reading] + +- See our [how-to guide](/docs/how_to/tool_calling/) on tool calling. +- See the [LangGraph documentation on using ToolNode](https://langchain-ai.github.io/langgraphjs/how-tos/tool-calling/). + +::: + +## Best practices + +When designing [tools](/docs/concepts/tools/) to be used by a model, it is important to keep in mind that: + +- Models that have explicit [tool-calling APIs](/docs/concepts/tool_calling) will be better at tool calling than non-fine-tuned models. +- Models will perform better if the tools have well-chosen names and descriptions. +- Simple, narrowly scoped tools are easier for models to use than complex tools. +- Asking the model to select from a large list of tools poses challenges for the model. diff --git a/docs/core_docs/docs/concepts/tools.mdx b/docs/core_docs/docs/concepts/tools.mdx new file mode 100644 index 000000000000..e8c4eb66bcb5 --- /dev/null +++ b/docs/core_docs/docs/concepts/tools.mdx @@ -0,0 +1,178 @@ +# Tools + +:::info Prerequisites + +- [Chat models](/docs/concepts/chat_models/) + +::: + +## Overview + +The **tool** abstraction in LangChain associates a TypeScript **function** with a **schema** that defines the function's **name**, **description** and **input**. + +**Tools** can be passed to [chat models](/docs/concepts/chat_models) that support [tool calling](/docs/concepts/tool_calling) allowing the model to request the execution of a specific function with specific inputs. + +## Key concepts + +- Tools are a way to encapsulate a function and its schema in a way that can be passed to a chat model. 
+- Create tools using the [tool](https://api.js.langchain.com/functions/_langchain_core.tools.tool-1.html) function, which simplifies the process of tool creation, supporting the following:
+  - Defining tools that return **artifacts** (e.g. images, etc.)
+  - Hiding input arguments from the schema (and hence from the model) using **injected tool arguments**.
+
+## Tool interface
+
+The tool interface is defined in the [`StructuredTool`](https://api.js.langchain.com/classes/_langchain_core.tools.StructuredTool.html) class, which is a subclass of the [Runnable Interface](/docs/concepts/runnables).
+
+The key attributes that correspond to the tool's **schema**:
+
+- **name**: The name of the tool.
+- **description**: A description of what the tool does.
+- **args**: Property that returns the JSON schema for the tool's arguments.
+
+The key method for executing the function associated with the **tool**:
+
+- **invoke**: Invokes the tool with the given arguments.
+
+## Create tools using the `tool` function
+
+The recommended way to create tools is using the [tool](https://api.js.langchain.com/functions/_langchain_core.tools.tool-1.html) function. This function is designed to simplify the process of tool creation and should be used in most cases.
+
+```typescript
+import { tool } from "@langchain/core/tools";
+import { z } from "zod";
+
+const multiply = tool(
+  ({ a, b }: { a: number; b: number }): number => {
+    /**
+     * Multiply two numbers.
+     */
+    return a * b;
+  },
+  {
+    name: "multiply",
+    description: "Multiply two numbers",
+    schema: z.object({
+      a: z.number(),
+      b: z.number(),
+    }),
+  }
+);
+```
+
+For more details on how to create tools, see the [how to create custom tools](/docs/how_to/custom_tools/) guide.
+
+:::note
+LangChain has a few other ways to create tools, e.g., by sub-classing the [`StructuredTool`](https://api.js.langchain.com/classes/_langchain_core.tools.StructuredTool.html) class. These methods are shown in the [how to create custom tools guide](/docs/how_to/custom_tools/), but
+we generally recommend using the `tool` function for most cases.
+:::
+
+## Use the tool directly
+
+Once you have defined a tool, you can use it directly by calling the function. For example, to use the `multiply` tool defined above:
+
+```typescript
+await multiply.invoke({ a: 2, b: 3 });
+```
+
+### Inspect
+
+You can also inspect the tool's schema and other properties:
+
+```typescript
+console.log(multiply.name); // multiply
+console.log(multiply.description); // Multiply two numbers.
+```
+
+:::note
+If you're using pre-built LangChain or LangGraph components like [createReactAgent](https://langchain-ai.github.io/langgraphjs/reference/functions/langgraph_prebuilt.createReactAgent.html), you might not need to interact with tools directly. However, understanding how to use them can be valuable for debugging and testing. Additionally, when building custom LangGraph workflows, you may find it necessary to work with tools directly.
+:::
+
+## Configuring the schema
+
+The `tool` function offers additional options to configure the tool's schema (e.g., modify the name, the description,
+or the Zod schema used for the tool's arguments).
+
+Please see the [API reference for tool](https://api.js.langchain.com/functions/_langchain_core.tools.tool-1.html) for more details and review the [how to create custom tools](/docs/how_to/custom_tools/) guide for examples.
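+
+As a minimal sketch of what such configuration can look like (the `convert_temperature` tool below is purely illustrative, not part of LangChain), you can also attach per-argument descriptions to the Zod schema with `.describe()`, which gives the model extra guidance on how to fill in each argument:
+
+```typescript
+import { tool } from "@langchain/core/tools";
+import { z } from "zod";
+
+// Hypothetical tool whose configuration supplies an explicit name, description,
+// and per-field argument descriptions.
+const convertTemperature = tool(
+  ({ celsius }: { celsius: number }): number => {
+    // Convert degrees Celsius to degrees Fahrenheit.
+    return (celsius * 9) / 5 + 32;
+  },
+  {
+    name: "convert_temperature",
+    description: "Convert a temperature from degrees Celsius to degrees Fahrenheit",
+    schema: z.object({
+      celsius: z.number().describe("The temperature to convert, in degrees Celsius"),
+    }),
+  }
+);
+
+// The configured values are visible on the resulting tool object.
+console.log(convertTemperature.name); // convert_temperature
+```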
+
+## Tool artifacts
+
+**Tools** are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example, if a tool returns a custom object, a dataframe, or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.
+
+```typescript
+const someTool = tool(({ ... }) => {
+  // do something
+}, {
+  // ... tool schema args
+  // Set the responseFormat to "content_and_artifact"
+  responseFormat: "content_and_artifact"
+});
+```
+
+See [how to return artifacts from tools](/docs/how_to/tool_artifacts/) for more details.
+
+### RunnableConfig
+
+You can use the `RunnableConfig` object to pass custom runtime values to tools.
+
+If you need to access the [RunnableConfig](/docs/concepts/runnables/#RunnableConfig) object from within a tool, you can do so by including the `RunnableConfig` in the tool's function signature.
+
+```typescript
+import { z } from "zod";
+import { RunnableConfig } from "@langchain/core/runnables";
+
+const someTool = tool(
+  async (args: any, config: RunnableConfig): Promise<[string, any]> => {
+    /**
+     * Tool that does something.
+     */
+  },
+  {
+    name: "some_tool",
+    description: "Tool that does something",
+    schema: z.object({ ... }),
+    responseFormat: "content_and_artifact"
+  }
+);
+
+await someTool.invoke(..., { configurable: { value: "some_value" } });
+```
+
+The `config` will not be part of the tool's schema and will be injected at runtime with appropriate values.
+
+## Best practices
+
+When designing tools to be used by models, keep the following in mind:
+
+- Tools that are well-named, correctly documented and properly type-hinted are easier for models to use.
+- Design simple and narrowly scoped tools, as they are easier for models to use correctly.
+- Use chat models that support [tool-calling](/docs/concepts/tool_calling) APIs to take advantage of tools.
+
+## Toolkits
+
+LangChain has a concept of **toolkits**. This is a very thin abstraction that groups together tools that
+are designed to be used together for specific tasks.
+
+### Interface
+
+All Toolkits expose a `getTools` method which returns a list of tools. You can therefore do:
+
+```typescript
+// Initialize a toolkit
+const toolkit = new ExampleToolkit(...)
+
+// Get list of tools
+const tools = toolkit.getTools()
+```
+
+## Related resources
+
+See the following resources for more information:
+
+- [API Reference for `tool`](https://api.js.langchain.com/functions/_langchain_core.tools.tool-1.html)
+- [How to create custom tools](/docs/how_to/custom_tools/)
+- [How to pass run time values to tools](/docs/how_to/tool_runtime/)
+- [All LangChain tool how-to guides](https://docs.langchain.com/docs/how_to/#tools)
+- [Additional how-to guides that show usage with LangGraph](https://langchain-ai.github.io/langgraphjs/how-tos/tool-calling/)
+- For tool integrations, see the [tool integration docs](https://docs.langchain.com/docs/integrations/tools/).
diff --git a/docs/core_docs/docs/concepts/tracing.mdx b/docs/core_docs/docs/concepts/tracing.mdx new file mode 100644 index 000000000000..659992eeb957 --- /dev/null +++ b/docs/core_docs/docs/concepts/tracing.mdx @@ -0,0 +1,10 @@ +# Tracing + + + +A trace is essentially a series of steps that your application takes to go from input to output. +Traces contain individual steps called `runs`. These can be individual calls from a model, retriever, +tool, or sub-chains. +Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues. + +For a deeper dive, check out [this LangSmith conceptual guide](https://docs.smith.langchain.com/concepts/tracing). diff --git a/docs/core_docs/docs/concepts/vectorstores.mdx b/docs/core_docs/docs/concepts/vectorstores.mdx new file mode 100644 index 000000000000..b8d372c75d0f --- /dev/null +++ b/docs/core_docs/docs/concepts/vectorstores.mdx @@ -0,0 +1,194 @@ +# Vector stores + + + +:::info[Prerequisites] + +- [Embeddings](/docs/concepts/embedding_models/) +- [Text splitters](/docs/concepts/text_splitters/) + +::: +:::info[Note] + +This conceptual overview focuses on text-based indexing and retrieval for simplicity. +However, embedding models can be [multi-modal](https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-multimodal-embeddings) +and vector stores can be used to store and retrieve a variety of data types beyond text. +::: + +## Overview + +Vector stores are specialized data stores that enable indexing and retrieving information based on vector representations. + +These vectors, called [embeddings](/docs/concepts/embedding_models/), capture the semantic meaning of data that has been embedded. + +Vector stores are frequently used to search over unstructured data, such as text, images, and audio, to retrieve relevant information based on semantic similarity rather than exact keyword matches. + +![Vector stores](/img/vectorstores.png) + +## Integrations + +LangChain has a large number of vectorstore integrations, allowing users to easily switch between different vectorstore implementations. + +Please see the [full list of LangChain vectorstore integrations](/docs/integrations/vectorstores/). + +## Interface + +LangChain provides a standard interface for working with vector stores, allowing users to easily switch between different vectorstore implementations. + +The interface consists of basic methods for writing, deleting and searching for documents in the vector store. + +The key methods are: + +- `addDocuments`: Add a list of texts to the vector store. +- `deleteDocuments` / `delete`: Delete a list of documents from the vector store. +- `similaritySearch`: Search for similar documents to a given query. + +## Initialization + +Most vectors in LangChain accept an embedding model as an argument when initializing the vector store. + +We will use LangChain's [MemoryVectorStore](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html) implementation to illustrate the API. + +```typescript +import { MemoryVectorStore } from "langchain/vectorstores/memory"; +// Initialize with an embedding model +const vectorStore = new MemoryVectorStore(new SomeEmbeddingModel()); +``` + +## Adding documents + +To add documents, use the `addDocuments` method. + +This API works with a list of [Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) objects. 
+`Document` objects all have `pageContent` and `metadata` attributes, making them a universal way to store unstructured text and associated metadata.
+
+```typescript
+import { Document } from "@langchain/core/documents";
+
+const document1 = new Document({
+  pageContent: "I had chocolate chip pancakes and scrambled eggs for breakfast this morning.",
+  metadata: { source: "tweet" },
+});
+
+const document2 = new Document({
+  pageContent: "The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees.",
+  metadata: { source: "news" },
+});
+
+const documents = [document1, document2];
+
+await vectorStore.addDocuments(documents);
+```
+
+You should usually provide IDs for the documents you add to the vector store, so
+that instead of adding the same document multiple times, you can update the existing document.
+
+```typescript
+await vectorStore.addDocuments(documents, { ids: ["doc1", "doc2"] });
+```
+
+## Delete
+
+To delete documents, use the `deleteDocuments` method, which takes a list of document IDs to delete.
+
+```typescript
+await vectorStore.deleteDocuments(["doc1"]);
+```
+
+or the `delete` method:
+
+```typescript
+await vectorStore.delete({ ids: ["doc1"] });
+```
+
+## Search
+
+Vector stores embed and store the documents that are added.
+If we pass in a query, the vectorstore will embed the query, perform a similarity search over the embedded documents, and return the most similar ones.
+This captures two important concepts: first, there needs to be a way to measure the similarity between the query and _any_ [embedded](/docs/concepts/embedding_models/) document.
+Second, there needs to be an algorithm to efficiently perform this similarity search across _all_ embedded documents.
+
+### Similarity metrics
+
+A critical advantage of embedding vectors is that they can be compared using many simple mathematical operations:
+
+- **Cosine Similarity**: Measures the cosine of the angle between two vectors.
+- **Euclidean Distance**: Measures the straight-line distance between two points.
+- **Dot Product**: Measures the projection of one vector onto another.
+
+The similarity metric can sometimes be selected when initializing the vectorstore. Please refer
+to the documentation of the specific vectorstore you are using to see what similarity metrics are supported.
+
+:::info[Further reading]
+
+- See [this documentation](https://developers.google.com/machine-learning/clustering/dnn-clustering/supervised-similarity) from Google on similarity metrics to consider with embeddings.
+- See Pinecone's [blog post](https://www.pinecone.io/learn/vector-similarity/) on similarity metrics.
+- See OpenAI's [FAQ](https://platform.openai.com/docs/guides/embeddings/faq) on what similarity metric to use with OpenAI embeddings.
+
+:::
+
+### Similarity search
+
+Given a similarity metric to measure the distance between the embedded query and any embedded document, we need an algorithm to efficiently search over _all_ the embedded documents to find the most similar ones.
+There are various ways to do this. As an example, many vectorstores implement [HNSW (Hierarchical Navigable Small World)](https://www.pinecone.io/learn/series/faiss/hnsw/), a graph-based index structure that allows for efficient similarity search.
+Regardless of the search algorithm used under the hood, the LangChain vectorstore interface has a `similaritySearch` method for all integrations.
+This will take the search query, create an embedding, find similar documents, and return them as a list of [Documents](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html).
+
+```typescript
+const query = "my query";
+const docs = await vectorstore.similaritySearch(query);
+```
+
+Many vectorstores support search parameters to be passed with the `similaritySearch` method. See the documentation for the specific vectorstore you are using to see what parameters are supported.
+As an example, [Pinecone](https://api.js.langchain.com/classes/_langchain_pinecone.PineconeStore.html#similaritySearch) supports [several parameters](/docs/integrations/vectorstores/pinecone/#query-directly) that represent important general concepts:
+
+- `query (string) – Text to look up documents similar to.`
+- `k (number) – Number of Documents to return. Defaults to 4.`
+- `filter (Record | undefined) – Object of argument(s) to filter on metadata`
+
+In particular, many vectorstores support `k`, which controls the number of Documents to return, and `filter`, which allows for filtering documents by metadata.
+
+:::info[Further reading]
+
+- See the [how-to guide](/docs/how_to/vectorstores/) for more details on how to use the `similaritySearch` method.
+- See the [integrations page](/docs/integrations/vectorstores/) for more details on arguments that can be passed in to the `similaritySearch` method for specific vectorstores.
+
+:::
+
+### Metadata filtering
+
+While vectorstores implement a search algorithm to efficiently search over _all_ the embedded documents to find the most similar ones, many also support filtering on metadata.
+This allows structured filters to reduce the size of the similarity search space. These two concepts work well together:
+
+1. **Semantic search**: Query the unstructured data directly, often via embedding or keyword similarity.
+2. **Metadata search**: Apply a structured query to the metadata, filtering for specific documents.
+
+Vector store support for metadata filtering is typically dependent on the underlying vector store implementation.
+
+Here is example usage with [Pinecone](/docs/integrations/vectorstores/pinecone/#query-directly), showing how we filter for all documents that have the metadata key `source` with value `tweet`.
+
+```typescript
+await vectorstore.similaritySearch(
+  "LangChain provides abstractions to make working with LLMs easy",
+  2,
+  {
+    // The arguments of this field are provider specific.
+    filter: { source: "tweet" },
+  }
+);
+```
+
+:::info[Further reading]
+
+- See Pinecone's [documentation](https://docs.pinecone.io/guides/data/filter-with-metadata) on filtering with metadata.
+- See the [list of LangChain vectorstore integrations](/docs/integrations/retrievers/self_query/) that support metadata filtering.
+
+:::
+
+## Advanced search and retrieval techniques
+
+While algorithms like HNSW provide the foundation for efficient similarity search in many cases, additional techniques can be employed to improve search quality and diversity.
+For example, maximal marginal relevance is a re-ranking algorithm used to diversify search results, which is applied after the initial similarity search to ensure a more diverse set of results.
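+
+As a rough sketch (assuming a vector store integration that implements MMR via a `maxMarginalRelevanceSearch` method; not every integration does, so check the documentation for the store you are using), an MMR search might look like this:
+
+```typescript
+// Fetch a larger candidate pool (fetchK), then re-rank it down to k diverse results.
+const diverseDocs = await vectorstore.maxMarginalRelevanceSearch("my query", {
+  k: 2, // number of documents to return after re-ranking
+  fetchK: 10, // number of candidates to fetch before re-ranking
+});
+```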
+
+| Name                                                                                                               | When to use                               | Description                                                                                            |
+| ------------------------------------------------------------------------------------------------------------------ | ----------------------------------------- | ------------------------------------------------------------------------------------------------------ |
+| [Maximal Marginal Relevance (MMR)](/docs/integrations/vectorstores/pinecone/#maximal-marginal-relevance-searches) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. |
diff --git a/docs/core_docs/docs/concepts/why_langchain.mdx b/docs/core_docs/docs/concepts/why_langchain.mdx
new file mode 100644
index 000000000000..51f571df1d3a
--- /dev/null
+++ b/docs/core_docs/docs/concepts/why_langchain.mdx
@@ -0,0 +1,122 @@
+# Why LangChain?
+
+The goal of the `langchain` package and LangChain the company is to make it as easy as possible for developers to build applications that reason.
+While LangChain originally started as a single open source package, it has evolved into a company and a whole ecosystem.
+This page will talk about the LangChain ecosystem as a whole.
+Most of the components within the LangChain ecosystem can be used by themselves - so if you feel particularly drawn to certain components but not others, that is totally fine! Pick and choose whichever components you like best.
+
+## Features
+
+There are several primary needs that LangChain aims to address:
+
+1. **Standardized component interfaces:** The growing number of [models](/docs/integrations/chat/) and [related components](/docs/integrations/vectorstores/) for AI applications has resulted in a wide variety of different APIs that developers need to learn and use.
+   This diversity can make it challenging for developers to switch between providers or combine components when building applications.
+   LangChain exposes a standard interface for key components, making it easy to switch between providers.
+
+2. **Orchestration:** As applications become more complex, combining multiple components and models, there's [a growing need to efficiently connect these elements into control flows](https://lilianweng.github.io/posts/2023-06-23-agent/) that can [accomplish diverse tasks](https://www.sequoiacap.com/article/generative-ais-act-o1/).
+   Orchestration is crucial for building such applications.
+
+3. **Observability and evaluation:** As applications become more complex, it becomes increasingly difficult to understand what is happening within them.
+   Furthermore, the pace of development can become rate-limited by the [paradox of choice](https://en.wikipedia.org/wiki/Paradox_of_choice):
+   for example, developers often wonder how to engineer their prompt or which LLM best balances accuracy, latency, and cost.
+   [Observability](https://en.wikipedia.org/wiki/Observability) and evaluations can help developers monitor their applications and rapidly answer these types of questions with confidence.
+
+## Standardized component interfaces
+
+LangChain provides common interfaces for components that are central to many AI applications.
+As an example, all [chat models](/docs/concepts/chat_models/) implement the [BaseChatModel](https://api.js.langchain.com/classes/_langchain_core.language_models_chat_models.BaseChatModel.html) interface.
+This provides a standard way to interact with chat models, supporting important but often provider-specific features like [tool calling](/docs/concepts/tool_calling/) and [structured outputs](/docs/concepts/structured_outputs/).
+
+### Example: chat models
+
+Many [model providers](/docs/concepts/chat_models/) support [tool calling](/docs/concepts/tool_calling/), a critical feature for many applications (e.g., [agents](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/)) that allows a developer to request model responses that match a particular schema.
+The APIs for each provider differ.
+LangChain's [chat model](/docs/concepts/chat_models/) interface provides a common way to bind [tools](/docs/concepts/tools) to a model in order to support [tool calling](/docs/concepts/tool_calling/):
+
+```typescript
+// Tool creation
+const tools = [myTool];
+// Tool binding
+const modelWithTools = model.bindTools(tools);
+```
+
+Similarly, getting models to produce [structured outputs](/docs/concepts/structured_outputs/) is an extremely common use case.
+Providers support different approaches for this, including [JSON mode or tool calling](https://platform.openai.com/docs/guides/structured-outputs), with different APIs.
+LangChain's [chat model](/docs/concepts/chat_models/) interface provides a common way to produce structured outputs using the `withStructuredOutput()` method:
+
+```typescript
+// Define the output schema as a Zod schema
+const schema = z.object({ ... });
+// Bind the schema to the model
+const modelWithStructure = model.withStructuredOutput(schema)
+```
+
+### Example: retrievers
+
+In the context of [RAG](/docs/concepts/rag/) and LLM application components, LangChain's [retriever](/docs/concepts/retrievers/) interface provides a standard way to connect to many different types of data services or databases (e.g., [vector stores](/docs/concepts/vectorstores) or other databases).
+The underlying implementation of the retriever depends on the type of data store or database you are connecting to, but all retrievers implement the [runnable interface](/docs/concepts/runnables/), meaning they can be invoked in a common manner.
+
+```typescript
+const documents = await myRetriever.invoke("What is the meaning of life?");
+```
+
+```text
+[
+  Document({
+    pageContent: "The meaning of life is 42.",
+    metadata: { ... },
+  }),
+  Document({
+    pageContent: "The meaning of life is to use LangChain.",
+    metadata: { ... },
+  }),
+  ...
+]
+```
+
+## Orchestration
+
+While standardization for individual components is useful, we've increasingly seen that developers want to _combine_ components into more complex applications.
+This motivates the need for orchestration.
+There are several common characteristics of LLM applications that this orchestration layer should support:
+
+- **Complex control flow:** The application requires complex patterns such as cycles (e.g., a loop that reiterates until a condition is met).
+- **[Persistence](https://langchain-ai.github.io/langgraphjs/concepts/persistence/):** The application needs to maintain [short-term and / or long-term memory](https://langchain-ai.github.io/langgraphjs/concepts/memory/).
+- **[Human-in-the-loop](https://langchain-ai.github.io/langgraphjs/concepts/human_in_the_loop/):** The application needs human interaction, e.g., pausing, reviewing, editing, approving certain steps.
+
+The recommended way to do orchestration for these complex applications is [LangGraph](https://langchain-ai.github.io/langgraphjs/concepts/high_level/).
+LangGraph is a library that gives developers a high degree of control by expressing the flow of the application as a set of nodes and edges.
+LangGraph comes with built-in support for [persistence](https://langchain-ai.github.io/langgraphjs/concepts/persistence/), [human-in-the-loop](https://langchain-ai.github.io/langgraphjs/concepts/human_in_the_loop/), [memory](https://langchain-ai.github.io/langgraphjs/concepts/memory/), and other features.
+It's particularly well suited for building [agents](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/) or [multi-agent](https://langchain-ai.github.io/langgraphjs/concepts/multi_agent/) applications.
+Importantly, individual LangChain components can be used within LangGraph nodes, but you can also use LangGraph **without** using LangChain components.
+
+:::info[Further reading]
+
+Have a look at our free course, [Introduction to LangGraph](https://academy.langchain.com/courses/intro-to-langgraph), to learn more about how to use LangGraph to build complex applications.
+
+:::
+
+## Observability and evaluation
+
+The pace of AI application development is often rate-limited by the paradox of choice.
+Developers often wonder how to engineer their prompt or which LLM best balances accuracy, latency, and cost.
+High quality tracing and evaluations can help you rapidly answer these types of questions with confidence.
+[LangSmith](https://docs.smith.langchain.com/) is our platform that supports observability and evaluation for AI applications.
+See our conceptual guides on [evaluations](https://docs.smith.langchain.com/concepts/evaluation) and [tracing](https://docs.smith.langchain.com/concepts/tracing) for more details.
+
+:::info[Further reading]
+
+See our video playlist on [LangSmith tracing and evaluations](https://youtube.com/playlist?list=PLfaIDFEXuae0um8Fj0V4dHG37fGFU8Q5S&feature=shared) for more details.
+
+:::
+
+## Conclusion
+
+LangChain offers standard interfaces for components that are central to many AI applications, which provides a few specific advantages:
+
+- **Ease of swapping providers:** It allows you to swap out different component providers without having to change the underlying code.
+- **Advanced features:** It provides common methods for more advanced features, such as [streaming](/docs/concepts/runnables/#streaming) and [tool calling](/docs/concepts/tool_calling/).
+
+[LangGraph](https://langchain-ai.github.io/langgraphjs/concepts/high_level/) makes it possible to orchestrate complex applications (e.g., [agents](/docs/concepts/agents/)) and provides features like [persistence](https://langchain-ai.github.io/langgraphjs/concepts/persistence/), [human-in-the-loop](https://langchain-ai.github.io/langgraphjs/concepts/human_in_the_loop/), or [memory](https://langchain-ai.github.io/langgraphjs/concepts/memory/).
+
+[LangSmith](https://docs.smith.langchain.com/) makes it possible to iterate with confidence on your applications, by providing LLM-specific observability and a framework for testing and evaluating your application.
diff --git a/docs/core_docs/docs/how_to/agent_executor.ipynb b/docs/core_docs/docs/how_to/agent_executor.ipynb index 1bf617ebec21..d9d566fb81db 100644 --- a/docs/core_docs/docs/how_to/agent_executor.ipynb +++ b/docs/core_docs/docs/how_to/agent_executor.ipynb @@ -1,959 +1,959 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "f4c03f40-1328-412d-8a48-1db0cd481b77", - "metadata": {}, - "source": [ - "# How to use legacy LangChain Agents (AgentExecutor)\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Tools](/docs/concepts#tools)\n", - "\n", - ":::\n", - "\n", - "By themselves, language models can't take actions - they just output text.\n", - "Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.\n", - "The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish.\n", - "\n", - "In this tutorial we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n", - "\n", - ":::{.callout-important}\n", - "This section will cover building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph](/docs/concepts/#langgraphjs).\n", - ":::\n", - "\n", - "## Concepts\n", - "\n", - "Concepts we will cover are:\n", - "- Using [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n", - "- Creating a [Retriever](/docs/concepts/#retrievers) to expose specific information to our agent\n", - "- Using a Search [Tool](/docs/concepts/#tools) to look up things online\n", - "- [`Chat History`](/docs/concepts/#chat-history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to followup questions. \n", - "- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n", - "\n", - "## Setup\n", - "\n", - "### Jupyter Notebook\n", - "\n", - "This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n", - "\n", - "This and other tutorials are perhaps most conveniently run in a Jupyter notebook. 
See [here](https://jupyter.org/install) for instructions on how to install.\n", - "\n", - "### Installation\n", - "\n", - "To install LangChain (and `cheerio` for the web loader) run:\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from '@theme/Npm2Yarn';\n", - "\n", - "\n", - " langchain @langchain/core cheerio\n", - "\n", - "```\n", - "\n", - "For more details, see our [Installation guide](/docs/how_to/installation/).\n", - "\n", - "### LangSmith\n", - "\n", - "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", - "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", - "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", - "\n", - "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", - "\n", - "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", - "\n", - "# Reduce tracing latency if you are not in a serverless environment\n", - "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", - "```\n" - ] - }, - { - "cell_type": "markdown", - "id": "c335d1bf", - "metadata": {}, - "source": [ - "## Define tools\n", - "\n", - "We first need to create the tools we want to use. We will use two tools: [Tavily](/docs/integrations/tools/tavily_search) (to search online) and then a retriever over a local index we will create\n", - "\n", - "### [Tavily](/docs/integrations/tools/tavily_search)\n", - "\n", - "We have a built-in tool in LangChain to easily use Tavily search engine as tool.\n", - "Note that this requires an API key - they have a free tier, but if you don't have one or don't want to create one, you can always ignore this step.\n", - "\n", - "Once you create your API key, you will need to export that as:\n", - "\n", - "```bash\n", - "export TAVILY_API_KEY=\"...\"\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9cc86c0b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[32m`[{\"title\":\"Weather in San Francisco\",\"url\":\"https://www.weatherapi.com/\",\"content\":\"{'location': {'n`\u001b[39m... 1358 more characters" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import \"cheerio\"; // This is required in notebooks to use the `CheerioWebBaseLoader`\n", - "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\"\n", - "\n", - "const search = new TavilySearchResults({\n", - " maxResults: 2\n", - "});\n", - "\n", - "await search.invoke(\"what is the weather in SF\")" - ] - }, - { - "cell_type": "markdown", - "id": "e8097977", - "metadata": {}, - "source": [ - "### Retriever\n", - "\n", - "We will also create a retriever over some data of our own. For a deeper explanation of each step here, see [this tutorial](/docs/tutorials/rag)." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "9c9ce713", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Document {\n", - " pageContent: \u001b[32m'description=\"A sample dataset in LangSmith.\")client.create_examples( inputs=[ {\"postfix\": '\u001b[39m... 
891 more characters,\n", - " metadata: {\n", - " source: \u001b[32m\"https://docs.smith.langchain.com/overview\"\u001b[39m,\n", - " loc: { lines: { from: \u001b[33m4\u001b[39m, to: \u001b[33m4\u001b[39m } }\n", - " },\n", - " id: \u001b[90mundefined\u001b[39m\n", - "}" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", - "\n", - "const loader = new CheerioWebBaseLoader(\"https://docs.smith.langchain.com/overview\");\n", - "const docs = await loader.load();\n", - "const splitter = new RecursiveCharacterTextSplitter(\n", - " {\n", - " chunkSize: 1000,\n", - " chunkOverlap: 200\n", - " }\n", - ");\n", - "const documents = await splitter.splitDocuments(docs);\n", - "const vectorStore = await MemoryVectorStore.fromDocuments(documents, new OpenAIEmbeddings());\n", - "const retriever = vectorStore.asRetriever();\n", - "\n", - "(await retriever.invoke(\"how to upload a dataset\"))[0];" - ] - }, - { - "cell_type": "markdown", - "id": "04aeca39", - "metadata": {}, - "source": [ - "Now that we have populated our index that we will do doing retrieval over, we can easily turn it into a tool (the format needed for an agent to properly use it)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "7280b031", - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const retrieverTool = tool(async ({ input }, config) => {\n", - " const docs = await retriever.invoke(input, config);\n", - " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", - "}, {\n", - " name: \"langsmith_search\",\n", - " description:\n", - " \"Search for information about LangSmith. For any questions about LangSmith, you must use this tool!\",\n", - " schema: z.object({\n", - " input: z.string()\n", - " }),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "c3b47c1d", - "metadata": {}, - "source": [ - "### Tools\n", - "\n", - "Now that we have created both, we can create a list of tools that we will use downstream." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "b8e8e710", - "metadata": {}, - "outputs": [], - "source": [ - "const tools = [search, retrieverTool];" - ] - }, - { - "cell_type": "markdown", - "id": "e00068b0", - "metadata": {}, - "source": [ - "## Using Language Models\n", - "\n", - "Next, let's learn how to use a language model by to call tools. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "642ed8bf", - "metadata": {}, - "source": [ - "You can call the language model by passing in a list of messages. By default, the response is a `content` string." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "def033a4", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const model = new ChatOpenAI({ model: \"gpt-4o-mini\", temperature: 0 })" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "c96c960b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[32m\"Hello! How can I assist you today?\"\u001b[39m" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const response = await model.invoke([{\n", - " role: \"user\",\n", - " content: \"hi!\"\n", - "}]);\n", - "\n", - "response.content;" - ] - }, - { - "cell_type": "markdown", - "id": "47bf8210", - "metadata": {}, - "source": [ - "We can now see what it is like to enable this model to do tool calling. In order to enable that we use `.bind` to give the language model knowledge of these tools" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "ba692a74", - "metadata": {}, - "outputs": [], - "source": [ - "const modelWithTools = model.bindTools(tools);" - ] - }, - { - "cell_type": "markdown", - "id": "fd920b69", - "metadata": {}, - "source": [ - "We can now call the model. Let's first call it with a normal message, and see how it responds. We can look at both the `content` field as well as the `tool_calls` field." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "b6a7e925", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Content: Hello! How can I assist you today?\n", - "Tool calls: \n" - ] - } - ], - "source": [ - "const responseWithTools = await modelWithTools.invoke([{\n", - " role: \"user\",\n", - " content: \"Hi!\"\n", - "}])\n", - "\n", - "console.log(`Content: ${responseWithTools.content}`)\n", - "console.log(`Tool calls: ${responseWithTools.tool_calls}`)" - ] - }, - { - "cell_type": "markdown", - "id": "e8c81e76", - "metadata": {}, - "source": [ - "Now, let's try calling it with some input that would expect a tool to be called." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "688b465d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Content: \n", - "Tool calls: [\n", - " {\n", - " \"name\": \"tavily_search_results_json\",\n", - " \"args\": {\n", - " \"input\": \"current weather in San Francisco\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_gtJ5rrjXswO8EIvePrxyGQbR\"\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const responseWithToolCalls = await modelWithTools.invoke([{\n", - " role: \"user\",\n", - " content: \"What's the weather in SF?\"\n", - "}])\n", - "\n", - "console.log(`Content: ${responseWithToolCalls.content}`)\n", - "console.log(`Tool calls: ${JSON.stringify(responseWithToolCalls.tool_calls, null, 2)}`)" - ] - }, - { - "cell_type": "markdown", - "id": "83c4bcd3", - "metadata": {}, - "source": [ - "We can see that there's now no content, but there is a tool call! It wants us to call the Tavily Search tool.\n", - "\n", - "This isn't calling that tool yet - it's just telling us to. In order to actually calll it, we'll want to create our agent." - ] - }, - { - "cell_type": "markdown", - "id": "40ccec80", - "metadata": {}, - "source": [ - "## Create the agent\n", - "\n", - "Now that we have defined the tools and the LLM, we can create the agent. 
We will be using a tool calling agent - for more information on this type of agent, as well as other options, see [this guide](/docs/concepts/#agent_types/).\n", - "\n", - "We can first choose the prompt we want to use to guide the agent:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "af83d3e3", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " SystemMessagePromptTemplate {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " prompt: PromptTemplate {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " inputVariables: [],\n", - " templateFormat: \"f-string\",\n", - " template: \"You are a helpful assistant\"\n", - " },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", - " inputVariables: [],\n", - " outputParser: undefined,\n", - " partialVariables: undefined,\n", - " templateFormat: \"f-string\",\n", - " template: \"You are a helpful assistant\",\n", - " validateTemplate: true,\n", - " additionalContentFields: undefined\n", - " }\n", - " },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", - " inputVariables: [],\n", - " additionalOptions: {},\n", - " prompt: PromptTemplate {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " inputVariables: [],\n", - " templateFormat: \"f-string\",\n", - " template: \"You are a helpful assistant\"\n", - " },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", - " inputVariables: [],\n", - " outputParser: undefined,\n", - " partialVariables: undefined,\n", - " templateFormat: \"f-string\",\n", - " template: \"You are a helpful assistant\",\n", - " validateTemplate: true,\n", - " additionalContentFields: undefined\n", - " },\n", - " messageClass: undefined,\n", - " chatMessageClass: undefined\n", - " },\n", - " MessagesPlaceholder {\n", - " lc_serializable: true,\n", - " lc_kwargs: { variableName: \"chat_history\", optional: true },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", - " variableName: \"chat_history\",\n", - " optional: true\n", - " },\n", - " HumanMessagePromptTemplate {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " prompt: PromptTemplate {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " inputVariables: [Array],\n", - " templateFormat: \"f-string\",\n", - " template: \"{input}\"\n", - " },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", - " inputVariables: [ \"input\" ],\n", - " outputParser: undefined,\n", - " partialVariables: undefined,\n", - " templateFormat: \"f-string\",\n", - " template: \"{input}\",\n", - " validateTemplate: true,\n", - " additionalContentFields: undefined\n", - " }\n", - " },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", - " inputVariables: [ \"input\" ],\n", - " additionalOptions: {},\n", - " prompt: PromptTemplate {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " inputVariables: [ \"input\" ],\n", - " templateFormat: \"f-string\",\n", - " template: \"{input}\"\n", - " },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", - " inputVariables: [ 
\"input\" ],\n", - " outputParser: undefined,\n", - " partialVariables: undefined,\n", - " templateFormat: \"f-string\",\n", - " template: \"{input}\",\n", - " validateTemplate: true,\n", - " additionalContentFields: undefined\n", - " },\n", - " messageClass: undefined,\n", - " chatMessageClass: undefined\n", - " },\n", - " MessagesPlaceholder {\n", - " lc_serializable: true,\n", - " lc_kwargs: { variableName: \"agent_scratchpad\", optional: true },\n", - " lc_runnable: true,\n", - " name: undefined,\n", - " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", - " variableName: \"agent_scratchpad\",\n", - " optional: true\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " [\"placeholder\", \"{chat_history}\"],\n", - " [\"human\", \"{input}\"],\n", - " [\"placeholder\", \"{agent_scratchpad}\"],\n", - "]);\n", - "\n", - "console.log(prompt.promptMessages);" - ] - }, - { - "cell_type": "markdown", - "id": "f8014c9d", - "metadata": {}, - "source": [ - "Now, we can initalize the agent with the LLM, the prompt, and the tools. The agent is responsible for taking in input and deciding what actions to take. Crucially, the Agent does not execute those actions - that is done by the AgentExecutor (next step). For more information about how to think about these components, see our [conceptual guide](/docs/concepts/#agents).\n", - "\n", - "Note that we are passing in the `model`, not `modelWithTools`. That is because `createToolCallingAgent` will call `.bind` for us under the hood." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "89cf72b4-6046-4b47-8f27-5522d8cb8036", - "metadata": {}, - "outputs": [], - "source": [ - "import { createToolCallingAgent } from \"langchain/agents\";\n", - "\n", - "const agent = await createToolCallingAgent({ llm: model, tools, prompt })" - ] - }, - { - "cell_type": "markdown", - "id": "1a58c9f8", - "metadata": {}, - "source": [ - "Finally, we combine the agent (the brains) with the tools inside the AgentExecutor (which will repeatedly call the agent and execute tools)." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "ce33904a", - "metadata": {}, - "outputs": [], - "source": [ - "import { AgentExecutor } from \"langchain/agents\";\n", - "\n", - "const agentExecutor = new AgentExecutor({\n", - " agent,\n", - " tools\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "e4df0e06", - "metadata": {}, - "source": [ - "## Run the agent\n", - "\n", - "We can now run the agent on a few queries! Note that for now, these are all **stateless** queries (it won't remember previous interactions).\n", - "\n", - "First up, let's how it responds when there's no need to call a tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "114ba50d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{ input: \u001b[32m\"hi!\"\u001b[39m, output: \u001b[32m\"Hello! 
How can I assist you today?\"\u001b[39m }" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await agentExecutor.invoke({ input: \"hi!\" })" - ] - }, - { - "cell_type": "markdown", - "id": "71493a42", - "metadata": {}, - "source": [ - "In order to see exactly what is happening under the hood (and to make sure it's not calling a tool) we can take a look at the [LangSmith trace](https://smith.langchain.com/public/b8051e80-14fd-4931-be0f-6416280bc500/r)\n", - "\n", - "Let's now try it out on an example where it should be invoking the retriever" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "3fa4780a", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " input: \u001b[32m\"how can langsmith help with testing?\"\u001b[39m,\n", - " output: \u001b[32m\"LangSmith can assist with testing in several ways, particularly for applications built using large l\"\u001b[39m... 1474 more characters\n", - "}" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await agentExecutor.invoke({ input: \"how can langsmith help with testing?\" })" - ] - }, - { - "cell_type": "markdown", - "id": "f2d94242", - "metadata": {}, - "source": [ - "Let's take a look at the [LangSmith trace](https://smith.langchain.com/public/35bd4f0f-aa2f-4ac2-b9a9-89ce0ca306ca/r) to make sure it's actually calling that.\n", - "\n", - "Now let's try one where it needs to call the search tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "77c2f769", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " input: \u001b[32m\"whats the weather in sf?\"\u001b[39m,\n", - " output: \u001b[32m\"The current weather in San Francisco is as follows:\\n\"\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m\"- **Temperature**: 15.6°C (60.1°F)\\n\"\u001b[39m +\n", - " \u001b[32m\"- **Conditio\"\u001b[39m... 303 more characters\n", - "}" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await agentExecutor.invoke({ input: \"whats the weather in sf?\" })" - ] - }, - { - "cell_type": "markdown", - "id": "c174f838", - "metadata": {}, - "source": [ - "We can check out the [LangSmith trace](https://smith.langchain.com/public/dfde6f46-0e7b-4dfe-813c-87d7bfb2ade5/r) to make sure it's calling the search tool effectively." - ] - }, - { - "cell_type": "markdown", - "id": "022cbc8a", - "metadata": {}, - "source": [ - "## Adding in memory\n", - "\n", - "As mentioned earlier, this agent is stateless. This means it does not remember previous interactions. To give it memory we need to pass in previous `chat_history`.\n", - "\n", - "**Note**: The input variable needs to be called `chat_history` because of the prompt we are using. If we use a different prompt, we could change the variable name." - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "c4073e35", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " input: \u001b[32m\"hi! my name is bob\"\u001b[39m,\n", - " chat_history: [],\n", - " output: \u001b[32m\"Hello Bob! 
How can I assist you today?\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "// Here we pass in an empty list of messages for chat_history because it is the first message in the chat\n", - "await agentExecutor.invoke({ input: \"hi! my name is bob\", chat_history: [] })" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "550e0c6e", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " chat_history: [\n", - " { role: \u001b[32m\"user\"\u001b[39m, content: \u001b[32m\"hi! my name is bob\"\u001b[39m },\n", - " {\n", - " role: \u001b[32m\"assistant\"\u001b[39m,\n", - " content: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m\n", - " }\n", - " ],\n", - " input: \u001b[32m\"what's my name?\"\u001b[39m,\n", - " output: \u001b[32m\"Your name is Bob. How can I help you today, Bob?\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await agentExecutor.invoke(\n", - " {\n", - " chat_history: [\n", - " { role: \"user\", content: \"hi! my name is bob\" },\n", - " { role: \"assistant\", content: \"Hello Bob! How can I assist you today?\" },\n", - " ],\n", - " input: \"what's my name?\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "07b3bcf2", - "metadata": {}, - "source": [ - "If we want to keep track of these messages automatically, we can wrap this in a RunnableWithMessageHistory.\n", - "\n", - "Because we have multiple inputs, we need to specify two things:\n", - "\n", - "- `inputMessagesKey`: The input key to use to add to the conversation history.\n", - "- `historyMessagesKey`: The key to add the loaded messages into.\n", - "\n", - "For more information on how to use this, see [this guide](/docs/how_to/message_history). " - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "8edd96e6", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " input: \u001b[32m\"hi! I'm bob\"\u001b[39m,\n", - " chat_history: [\n", - " HumanMessage {\n", - " \"content\": \"hi! I'm bob\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"Hello Bob! How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " }\n", - " ],\n", - " output: \u001b[32m\"Hello Bob! 
How can I assist you today?\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" + "cells": [ + { + "cell_type": "markdown", + "id": "f4c03f40-1328-412d-8a48-1db0cd481b77", + "metadata": {}, + "source": [ + "# How to use legacy LangChain Agents (AgentExecutor)\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Tools](/docs/concepts/tools)\n", + "\n", + ":::\n", + "\n", + "By themselves, language models can't take actions - they just output text.\n", + "Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.\n", + "The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish.\n", + "\n", + "In this tutorial we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n", + "\n", + ":::{.callout-important}\n", + "This section will cover building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph](https://langchain-ai.github.io/langgraphjs).\n", + ":::\n", + "\n", + "## Concepts\n", + "\n", + "Concepts we will cover are:\n", + "- Using [language models](/docs/concepts/chat_models), in particular their tool calling ability\n", + "- Creating a [Retriever](/docs/concepts/retrievers) to expose specific information to our agent\n", + "- Using a Search [Tool](/docs/concepts/tools) to look up things online\n", + "- [`Chat History`](/docs/concepts/chat_history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to followup questions. \n", + "- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n", + "\n", + "## Setup\n", + "\n", + "### Jupyter Notebook\n", + "\n", + "This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n", + "\n", + "This and other tutorials are perhaps most conveniently run in a Jupyter notebook. 
See [here](https://jupyter.org/install) for instructions on how to install.\n", + "\n", + "### Installation\n", + "\n", + "To install LangChain (and `cheerio` for the web loader) run:\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from '@theme/Npm2Yarn';\n", + "\n", + "\n", + " langchain @langchain/core cheerio\n", + "\n", + "```\n", + "\n", + "For more details, see our [Installation guide](/docs/how_to/installation/).\n", + "\n", + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", + "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", + "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", + "\n", + "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "```shell\n", + "export LANGCHAIN_TRACING_V2=\"true\"\n", + "export LANGCHAIN_API_KEY=\"...\"\n", + "\n", + "# Reduce tracing latency if you are not in a serverless environment\n", + "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "id": "c335d1bf", + "metadata": {}, + "source": [ + "## Define tools\n", + "\n", + "We first need to create the tools we want to use. We will use two tools: [Tavily](/docs/integrations/tools/tavily_search) (to search online) and then a retriever over a local index we will create\n", + "\n", + "### [Tavily](/docs/integrations/tools/tavily_search)\n", + "\n", + "We have a built-in tool in LangChain to easily use Tavily search engine as tool.\n", + "Note that this requires an API key - they have a free tier, but if you don't have one or don't want to create one, you can always ignore this step.\n", + "\n", + "Once you create your API key, you will need to export that as:\n", + "\n", + "```bash\n", + "export TAVILY_API_KEY=\"...\"\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9cc86c0b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m`[{\"title\":\"Weather in San Francisco\",\"url\":\"https://www.weatherapi.com/\",\"content\":\"{'location': {'n`\u001b[39m... 1358 more characters" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import \"cheerio\"; // This is required in notebooks to use the `CheerioWebBaseLoader`\n", + "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\"\n", + "\n", + "const search = new TavilySearchResults({\n", + " maxResults: 2\n", + "});\n", + "\n", + "await search.invoke(\"what is the weather in SF\")" + ] + }, + { + "cell_type": "markdown", + "id": "e8097977", + "metadata": {}, + "source": [ + "### Retriever\n", + "\n", + "We will also create a retriever over some data of our own. For a deeper explanation of each step here, see [this tutorial](/docs/tutorials/rag)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "9c9ce713", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Document {\n", + " pageContent: \u001b[32m'description=\"A sample dataset in LangSmith.\")client.create_examples( inputs=[ {\"postfix\": '\u001b[39m... 
891 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://docs.smith.langchain.com/overview\"\u001b[39m,\n", + " loc: { lines: { from: \u001b[33m4\u001b[39m, to: \u001b[33m4\u001b[39m } }\n", + " },\n", + " id: \u001b[90mundefined\u001b[39m\n", + "}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", + "\n", + "const loader = new CheerioWebBaseLoader(\"https://docs.smith.langchain.com/overview\");\n", + "const docs = await loader.load();\n", + "const splitter = new RecursiveCharacterTextSplitter(\n", + " {\n", + " chunkSize: 1000,\n", + " chunkOverlap: 200\n", + " }\n", + ");\n", + "const documents = await splitter.splitDocuments(docs);\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(documents, new OpenAIEmbeddings());\n", + "const retriever = vectorStore.asRetriever();\n", + "\n", + "(await retriever.invoke(\"how to upload a dataset\"))[0];" + ] + }, + { + "cell_type": "markdown", + "id": "04aeca39", + "metadata": {}, + "source": [ + "Now that we have populated our index that we will do doing retrieval over, we can easily turn it into a tool (the format needed for an agent to properly use it)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "7280b031", + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const retrieverTool = tool(async ({ input }, config) => {\n", + " const docs = await retriever.invoke(input, config);\n", + " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", + "}, {\n", + " name: \"langsmith_search\",\n", + " description:\n", + " \"Search for information about LangSmith. For any questions about LangSmith, you must use this tool!\",\n", + " schema: z.object({\n", + " input: z.string()\n", + " }),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "c3b47c1d", + "metadata": {}, + "source": [ + "### Tools\n", + "\n", + "Now that we have created both, we can create a list of tools that we will use downstream." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "b8e8e710", + "metadata": {}, + "outputs": [], + "source": [ + "const tools = [search, retrieverTool];" + ] + }, + { + "cell_type": "markdown", + "id": "e00068b0", + "metadata": {}, + "source": [ + "## Using Language Models\n", + "\n", + "Next, let's learn how to use a language model by to call tools. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "642ed8bf", + "metadata": {}, + "source": [ + "You can call the language model by passing in a list of messages. By default, the response is a `content` string." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "def033a4", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const model = new ChatOpenAI({ model: \"gpt-4o-mini\", temperature: 0 })" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "c96c960b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Hello! How can I assist you today?\"\u001b[39m" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const response = await model.invoke([{\n", + " role: \"user\",\n", + " content: \"hi!\"\n", + "}]);\n", + "\n", + "response.content;" + ] + }, + { + "cell_type": "markdown", + "id": "47bf8210", + "metadata": {}, + "source": [ + "We can now see what it is like to enable this model to do tool calling. In order to enable that we use `.bind` to give the language model knowledge of these tools" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "ba692a74", + "metadata": {}, + "outputs": [], + "source": [ + "const modelWithTools = model.bindTools(tools);" + ] + }, + { + "cell_type": "markdown", + "id": "fd920b69", + "metadata": {}, + "source": [ + "We can now call the model. Let's first call it with a normal message, and see how it responds. We can look at both the `content` field as well as the `tool_calls` field." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "b6a7e925", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Content: Hello! How can I assist you today?\n", + "Tool calls: \n" + ] + } + ], + "source": [ + "const responseWithTools = await modelWithTools.invoke([{\n", + " role: \"user\",\n", + " content: \"Hi!\"\n", + "}])\n", + "\n", + "console.log(`Content: ${responseWithTools.content}`)\n", + "console.log(`Tool calls: ${responseWithTools.tool_calls}`)" + ] + }, + { + "cell_type": "markdown", + "id": "e8c81e76", + "metadata": {}, + "source": [ + "Now, let's try calling it with some input that would expect a tool to be called." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "688b465d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Content: \n", + "Tool calls: [\n", + " {\n", + " \"name\": \"tavily_search_results_json\",\n", + " \"args\": {\n", + " \"input\": \"current weather in San Francisco\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_gtJ5rrjXswO8EIvePrxyGQbR\"\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const responseWithToolCalls = await modelWithTools.invoke([{\n", + " role: \"user\",\n", + " content: \"What's the weather in SF?\"\n", + "}])\n", + "\n", + "console.log(`Content: ${responseWithToolCalls.content}`)\n", + "console.log(`Tool calls: ${JSON.stringify(responseWithToolCalls.tool_calls, null, 2)}`)" + ] + }, + { + "cell_type": "markdown", + "id": "83c4bcd3", + "metadata": {}, + "source": [ + "We can see that there's now no content, but there is a tool call! It wants us to call the Tavily Search tool.\n", + "\n", + "This isn't calling that tool yet - it's just telling us to. In order to actually calll it, we'll want to create our agent." + ] + }, + { + "cell_type": "markdown", + "id": "40ccec80", + "metadata": {}, + "source": [ + "## Create the agent\n", + "\n", + "Now that we have defined the tools and the LLM, we can create the agent. 
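Before wiring that up, it can help to see roughly what "actually calling" a tool looks like. The snippet below is only an illustrative sketch (it is not how `AgentExecutor` is implemented internally); it reuses the `tools` array and the `responseWithToolCalls` message from the cells above, looks up each requested tool by name, and invokes it with the arguments the model generated:

```typescript
// Illustrative only - the AgentExecutor created below automates this loop.
// `tools` and `responseWithToolCalls` come from the earlier cells.
const toolsByName: Record<string, any> = Object.fromEntries(
  tools.map((tool) => [tool.name, tool])
);

for (const toolCall of responseWithToolCalls.tool_calls ?? []) {
  const selectedTool = toolsByName[toolCall.name];
  // Pass the model-generated arguments straight to the tool.
  const observation = await selectedTool.invoke(toolCall.args);
  console.log(`${toolCall.name} returned:`, observation);
}
```

The agent we build next automates exactly this loop, feeding each tool result back to the model until it has enough information to answer.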
We will be using a tool calling agent - for more information on this type of agent, as well as other options, see [this guide](/docs/concepts/agents/).\n", + "\n", + "We can first choose the prompt we want to use to guide the agent:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "af83d3e3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessagePromptTemplate {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " prompt: PromptTemplate {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " inputVariables: [],\n", + " templateFormat: \"f-string\",\n", + " template: \"You are a helpful assistant\"\n", + " },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", + " inputVariables: [],\n", + " outputParser: undefined,\n", + " partialVariables: undefined,\n", + " templateFormat: \"f-string\",\n", + " template: \"You are a helpful assistant\",\n", + " validateTemplate: true,\n", + " additionalContentFields: undefined\n", + " }\n", + " },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", + " inputVariables: [],\n", + " additionalOptions: {},\n", + " prompt: PromptTemplate {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " inputVariables: [],\n", + " templateFormat: \"f-string\",\n", + " template: \"You are a helpful assistant\"\n", + " },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", + " inputVariables: [],\n", + " outputParser: undefined,\n", + " partialVariables: undefined,\n", + " templateFormat: \"f-string\",\n", + " template: \"You are a helpful assistant\",\n", + " validateTemplate: true,\n", + " additionalContentFields: undefined\n", + " },\n", + " messageClass: undefined,\n", + " chatMessageClass: undefined\n", + " },\n", + " MessagesPlaceholder {\n", + " lc_serializable: true,\n", + " lc_kwargs: { variableName: \"chat_history\", optional: true },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", + " variableName: \"chat_history\",\n", + " optional: true\n", + " },\n", + " HumanMessagePromptTemplate {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " prompt: PromptTemplate {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " inputVariables: [Array],\n", + " templateFormat: \"f-string\",\n", + " template: \"{input}\"\n", + " },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", + " inputVariables: [ \"input\" ],\n", + " outputParser: undefined,\n", + " partialVariables: undefined,\n", + " templateFormat: \"f-string\",\n", + " template: \"{input}\",\n", + " validateTemplate: true,\n", + " additionalContentFields: undefined\n", + " }\n", + " },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", + " inputVariables: [ \"input\" ],\n", + " additionalOptions: {},\n", + " prompt: PromptTemplate {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " inputVariables: [ \"input\" ],\n", + " templateFormat: \"f-string\",\n", + " template: \"{input}\"\n", + " },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"prompt\" ],\n", + " inputVariables: [ \"input\" 
],\n", + " outputParser: undefined,\n", + " partialVariables: undefined,\n", + " templateFormat: \"f-string\",\n", + " template: \"{input}\",\n", + " validateTemplate: true,\n", + " additionalContentFields: undefined\n", + " },\n", + " messageClass: undefined,\n", + " chatMessageClass: undefined\n", + " },\n", + " MessagesPlaceholder {\n", + " lc_serializable: true,\n", + " lc_kwargs: { variableName: \"agent_scratchpad\", optional: true },\n", + " lc_runnable: true,\n", + " name: undefined,\n", + " lc_namespace: [ \"langchain_core\", \"prompts\", \"chat\" ],\n", + " variableName: \"agent_scratchpad\",\n", + " optional: true\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"placeholder\", \"{chat_history}\"],\n", + " [\"human\", \"{input}\"],\n", + " [\"placeholder\", \"{agent_scratchpad}\"],\n", + "]);\n", + "\n", + "console.log(prompt.promptMessages);" + ] + }, + { + "cell_type": "markdown", + "id": "f8014c9d", + "metadata": {}, + "source": [ + "Now, we can initalize the agent with the LLM, the prompt, and the tools. The agent is responsible for taking in input and deciding what actions to take. Crucially, the Agent does not execute those actions - that is done by the AgentExecutor (next step). For more information about how to think about these components, see our [conceptual guide](/docs/concepts/agents).\n", + "\n", + "Note that we are passing in the `model`, not `modelWithTools`. That is because `createToolCallingAgent` will call `.bind` for us under the hood." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "89cf72b4-6046-4b47-8f27-5522d8cb8036", + "metadata": {}, + "outputs": [], + "source": [ + "import { createToolCallingAgent } from \"langchain/agents\";\n", + "\n", + "const agent = await createToolCallingAgent({ llm: model, tools, prompt })" + ] + }, + { + "cell_type": "markdown", + "id": "1a58c9f8", + "metadata": {}, + "source": [ + "Finally, we combine the agent (the brains) with the tools inside the AgentExecutor (which will repeatedly call the agent and execute tools)." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "ce33904a", + "metadata": {}, + "outputs": [], + "source": [ + "import { AgentExecutor } from \"langchain/agents\";\n", + "\n", + "const agentExecutor = new AgentExecutor({\n", + " agent,\n", + " tools\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "e4df0e06", + "metadata": {}, + "source": [ + "## Run the agent\n", + "\n", + "We can now run the agent on a few queries! Note that for now, these are all **stateless** queries (it won't remember previous interactions).\n", + "\n", + "First up, let's how it responds when there's no need to call a tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "114ba50d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ input: \u001b[32m\"hi!\"\u001b[39m, output: \u001b[32m\"Hello! 
How can I assist you today?\"\u001b[39m }" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await agentExecutor.invoke({ input: \"hi!\" })" + ] + }, + { + "cell_type": "markdown", + "id": "71493a42", + "metadata": {}, + "source": [ + "In order to see exactly what is happening under the hood (and to make sure it's not calling a tool) we can take a look at the [LangSmith trace](https://smith.langchain.com/public/b8051e80-14fd-4931-be0f-6416280bc500/r)\n", + "\n", + "Let's now try it out on an example where it should be invoking the retriever" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3fa4780a", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " input: \u001b[32m\"how can langsmith help with testing?\"\u001b[39m,\n", + " output: \u001b[32m\"LangSmith can assist with testing in several ways, particularly for applications built using large l\"\u001b[39m... 1474 more characters\n", + "}" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await agentExecutor.invoke({ input: \"how can langsmith help with testing?\" })" + ] + }, + { + "cell_type": "markdown", + "id": "f2d94242", + "metadata": {}, + "source": [ + "Let's take a look at the [LangSmith trace](https://smith.langchain.com/public/35bd4f0f-aa2f-4ac2-b9a9-89ce0ca306ca/r) to make sure it's actually calling that.\n", + "\n", + "Now let's try one where it needs to call the search tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "77c2f769", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " input: \u001b[32m\"whats the weather in sf?\"\u001b[39m,\n", + " output: \u001b[32m\"The current weather in San Francisco is as follows:\\n\"\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"- **Temperature**: 15.6°C (60.1°F)\\n\"\u001b[39m +\n", + " \u001b[32m\"- **Conditio\"\u001b[39m... 303 more characters\n", + "}" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await agentExecutor.invoke({ input: \"whats the weather in sf?\" })" + ] + }, + { + "cell_type": "markdown", + "id": "c174f838", + "metadata": {}, + "source": [ + "We can check out the [LangSmith trace](https://smith.langchain.com/public/dfde6f46-0e7b-4dfe-813c-87d7bfb2ade5/r) to make sure it's calling the search tool effectively." + ] + }, + { + "cell_type": "markdown", + "id": "022cbc8a", + "metadata": {}, + "source": [ + "## Adding in memory\n", + "\n", + "As mentioned earlier, this agent is stateless. This means it does not remember previous interactions. To give it memory we need to pass in previous `chat_history`.\n", + "\n", + "**Note**: The input variable needs to be called `chat_history` because of the prompt we are using. If we use a different prompt, we could change the variable name." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "c4073e35", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " input: \u001b[32m\"hi! my name is bob\"\u001b[39m,\n", + " chat_history: [],\n", + " output: \u001b[32m\"Hello Bob! 
How can I assist you today?\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "// Here we pass in an empty list of messages for chat_history because it is the first message in the chat\n", + "await agentExecutor.invoke({ input: \"hi! my name is bob\", chat_history: [] })" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "550e0c6e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " chat_history: [\n", + " { role: \u001b[32m\"user\"\u001b[39m, content: \u001b[32m\"hi! my name is bob\"\u001b[39m },\n", + " {\n", + " role: \u001b[32m\"assistant\"\u001b[39m,\n", + " content: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m\n", + " }\n", + " ],\n", + " input: \u001b[32m\"what's my name?\"\u001b[39m,\n", + " output: \u001b[32m\"Your name is Bob. How can I help you today, Bob?\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await agentExecutor.invoke(\n", + " {\n", + " chat_history: [\n", + " { role: \"user\", content: \"hi! my name is bob\" },\n", + " { role: \"assistant\", content: \"Hello Bob! How can I assist you today?\" },\n", + " ],\n", + " input: \"what's my name?\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "07b3bcf2", + "metadata": {}, + "source": [ + "If we want to keep track of these messages automatically, we can wrap this in a RunnableWithMessageHistory.\n", + "\n", + "Because we have multiple inputs, we need to specify two things:\n", + "\n", + "- `inputMessagesKey`: The input key to use to add to the conversation history.\n", + "- `historyMessagesKey`: The key to add the loaded messages into.\n", + "\n", + "For more information on how to use this, see [this guide](/docs/how_to/message_history). " + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "8edd96e6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " input: \u001b[32m\"hi! I'm bob\"\u001b[39m,\n", + " chat_history: [\n", + " HumanMessage {\n", + " \"content\": \"hi! I'm bob\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"Hello Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " }\n", + " ],\n", + " output: \u001b[32m\"Hello Bob! How can I assist you today?\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ChatMessageHistory } from \"@langchain/community/stores/message/in_memory\";\n", + "import { BaseChatMessageHistory } from \"@langchain/core/chat_history\";\n", + "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "\n", + "const store = {};\n", + "\n", + "function getMessageHistory(sessionId: string): BaseChatMessageHistory {\n", + " if (!(sessionId in store)) {\n", + " store[sessionId] = new ChatMessageHistory();\n", + " }\n", + " return store[sessionId];\n", + "}\n", + "\n", + "const agentWithChatHistory = new RunnableWithMessageHistory({\n", + " runnable: agentExecutor,\n", + " getMessageHistory,\n", + " inputMessagesKey: \"input\",\n", + " historyMessagesKey: \"chat_history\",\n", + "})\n", + "\n", + "await agentWithChatHistory.invoke(\n", + " { input: \"hi! 
I'm bob\" },\n", + " { configurable: { sessionId: \"\" }},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "ae627966", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " input: \u001b[32m\"what's my name?\"\u001b[39m,\n", + " chat_history: [\n", + " HumanMessage {\n", + " \"content\": \"hi! I'm bob\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"Hello Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"what's my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"Your name is Bob! How can I help you today, Bob?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " }\n", + " ],\n", + " output: \u001b[32m\"Your name is Bob! How can I help you today, Bob?\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await agentWithChatHistory.invoke(\n", + " { input: \"what's my name?\" },\n", + " { configurable: { sessionId: \"\" }},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6de2798e", + "metadata": {}, + "source": [ + "Example LangSmith trace: https://smith.langchain.com/public/98c8d162-60ae-4493-aa9f-992d87bd0429/r" + ] + }, + { + "cell_type": "markdown", + "id": "c029798f", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "That's a wrap! In this quick start we covered how to create a simple agent. Agents are a complex topic, and there's lot to learn! \n", + "\n", + ":::{.callout-important}\n", + "This section covered building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph](https://langchain-ai.github.io/langgraphjs).\n", + "\n", + "You can also see [this guide to help migrate to LangGraph](/docs/how_to/migrate_agent).\n", + ":::" + ] } - ], - "source": [ - "import { ChatMessageHistory } from \"@langchain/community/stores/message/in_memory\";\n", - "import { BaseChatMessageHistory } from \"@langchain/core/chat_history\";\n", - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", - "\n", - "const store = {};\n", - "\n", - "function getMessageHistory(sessionId: string): BaseChatMessageHistory {\n", - " if (!(sessionId in store)) {\n", - " store[sessionId] = new ChatMessageHistory();\n", - " }\n", - " return store[sessionId];\n", - "}\n", - "\n", - "const agentWithChatHistory = new RunnableWithMessageHistory({\n", - " runnable: agentExecutor,\n", - " getMessageHistory,\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - "})\n", - "\n", - "await agentWithChatHistory.invoke(\n", - " { input: \"hi! I'm bob\" },\n", - " { configurable: { sessionId: \"\" }},\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "ae627966", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " input: \u001b[32m\"what's my name?\"\u001b[39m,\n", - " chat_history: [\n", - " HumanMessage {\n", - " \"content\": \"hi! 
I'm bob\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"Hello Bob! How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " },\n", - " HumanMessage {\n", - " \"content\": \"what's my name?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"Your name is Bob! How can I help you today, Bob?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " }\n", - " ],\n", - " output: \u001b[32m\"Your name is Bob! How can I help you today, Bob?\"\u001b[39m\n", - "}" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "await agentWithChatHistory.invoke(\n", - " { input: \"what's my name?\" },\n", - " { configurable: { sessionId: \"\" }},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "6de2798e", - "metadata": {}, - "source": [ - "Example LangSmith trace: https://smith.langchain.com/public/98c8d162-60ae-4493-aa9f-992d87bd0429/r" - ] - }, - { - "cell_type": "markdown", - "id": "c029798f", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "That's a wrap! In this quick start we covered how to create a simple agent. Agents are a complex topic, and there's lot to learn! \n", - "\n", - ":::{.callout-important}\n", - "This section covered building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. 
For working with more advanced agents, we'd recommend checking out [LangGraph](/docs/concepts/#langgraphjs).\n", - "\n", - "You can also see [this guide to help migrate to LangGraph](/docs/how_to/migrate_agent).\n", - ":::" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/assign.ipynb b/docs/core_docs/docs/how_to/assign.ipynb index 6faee4736433..147f189c3bc4 100644 --- a/docs/core_docs/docs/how_to/assign.ipynb +++ b/docs/core_docs/docs/how_to/assign.ipynb @@ -1,194 +1,194 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": {}, - "source": [ - "---\n", - "keywords: [RunnablePassthrough, assign, LCEL]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to add values to a chain's state\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", - "- [Chaining runnables](/docs/how_to/sequence/)\n", - "- [Calling runnables in parallel](/docs/how_to/parallel/)\n", - "- [Custom functions](/docs/how_to/functions/)\n", - "- [Passing data through](/docs/how_to/passthrough)\n", - "\n", - ":::\n", - "\n", - "An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html#assign-2) static method takes an input value and adds the extra arguments passed to the assign function.\n", - "\n", - "This is useful in the common [LangChain Expression Language](/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step.\n", - "\n", - "Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "{ extra: { num: \u001b[33m1\u001b[39m, mult: \u001b[33m3\u001b[39m, modified: \u001b[33m2\u001b[39m } }" + "cell_type": "raw", + "metadata": {}, + "source": [ + "---\n", + "keywords: [RunnablePassthrough, assign, LCEL]\n", + "---" ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { RunnableParallel, RunnablePassthrough } from \"@langchain/core/runnables\";\n", - "\n", - "const runnable = RunnableParallel.from({\n", - " extra: RunnablePassthrough.assign({\n", - " mult: (input: { num: number }) => input.num * 3,\n", - " modified: (input: { num: number }) => input.num + 1\n", - " })\n", - "});\n", - "\n", - "await runnable.invoke({ num: 1 });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's break down what's happening here.\n", - "\n", - "- The input to the chain is `{\"num\": 1}`. This is passed into a `RunnableParallel`, which invokes the runnables it is passed in parallel with that input.\n", - "- The value under the `extra` key is invoked. 
`RunnablePassthrough.assign()` keeps the original keys in the input dict (`{\"num\": 1}`), and assigns a new key called `mult`. The value is `lambda x: x[\"num\"] * 3)`, which is `3`. Thus, the result is `{\"num\": 1, \"mult\": 3}`.\n", - "- `{\"num\": 1, \"mult\": 3}` is returned to the `RunnableParallel` call, and is set as the value to the key `extra`.\n", - "- At the same time, the `modified` key is called. The result is `2`, since the lambda extracts a key called `\"num\"` from its input and adds one.\n", - "\n", - "Thus, the result is `{'extra': {'num': 1, 'mult': 3}, 'modified': 2}`.\n", - "\n", - "## Streaming\n", - "\n", - "One convenient feature of this method is that it allows values to pass through as soon as they are available. To show this off, we'll use `RunnablePassthrough.assign()` to immediately return source docs in a retrieval chain:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to add values to a chain's state\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n", + "- [Chaining runnables](/docs/how_to/sequence/)\n", + "- [Calling runnables in parallel](/docs/how_to/parallel/)\n", + "- [Custom functions](/docs/how_to/functions/)\n", + "- [Passing data through](/docs/how_to/passthrough)\n", + "\n", + ":::\n", + "\n", + "An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. 
The [`RunnablePassthrough.assign()`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html#assign-2) static method takes an input value and adds the extra arguments passed to the assign function.\n", + "\n", + "This is useful in the common [LangChain Expression Language](/docs/concepts/lcel) pattern of additively creating a dictionary to use as input to a later step.\n", + "\n", + "Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ extra: { num: \u001b[33m1\u001b[39m, mult: \u001b[33m3\u001b[39m, modified: \u001b[33m2\u001b[39m } }" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnableParallel, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "\n", + "const runnable = RunnableParallel.from({\n", + " extra: RunnablePassthrough.assign({\n", + " mult: (input: { num: number }) => input.num * 3,\n", + " modified: (input: { num: number }) => input.num + 1\n", + " })\n", + "});\n", + "\n", + "await runnable.invoke({ num: 1 });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ question: \"where did harrison work?\" }\n", - "{ context: \"harrison worked at kensho\" }\n", - "{ output: \"\" }\n", - "{ output: \"H\" }\n", - "{ output: \"arrison\" }\n", - "{ output: \" worked\" }\n", - "{ output: \" at\" }\n", - "{ output: \" Kens\" }\n", - "{ output: \"ho\" }\n", - "{ output: \".\" }\n", - "{ output: \"\" }\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's break down what's happening here.\n", + "\n", + "- The input to the chain is `{\"num\": 1}`. This is passed into a `RunnableParallel`, which invokes the runnables it is passed in parallel with that input.\n", + "- The value under the `extra` key is invoked. `RunnablePassthrough.assign()` keeps the original keys in the input dict (`{\"num\": 1}`), and assigns a new key called `mult`. The value is `lambda x: x[\"num\"] * 3)`, which is `3`. Thus, the result is `{\"num\": 1, \"mult\": 3}`.\n", + "- `{\"num\": 1, \"mult\": 3}` is returned to the `RunnableParallel` call, and is set as the value to the key `extra`.\n", + "- At the same time, the `modified` key is called. The result is `2`, since the lambda extracts a key called `\"num\"` from its input and adds one.\n", + "\n", + "Thus, the result is `{'extra': {'num': 1, 'mult': 3}, 'modified': 2}`.\n", + "\n", + "## Streaming\n", + "\n", + "One convenient feature of this method is that it allows values to pass through as soon as they are available. 
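Before the streaming example, a quick aside that may make the merge behavior above more concrete: `RunnablePassthrough.assign()` can also be used on its own, outside of a `RunnableParallel`. This is a minimal sketch reusing only the imports from the first example; the result in the comment is the expected shape rather than captured output:

```typescript
// Standalone use: the assigned key is merged in next to the original input keys.
const withMult = RunnablePassthrough.assign({
  mult: (input: { num: number }) => input.num * 3,
});

await withMult.invoke({ num: 1 });
// -> { num: 1, mult: 3 }
```

Unlike the `RunnableParallel` example above, nothing is nested under an extra key here - the new value is added directly alongside `num`.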
To show this off, we'll use `RunnablePassthrough.assign()` to immediately return source docs in a retrieval chain:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ question: \"where did harrison work?\" }\n", + "{ context: \"harrison worked at kensho\" }\n", + "{ output: \"\" }\n", + "{ output: \"H\" }\n", + "{ output: \"arrison\" }\n", + "{ output: \" worked\" }\n", + "{ output: \" at\" }\n", + "{ output: \" Kens\" }\n", + "{ output: \"ho\" }\n", + "{ output: \".\" }\n", + "{ output: \"\" }\n" + ] + } + ], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { ChatOpenAI, OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments([\n", + " { pageContent: \"harrison worked at kensho\", metadata: {} }\n", + "], new OpenAIEmbeddings());\n", + "\n", + "const retriever = vectorstore.asRetriever();\n", + "\n", + "const template = `Answer the question based only on the following context:\n", + "{context}\n", + "\n", + "Question: {question}\n", + "`;\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(template);\n", + "\n", + "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "const generationChain = prompt.pipe(model).pipe(new StringOutputParser());\n", + "\n", + "const retrievalChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe((docs) => docs[0].pageContent),\n", + " question: new RunnablePassthrough()\n", + " },\n", + " RunnablePassthrough.assign({ output: generationChain }),\n", + "]);\n", + "\n", + "const stream = await retrievalChain.stream(\"where did harrison work?\");\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(chunk);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that the first chunk contains the original `\"question\"` since that is immediately available. The second chunk contains `\"context\"` since the retriever finishes second. Finally, the output from the `generation_chain` streams in chunks as soon as it is available.\n", + "\n", + "## Next steps\n", + "\n", + "Now you've learned how to pass data through your chains to help to help format the data flowing through your chains.\n", + "\n", + "To learn more, see the other how-to guides on runnables in this section." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { ChatOpenAI, OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments([\n", - " { pageContent: \"harrison worked at kensho\", metadata: {} }\n", - "], new OpenAIEmbeddings());\n", - "\n", - "const retriever = vectorstore.asRetriever();\n", - "\n", - "const template = `Answer the question based only on the following context:\n", - "{context}\n", - "\n", - "Question: {question}\n", - "`;\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(template);\n", - "\n", - "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", - "\n", - "const generationChain = prompt.pipe(model).pipe(new StringOutputParser());\n", - "\n", - "const retrievalChain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe((docs) => docs[0].pageContent),\n", - " question: new RunnablePassthrough()\n", - " },\n", - " RunnablePassthrough.assign({ output: generationChain }),\n", - "]);\n", - "\n", - "const stream = await retrievalChain.stream(\"where did harrison work?\");\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that the first chunk contains the original `\"question\"` since that is immediately available. The second chunk contains `\"context\"` since the retriever finishes second. Finally, the output from the `generation_chain` streams in chunks as soon as it is available.\n", - "\n", - "## Next steps\n", - "\n", - "Now you've learned how to pass data through your chains to help to help format the data flowing through your chains.\n", - "\n", - "To learn more, see the other how-to guides on runnables in this section." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/binding.ipynb b/docs/core_docs/docs/how_to/binding.ipynb index 7eafd8ad400a..e323fa625da7 100644 --- a/docs/core_docs/docs/how_to/binding.ipynb +++ b/docs/core_docs/docs/how_to/binding.ipynb @@ -1,315 +1,315 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "fe63ffaf", - "metadata": {}, - "source": [ - "---\n", - "keywords: [RunnableBinding, LCEL]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "711752cb-4f15-42a3-9838-a0c67f397771", - "metadata": {}, - "source": [ - "# How to attach runtime arguments to a Runnable\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", - "- [Chaining runnables](/docs/how_to/sequence/)\n", - "- [Tool calling](/docs/how_to/tool_calling/)\n", - "\n", - ":::\n", - "\n", - "Sometimes we want to invoke a [`Runnable`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html) within a [RunnableSequence](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#bind) method to set these arguments ahead of time.\n", - "\n", - "## Binding stop sequences\n", - "\n", - "Suppose we have a simple prompt + model chain:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "f3fdf86d-155f-4587-b7cd-52d363970c1d", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "EQUATION: x^3 + 7 = 12\n", - "\n", - "SOLUTION: \n", - "Subtract 7 from both sides:\n", - "x^3 = 5\n", - "\n", - "Take the cube root of both sides:\n", - "x = ∛5\n" - ] - } - ], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"Write out the following equation using algebraic symbols then solve it. 
Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n\",\n", - " ],\n", - " [\"human\", \"{equation_statement}\"],\n", - " ]\n", - ")\n", - "\n", - "const model = new ChatOpenAI({ temperature: 0 });\n", - "\n", - "const runnable = prompt.pipe(model).pipe(new StringOutputParser());\n", - "\n", - "const res = await runnable.invoke({\n", - " equation_statement: \"x raised to the third plus seven equals 12\"\n", - "});\n", - "\n", - "console.log(res);" - ] - }, - { - "cell_type": "markdown", - "id": "929c9aba-a4a0-462c-adac-2cfc2156e117", - "metadata": {}, - "source": [ - "and want to call the model with certain `stop` words so that we shorten the output, which is useful in certain types of prompting techniques. While we can pass some arguments into the constructor, other runtime args use the `.bind()` method as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "32e0484a-78c5-4570-a00b-20d597245a96", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "fe63ffaf", + "metadata": {}, + "source": [ + "---\n", + "keywords: [RunnableBinding, LCEL]\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "EQUATION: x^3 + 7 = 12\n", - "\n", - "\n" - ] - } - ], - "source": [ - "const runnableWithStop = prompt\n", - " .pipe(model.bind({ stop: [\"SOLUTION\"] }))\n", - " .pipe(new StringOutputParser());\n", - "\n", - "const shorterResponse = await runnableWithStop.invoke({\n", - " equation_statement: \"x raised to the third plus seven equals 12\"\n", - "});\n", - "\n", - "console.log(shorterResponse);" - ] - }, - { - "cell_type": "markdown", - "id": "f07d7528-9269-4d6f-b12e-3669592a9e03", - "metadata": {}, - "source": [ - "What you can bind to a Runnable will depend on the extra parameters you can pass when invoking it.\n", - "\n", - "## Attaching OpenAI tools\n", - "\n", - "Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling/) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2cdeeb4c-0c1f-43da-bd58-4f591d9e0671", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "711752cb-4f15-42a3-9838-a0c67f397771", + "metadata": {}, + "source": [ + "# How to attach runtime arguments to a Runnable\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n", + "- [Chaining runnables](/docs/how_to/sequence/)\n", + "- [Tool calling](/docs/how_to/tool_calling/)\n", + "\n", + ":::\n", + "\n", + "Sometimes we want to invoke a [`Runnable`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html) within a [RunnableSequence](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. 
We can use the [`Runnable.bind()`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#bind) method to set these arguments ahead of time.\n", + "\n", + "## Binding stop sequences\n", + "\n", + "Suppose we have a simple prompt + model chain:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "f3fdf86d-155f-4587-b7cd-52d363970c1d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EQUATION: x^3 + 7 = 12\n", + "\n", + "SOLUTION: \n", + "Subtract 7 from both sides:\n", + "x^3 = 5\n", + "\n", + "Take the cube root of both sides:\n", + "x = ∛5\n" + ] + } + ], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"Write out the following equation using algebraic symbols then solve it. Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n\",\n", + " ],\n", + " [\"human\", \"{equation_statement}\"],\n", + " ]\n", + ")\n", + "\n", + "const model = new ChatOpenAI({ temperature: 0 });\n", + "\n", + "const runnable = prompt.pipe(model).pipe(new StringOutputParser());\n", + "\n", + "const res = await runnable.invoke({\n", + " equation_statement: \"x raised to the third plus seven equals 12\"\n", + "});\n", + "\n", + "console.log(res);" + ] + }, + { + "cell_type": "markdown", + "id": "929c9aba-a4a0-462c-adac-2cfc2156e117", + "metadata": {}, + "source": [ + "and want to call the model with certain `stop` words so that we shorten the output, which is useful in certain types of prompting techniques. 
While we can pass some arguments into the constructor, other runtime args use the `.bind()` method as follows:" + ] + }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"\"\u001b[39m,\n", - " tool_calls: [\n", - " {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " args: { location: \u001b[32m\"San Francisco, CA\"\u001b[39m },\n", - " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m\n", - " },\n", - " {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " args: { location: \u001b[32m\"New York, NY\"\u001b[39m },\n", - " id: \u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m\n", - " },\n", - " {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " args: { location: \u001b[32m\"Los Angeles, CA\"\u001b[39m },\n", - " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m\n", - " }\n", - " ],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {\n", - " function_call: \u001b[90mundefined\u001b[39m,\n", - " tool_calls: [\n", - " {\n", - " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m,\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: \u001b[36m[Object]\u001b[39m\n", - " },\n", - " {\n", - " id: \u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m,\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: \u001b[36m[Object]\u001b[39m\n", - " },\n", - " {\n", - " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m,\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: \u001b[36m[Object]\u001b[39m\n", - " }\n", - " ]\n", - " },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {\n", - " function_call: \u001b[90mundefined\u001b[39m,\n", - " tool_calls: [\n", - " {\n", - " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m,\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " arguments: \u001b[32m'{\"location\": \"San Francisco, CA\"}'\u001b[39m\n", - " }\n", - " },\n", - " {\n", - " id: \u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m,\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " arguments: \u001b[32m'{\"location\": \"New York, NY\"}'\u001b[39m\n", - " }\n", - " },\n", - " {\n", - " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m,\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " arguments: \u001b[32m'{\"location\": \"Los Angeles, CA\"}'\u001b[39m\n", - " }\n", - " }\n", - " ]\n", - " },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m70\u001b[39m, promptTokens: \u001b[33m82\u001b[39m, totalTokens: \u001b[33m152\u001b[39m },\n", - " finish_reason: \u001b[32m\"tool_calls\"\u001b[39m\n", - " },\n", - " tool_calls: [\n", - " {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " args: { location: \u001b[32m\"San Francisco, CA\"\u001b[39m },\n", - " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m\n", - " },\n", - " {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " args: { location: \u001b[32m\"New York, NY\"\u001b[39m },\n", - " id: 
\u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m\n", - " },\n", - " {\n", - " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", - " args: { location: \u001b[32m\"Los Angeles, CA\"\u001b[39m },\n", - " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m\n", - " }\n", - " ],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "code", + "execution_count": 2, + "id": "32e0484a-78c5-4570-a00b-20d597245a96", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EQUATION: x^3 + 7 = 12\n", + "\n", + "\n" + ] + } + ], + "source": [ + "const runnableWithStop = prompt\n", + " .pipe(model.bind({ stop: [\"SOLUTION\"] }))\n", + " .pipe(new StringOutputParser());\n", + "\n", + "const shorterResponse = await runnableWithStop.invoke({\n", + " equation_statement: \"x raised to the third plus seven equals 12\"\n", + "});\n", + "\n", + "console.log(shorterResponse);" + ] + }, + { + "cell_type": "markdown", + "id": "f07d7528-9269-4d6f-b12e-3669592a9e03", + "metadata": {}, + "source": [ + "What you can bind to a Runnable will depend on the extra parameters you can pass when invoking it.\n", + "\n", + "## Attaching OpenAI tools\n", + "\n", + "Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling/) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2cdeeb4c-0c1f-43da-bd58-4f591d9e0671", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"\"\u001b[39m,\n", + " tool_calls: [\n", + " {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " args: { location: \u001b[32m\"San Francisco, CA\"\u001b[39m },\n", + " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m\n", + " },\n", + " {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " args: { location: \u001b[32m\"New York, NY\"\u001b[39m },\n", + " id: \u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m\n", + " },\n", + " {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " args: { location: \u001b[32m\"Los Angeles, CA\"\u001b[39m },\n", + " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m\n", + " }\n", + " ],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {\n", + " function_call: \u001b[90mundefined\u001b[39m,\n", + " tool_calls: [\n", + " {\n", + " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: \u001b[36m[Object]\u001b[39m\n", + " },\n", + " {\n", + " id: \u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: \u001b[36m[Object]\u001b[39m\n", + " },\n", + " {\n", + " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: \u001b[36m[Object]\u001b[39m\n", + " }\n", + " ]\n", + " },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {\n", + " function_call: \u001b[90mundefined\u001b[39m,\n", + " tool_calls: [\n", + " {\n", + " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m,\n", + " type: 
\u001b[32m\"function\"\u001b[39m,\n", + " function: {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " arguments: \u001b[32m'{\"location\": \"San Francisco, CA\"}'\u001b[39m\n", + " }\n", + " },\n", + " {\n", + " id: \u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " arguments: \u001b[32m'{\"location\": \"New York, NY\"}'\u001b[39m\n", + " }\n", + " },\n", + " {\n", + " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " arguments: \u001b[32m'{\"location\": \"Los Angeles, CA\"}'\u001b[39m\n", + " }\n", + " }\n", + " ]\n", + " },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: \u001b[33m70\u001b[39m, promptTokens: \u001b[33m82\u001b[39m, totalTokens: \u001b[33m152\u001b[39m },\n", + " finish_reason: \u001b[32m\"tool_calls\"\u001b[39m\n", + " },\n", + " tool_calls: [\n", + " {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " args: { location: \u001b[32m\"San Francisco, CA\"\u001b[39m },\n", + " id: \u001b[32m\"call_iDKz4zU8PKBaaIT052fJkMMF\"\u001b[39m\n", + " },\n", + " {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " args: { location: \u001b[32m\"New York, NY\"\u001b[39m },\n", + " id: \u001b[32m\"call_niQwZDOqO6OJTBiDBFG8FODc\"\u001b[39m\n", + " },\n", + " {\n", + " name: \u001b[32m\"get_current_weather\"\u001b[39m,\n", + " args: { location: \u001b[32m\"Los Angeles, CA\"\u001b[39m },\n", + " id: \u001b[32m\"call_zLXH2cDVQy0nAVC0ViWuEP4m\"\u001b[39m\n", + " }\n", + " ],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const tools = [\n", + " {\n", + " \"type\": \"function\",\n", + " \"function\": {\n", + " \"name\": \"get_current_weather\",\n", + " \"description\": \"Get the current weather in a given location\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"location\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city and state, e.g. San Francisco, CA\",\n", + " },\n", + " \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n", + " },\n", + " \"required\": [\"location\"],\n", + " },\n", + " },\n", + " }\n", + "];\n", + "\n", + "const modelWithTools = new ChatOpenAI({ model: \"gpt-4o\" }).bind({ tools });\n", + "\n", + "await modelWithTools.invoke(\"What's the weather in SF, NYC and LA?\")" + ] + }, + { + "cell_type": "markdown", + "id": "095001f7", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You now know how to bind runtime arguments to a Runnable.\n", + "\n", + "Next, you might be interested in our how-to guides on [passing data through a chain](/docs/how_to/passthrough/)." ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "const tools = [\n", - " {\n", - " \"type\": \"function\",\n", - " \"function\": {\n", - " \"name\": \"get_current_weather\",\n", - " \"description\": \"Get the current weather in a given location\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"location\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n", - " },\n", - " \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n", - " },\n", - " \"required\": [\"location\"],\n", - " },\n", - " },\n", - " }\n", - "];\n", - "\n", - "const modelWithTools = new ChatOpenAI({ model: \"gpt-4o\" }).bind({ tools });\n", - "\n", - "await modelWithTools.invoke(\"What's the weather in SF, NYC and LA?\")" - ] - }, - { - "cell_type": "markdown", - "id": "095001f7", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You now know how to bind runtime arguments to a Runnable.\n", - "\n", - "Next, you might be interested in our how-to guides on [passing data through a chain](/docs/how_to/passthrough/)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/caching_embeddings.mdx b/docs/core_docs/docs/how_to/caching_embeddings.mdx index 9a4432ffa779..0edb3c06673e 100644 --- a/docs/core_docs/docs/how_to/caching_embeddings.mdx +++ b/docs/core_docs/docs/how_to/caching_embeddings.mdx @@ -8,7 +8,7 @@ import RedisExample from "@examples/embeddings/cache_backed_redis.ts"; This guide assumes familiarity with the following concepts: -- [Embeddings](/docs/concepts/#embedding-models) +- [Embeddings](/docs/concepts/embedding_models) ::: diff --git a/docs/core_docs/docs/how_to/callbacks_attach.ipynb b/docs/core_docs/docs/how_to/callbacks_attach.ipynb index 3d3ac02bf7f0..e90e34fd635b 100644 --- a/docs/core_docs/docs/how_to/callbacks_attach.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_attach.ipynb @@ -1,278 +1,278 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to attach callbacks to a module\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Callbacks](/docs/concepts/#callbacks)\n", - "- [Chaining runnables](/docs/how_to/sequence)\n", - "- [Attach runtime arguments to a Runnable](/docs/how_to/binding)\n", - "\n", - ":::\n", - "\n", - "If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.withConfig()`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#withConfig) method. 
This saves you the need to pass callbacks in each time you invoke the chain.\n", - "\n", - "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to attach callbacks to a module\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Callbacks](/docs/concepts/callbacks)\n", + "- [Chaining runnables](/docs/how_to/sequence)\n", + "- [Attach runtime arguments to a Runnable](/docs/how_to/binding)\n", + "\n", + ":::\n", + "\n", + "If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.withConfig()`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#withConfig) method. This saves you the need to pass callbacks in each time you invoke the chain.\n", + "\n", + "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[32m[chain/start]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] Entering Chain run with input: {\n", - " \"number\": \"2\"\n", - "}\n", - "\u001b[32m[chain/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] Entering Chain run with input: {\n", - " \"number\": \"2\"\n", - "}\n", - "\u001b[36m[chain/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] [1ms] Exiting Chain run with output: {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"prompt_values\",\n", - " \"ChatPromptValue\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"messages\": [\n", - " {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"HumanMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"What is 1 + 2?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "\u001b[32m[llm/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] Entering LLM run with input: {\n", - " \"messages\": [\n", - " [\n", - " {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"HumanMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"What is 1 + 2?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " }\n", - " }\n", - " ]\n", - " ]\n", - "}\n", - "\u001b[36m[llm/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] [797ms] Exiting LLM run with output: {\n", - " \"generations\": [\n", - " [\n", - " {\n", - " \"text\": \"1 + 2 = 3\",\n", - " \"message\": {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"AIMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"1 + 2 = 3\",\n", - " \"tool_calls\": [],\n", - " 
\"invalid_tool_calls\": [],\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - " }\n", - " }\n", - " }\n", - " ]\n", - " ],\n", - " \"llmOutput\": {\n", - " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - "}\n", - "\u001b[36m[chain/end]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] [806ms] Exiting Chain run with output: {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"AIMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"1 + 2 = 3\",\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[32m[chain/start]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] Entering Chain run with input: {\n", + " \"number\": \"2\"\n", + "}\n", + "\u001b[32m[chain/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] Entering Chain run with input: {\n", + " \"number\": \"2\"\n", + "}\n", + "\u001b[36m[chain/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] [1ms] Exiting Chain run with output: {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"prompt_values\",\n", + " \"ChatPromptValue\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"messages\": [\n", + " {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"HumanMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"What is 1 + 2?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "\u001b[32m[llm/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > 
\u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] Entering LLM run with input: {\n", + " \"messages\": [\n", + " [\n", + " {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"HumanMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"What is 1 + 2?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + " }\n", + " ]\n", + " ]\n", + "}\n", + "\u001b[36m[llm/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] [797ms] Exiting LLM run with output: {\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"1 + 2 = 3\",\n", + " \"message\": {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"1 + 2 = 3\",\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llmOutput\": {\n", + " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + "}\n", + "\u001b[36m[chain/end]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] [806ms] Exiting Chain run with output: {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"1 + 2 = 3\",\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01WvZAqTg2hZzC4AKyeUaADs\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + " }\n", + "}\n" + ] + }, + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {\n", + " id: \u001b[32m\"msg_01WvZAqTg2hZzC4AKyeUaADs\"\u001b[39m,\n", + " type: \u001b[32m\"message\"\u001b[39m,\n", + " role: 
\u001b[32m\"assistant\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {\n", + " id: \u001b[32m\"msg_01WvZAqTg2hZzC4AKyeUaADs\"\u001b[39m,\n", + " type: \u001b[32m\"message\"\u001b[39m,\n", + " role: \u001b[32m\"assistant\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " response_metadata: {\n", + " id: \u001b[32m\"msg_01WvZAqTg2hZzC4AKyeUaADs\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ConsoleCallbackHandler } from \"@langchain/core/tracers/console\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const handler = new ConsoleCallbackHandler();\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + "});\n", + "\n", + "const chainWithCallbacks = prompt.pipe(model).withConfig({\n", + " callbacks: [handler],\n", + "});\n", + "\n", + "await chainWithCallbacks.invoke({ number: \"2\" });" + ] }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {\n", - " id: \u001b[32m\"msg_01WvZAqTg2hZzC4AKyeUaADs\"\u001b[39m,\n", - " type: \u001b[32m\"message\"\u001b[39m,\n", - " role: \u001b[32m\"assistant\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {\n", - " id: \u001b[32m\"msg_01WvZAqTg2hZzC4AKyeUaADs\"\u001b[39m,\n", - " type: \u001b[32m\"message\"\u001b[39m,\n", - " role: \u001b[32m\"assistant\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: 
\u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " response_metadata: {\n", - " id: \u001b[32m\"msg_01WvZAqTg2hZzC4AKyeUaADs\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The bound callbacks will run for all nested module runs.\n", + "\n", + "## Next steps\n", + "\n", + "You've now learned how to bind callbacks to a chain.\n", + "\n", + "Next, check out the other how-to guides in this section, such as how to create your own [custom callback handlers](/docs/how_to/custom_callbacks)." ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { ConsoleCallbackHandler } from \"@langchain/core/tracers/console\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const handler = new ConsoleCallbackHandler();\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - "});\n", - "\n", - "const chainWithCallbacks = prompt.pipe(model).withConfig({\n", - " callbacks: [handler],\n", - "});\n", - "\n", - "await chainWithCallbacks.invoke({ number: \"2\" });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The bound callbacks will run for all nested module runs.\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned how to bind callbacks to a chain.\n", - "\n", - "Next, check out the other how-to guides in this section, such as how to create your own [custom callback handlers](/docs/how_to/custom_callbacks)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/callbacks_constructor.ipynb b/docs/core_docs/docs/how_to/callbacks_constructor.ipynb index b653161deada..d0a62e29b238 100644 --- a/docs/core_docs/docs/how_to/callbacks_constructor.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_constructor.ipynb @@ -1,206 +1,206 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to pass callbacks into a module constructor\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Callbacks](/docs/concepts/#callbacks)\n", - "\n", - ":::\n", - "\n", - "Most LangChain modules allow you to pass `callbacks` directly into the constructor. 
In this case, the callbacks will only be called for that instance (and any nested runs).\n", - "\n", - "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to pass callbacks into a module constructor\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Callbacks](/docs/concepts/callbacks)\n", + "\n", + ":::\n", + "\n", + "Most LangChain modules allow you to pass `callbacks` directly into the constructor. In this case, the callbacks will only be called for that instance (and any nested runs).\n", + "\n", + "Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[32m[llm/start]\u001b[39m [\u001b[90m\u001b[1m1:llm:ChatAnthropic\u001b[22m\u001b[39m] Entering LLM run with input: {\n", - " \"messages\": [\n", - " [\n", - " {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"HumanMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"What is 1 + 2?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " }\n", - " }\n", - " ]\n", - " ]\n", - "}\n", - "\u001b[36m[llm/end]\u001b[39m [\u001b[90m\u001b[1m1:llm:ChatAnthropic\u001b[22m\u001b[39m] [1.00s] Exiting LLM run with output: {\n", - " \"generations\": [\n", - " [\n", - " {\n", - " \"text\": \"1 + 2 = 3\",\n", - " \"message\": {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"AIMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"1 + 2 = 3\",\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_011Z1cgi3gyNGxT55wnRNkXq\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_011Z1cgi3gyNGxT55wnRNkXq\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - " }\n", - " }\n", - " }\n", - " ]\n", - " ],\n", - " \"llmOutput\": {\n", - " \"id\": \"msg_011Z1cgi3gyNGxT55wnRNkXq\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[32m[llm/start]\u001b[39m [\u001b[90m\u001b[1m1:llm:ChatAnthropic\u001b[22m\u001b[39m] Entering LLM run with input: {\n", + " \"messages\": [\n", + " [\n", + " {\n", + " \"lc\": 1,\n", + " \"type\": 
\"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"HumanMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"What is 1 + 2?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + " }\n", + " ]\n", + " ]\n", + "}\n", + "\u001b[36m[llm/end]\u001b[39m [\u001b[90m\u001b[1m1:llm:ChatAnthropic\u001b[22m\u001b[39m] [1.00s] Exiting LLM run with output: {\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"1 + 2 = 3\",\n", + " \"message\": {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"1 + 2 = 3\",\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_011Z1cgi3gyNGxT55wnRNkXq\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_011Z1cgi3gyNGxT55wnRNkXq\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llmOutput\": {\n", + " \"id\": \"msg_011Z1cgi3gyNGxT55wnRNkXq\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + "}\n" + ] + }, + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {\n", + " id: \u001b[32m\"msg_011Z1cgi3gyNGxT55wnRNkXq\"\u001b[39m,\n", + " type: \u001b[32m\"message\"\u001b[39m,\n", + " role: \u001b[32m\"assistant\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {\n", + " id: \u001b[32m\"msg_011Z1cgi3gyNGxT55wnRNkXq\"\u001b[39m,\n", + " type: \u001b[32m\"message\"\u001b[39m,\n", + " role: \u001b[32m\"assistant\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " response_metadata: {\n", + " id: \u001b[32m\"msg_011Z1cgi3gyNGxT55wnRNkXq\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: 
\u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ConsoleCallbackHandler } from \"@langchain/core/tracers/console\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const handler = new ConsoleCallbackHandler();\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + " callbacks: [handler],\n", + "});\n", + "\n", + "const chain = prompt.pipe(model);\n", + "\n", + "await chain.invoke({ number: \"2\" });" + ] }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {\n", - " id: \u001b[32m\"msg_011Z1cgi3gyNGxT55wnRNkXq\"\u001b[39m,\n", - " type: \u001b[32m\"message\"\u001b[39m,\n", - " role: \u001b[32m\"assistant\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {\n", - " id: \u001b[32m\"msg_011Z1cgi3gyNGxT55wnRNkXq\"\u001b[39m,\n", - " type: \u001b[32m\"message\"\u001b[39m,\n", - " role: \u001b[32m\"assistant\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " response_metadata: {\n", - " id: \u001b[32m\"msg_011Z1cgi3gyNGxT55wnRNkXq\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that we only see events from the chat model run - none from the prompt or broader chain.\n", + "\n", + "## Next steps\n", + "\n", + "You've now learned how to pass callbacks into a constructor.\n", + "\n", + "Next, check out the other how-to guides in this section, such as how to create your own [custom callback handlers](/docs/how_to/custom_callbacks)." 
] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { ConsoleCallbackHandler } from \"@langchain/core/tracers/console\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const handler = new ConsoleCallbackHandler();\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - " callbacks: [handler],\n", - "});\n", - "\n", - "const chain = prompt.pipe(model);\n", - "\n", - "await chain.invoke({ number: \"2\" });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see that we only see events from the chat model run - none from the prompt or broader chain.\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned how to pass callbacks into a constructor.\n", - "\n", - "Next, check out the other how-to guides in this section, such as how to create your own [custom callback handlers](/docs/how_to/custom_callbacks)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/callbacks_custom_events.ipynb b/docs/core_docs/docs/how_to/callbacks_custom_events.ipynb index effe80810b02..7f91bc4eebea 100644 --- a/docs/core_docs/docs/how_to/callbacks_custom_events.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_custom_events.ipynb @@ -1,232 +1,232 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to dispatch custom callback events\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Callbacks](/docs/concepts/#callbacks)\n", - "- [Custom callback handlers](/docs/how_to/custom_callbacks)\n", - "- [Stream Events API](/docs/concepts#streamevents)\n", - "\n", - ":::\n", - "\n", - "In some situations, you may want to dipsatch a custom callback event from within a [Runnable](/docs/concepts/#runnable-interface) so it can be surfaced\n", - "in a custom callback handler or via the [Stream Events API](/docs/concepts/#streamevents).\n", - "\n", - "For example, if you have a long running tool with multiple steps, you can dispatch custom events between the steps and use these custom events to monitor progress.\n", - "You could also surface these custom events to an end user of your application to show them how the current task is progressing.\n", - "\n", - "To dispatch a custom event you need to decide on two attributes for the event: the `name` and the `data`.\n", - "\n", - "| Attribute | Type | Description |\n", - "|-----------|------|----------------------------------------------------------------------------------------------------------|\n", - "| name | string | A user 
defined name for the event. |\n", - "| data | any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |\n", - "\n", - "\n", - ":::info\n", - "- Custom callback events can only be dispatched from within an existing `Runnable`.\n", - "- If using `streamEvents`, you must use `version: \"v2\"` to consume custom events.\n", - "- Sending or rendering custom callback events in LangSmith is not yet supported.\n", - ":::\n", - "\n", - "## Stream Events API\n", - "\n", - "The most useful way to consume custom events is via the [`.streamEvents()`](/docs/concepts/#streamevents) method.\n", - "\n", - "We can use the `dispatchCustomEvent` API to emit custom events from this method. \n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "Dispatching custom callback events requires `@langchain/core>=0.2.16`. See [this guide](/docs/how_to/installation/#installing-integration-packages) for some considerations to take when upgrading `@langchain/core`.\n", - "\n", - "The default entrypoint below triggers an import and initialization of [`async_hooks`](https://nodejs.org/api/async_hooks.html) to enable automatic `RunnableConfig` passing, which is not supported in all environments. If you see import issues, you must import from `@langchain/core/callbacks/dispatch/web` and propagate the `RunnableConfig` object manually (see example below).\n", - ":::\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " event: 'on_custom_event',\n", - " run_id: '9eac217d-3a2d-4563-a91f-3bd49bee4b3d',\n", - " name: 'event1',\n", - " tags: [],\n", - " metadata: {},\n", - " data: { reversed: 'dlrow olleh' }\n", - "}\n", - "{\n", - " event: 'on_custom_event',\n", - " run_id: '9eac217d-3a2d-4563-a91f-3bd49bee4b3d',\n", - " name: 'event2',\n", - " tags: [],\n", - " metadata: {},\n", - " data: 5\n", - "}\n" - ] - } - ], - "source": [ - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\n", - "\n", - "const reflect = RunnableLambda.from(async (value: string) => {\n", - " await dispatchCustomEvent(\"event1\", { reversed: value.split(\"\").reverse().join(\"\") });\n", - " await dispatchCustomEvent(\"event2\", 5);\n", - " return value;\n", - "});\n", - "\n", - "const eventStream = await reflect.streamEvents(\"hello world\", { version: \"v2\" });\n", - "\n", - "for await (const event of eventStream) {\n", - " if (event.event === \"on_custom_event\") {\n", - " console.log(event);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you are in a web environment that does not support `async_hooks`, you must import from the web entrypoint and propagate the config manually instead:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to dispatch custom callback events\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Callbacks](/docs/concepts/callbacks)\n", + "- [Custom callback handlers](/docs/how_to/custom_callbacks)\n", + "- [Stream Events API](/docs/concepts/streaming#streamevents)\n", + "\n", + ":::\n", + "\n", + "In some situations, you may want to dipsatch a custom callback event from within a 
[Runnable](/docs/concepts/#runnable-interface) so it can be surfaced\n", + "in a custom callback handler or via the [Stream Events API](/docs/concepts/streaming#streamevents).\n", + "\n", + "For example, if you have a long running tool with multiple steps, you can dispatch custom events between the steps and use these custom events to monitor progress.\n", + "You could also surface these custom events to an end user of your application to show them how the current task is progressing.\n", + "\n", + "To dispatch a custom event you need to decide on two attributes for the event: the `name` and the `data`.\n", + "\n", + "| Attribute | Type | Description |\n", + "|-----------|------|----------------------------------------------------------------------------------------------------------|\n", + "| name | string | A user defined name for the event. |\n", + "| data | any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |\n", + "\n", + "\n", + ":::info\n", + "- Custom callback events can only be dispatched from within an existing `Runnable`.\n", + "- If using `streamEvents`, you must use `version: \"v2\"` to consume custom events.\n", + "- Sending or rendering custom callback events in LangSmith is not yet supported.\n", + ":::\n", + "\n", + "## Stream Events API\n", + "\n", + "The most useful way to consume custom events is via the [`.streamEvents()`](/docs/concepts/streaming#streamevents) method.\n", + "\n", + "We can use the `dispatchCustomEvent` API to emit custom events from this method. \n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "Dispatching custom callback events requires `@langchain/core>=0.2.16`. See [this guide](/docs/how_to/installation/#installing-integration-packages) for some considerations to take when upgrading `@langchain/core`.\n", + "\n", + "The default entrypoint below triggers an import and initialization of [`async_hooks`](https://nodejs.org/api/async_hooks.html) to enable automatic `RunnableConfig` passing, which is not supported in all environments. 
If you see import issues, you must import from `@langchain/core/callbacks/dispatch/web` and propagate the `RunnableConfig` object manually (see example below).\n", + ":::\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " event: 'on_custom_event',\n", - " run_id: 'dee1e4f0-c5ff-4118-9391-461a0dcc4cb2',\n", - " name: 'event1',\n", - " tags: [],\n", - " metadata: {},\n", - " data: { reversed: 'dlrow olleh' }\n", - "}\n", - "{\n", - " event: 'on_custom_event',\n", - " run_id: 'dee1e4f0-c5ff-4118-9391-461a0dcc4cb2',\n", - " name: 'event2',\n", - " tags: [],\n", - " metadata: {},\n", - " data: 5\n", - "}\n" - ] - } - ], - "source": [ - "import { RunnableConfig, RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { dispatchCustomEvent as dispatchCustomEventWeb } from \"@langchain/core/callbacks/dispatch/web\";\n", - "\n", - "const reflect = RunnableLambda.from(async (value: string, config?: RunnableConfig) => {\n", - " await dispatchCustomEventWeb(\"event1\", { reversed: value.split(\"\").reverse().join(\"\") }, config);\n", - " await dispatchCustomEventWeb(\"event2\", 5, config);\n", - " return value;\n", - "});\n", - "\n", - "const eventStream = await reflect.streamEvents(\"hello world\", { version: \"v2\" });\n", - "\n", - "for await (const event of eventStream) {\n", - " if (event.event === \"on_custom_event\") {\n", - " console.log(event);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Callback Handler\n", - "\n", - "Let's see how to emit custom events with `dispatchCustomEvent`.\n", - "\n", - "Remember, you **must** call `dispatchCustomEvent` from within an existing `Runnable`." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " event: 'on_custom_event',\n", + " run_id: '9eac217d-3a2d-4563-a91f-3bd49bee4b3d',\n", + " name: 'event1',\n", + " tags: [],\n", + " metadata: {},\n", + " data: { reversed: 'dlrow olleh' }\n", + "}\n", + "{\n", + " event: 'on_custom_event',\n", + " run_id: '9eac217d-3a2d-4563-a91f-3bd49bee4b3d',\n", + " name: 'event2',\n", + " tags: [],\n", + " metadata: {},\n", + " data: 5\n", + "}\n" + ] + } + ], + "source": [ + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "import { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\n", + "\n", + "const reflect = RunnableLambda.from(async (value: string) => {\n", + " await dispatchCustomEvent(\"event1\", { reversed: value.split(\"\").reverse().join(\"\") });\n", + " await dispatchCustomEvent(\"event2\", 5);\n", + " return value;\n", + "});\n", + "\n", + "const eventStream = await reflect.streamEvents(\"hello world\", { version: \"v2\" });\n", + "\n", + "for await (const event of eventStream) {\n", + " if (event.event === \"on_custom_event\") {\n", + " console.log(event);\n", + " }\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "event1 { reversed: 'dlrow olleh' } 9c3770ac-c83d-4626-9643-b5fd80eb5431\n", - "event2 5 9c3770ac-c83d-4626-9643-b5fd80eb5431\n", - "hello world\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you are in a web environment that does not support `async_hooks`, you must import from the web entrypoint and propagate the config manually instead:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + 
"metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " event: 'on_custom_event',\n", + " run_id: 'dee1e4f0-c5ff-4118-9391-461a0dcc4cb2',\n", + " name: 'event1',\n", + " tags: [],\n", + " metadata: {},\n", + " data: { reversed: 'dlrow olleh' }\n", + "}\n", + "{\n", + " event: 'on_custom_event',\n", + " run_id: 'dee1e4f0-c5ff-4118-9391-461a0dcc4cb2',\n", + " name: 'event2',\n", + " tags: [],\n", + " metadata: {},\n", + " data: 5\n", + "}\n" + ] + } + ], + "source": [ + "import { RunnableConfig, RunnableLambda } from \"@langchain/core/runnables\";\n", + "import { dispatchCustomEvent as dispatchCustomEventWeb } from \"@langchain/core/callbacks/dispatch/web\";\n", + "\n", + "const reflect = RunnableLambda.from(async (value: string, config?: RunnableConfig) => {\n", + " await dispatchCustomEventWeb(\"event1\", { reversed: value.split(\"\").reverse().join(\"\") }, config);\n", + " await dispatchCustomEventWeb(\"event2\", 5, config);\n", + " return value;\n", + "});\n", + "\n", + "const eventStream = await reflect.streamEvents(\"hello world\", { version: \"v2\" });\n", + "\n", + "for await (const event of eventStream) {\n", + " if (event.event === \"on_custom_event\") {\n", + " console.log(event);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Callback Handler\n", + "\n", + "Let's see how to emit custom events with `dispatchCustomEvent`.\n", + "\n", + "Remember, you **must** call `dispatchCustomEvent` from within an existing `Runnable`." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "event1 { reversed: 'dlrow olleh' } 9c3770ac-c83d-4626-9643-b5fd80eb5431\n", + "event2 5 9c3770ac-c83d-4626-9643-b5fd80eb5431\n", + "hello world\n" + ] + } + ], + "source": [ + "import { RunnableConfig, RunnableLambda } from \"@langchain/core/runnables\";\n", + "import { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\n", + "\n", + "const reflect = RunnableLambda.from(async (value: string) => {\n", + " await dispatchCustomEvent(\"event1\", { reversed: value.split(\"\").reverse().join(\"\") });\n", + " await dispatchCustomEvent(\"event2\", 5);\n", + " return value;\n", + "});\n", + "\n", + "await reflect.invoke(\"hello world\", {\n", + " callbacks: [{\n", + " handleCustomEvent(eventName, data, runId) {\n", + " console.log(eventName, data, runId);\n", + " },\n", + " }]\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "You've now seen how to emit custom events from within your chains.\n", + "\n", + "You can check out the more in depth guide for [stream events](/docs/how_to/streaming/#using-stream-events) for more ways to parse and receive intermediate steps from your chains." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { RunnableConfig, RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\n", - "\n", - "const reflect = RunnableLambda.from(async (value: string) => {\n", - " await dispatchCustomEvent(\"event1\", { reversed: value.split(\"\").reverse().join(\"\") });\n", - " await dispatchCustomEvent(\"event2\", 5);\n", - " return value;\n", - "});\n", - "\n", - "await reflect.invoke(\"hello world\", {\n", - " callbacks: [{\n", - " handleCustomEvent(eventName, data, runId) {\n", - " console.log(eventName, data, runId);\n", - " },\n", - " }]\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "You've now seen how to emit custom events from within your chains.\n", - "\n", - "You can check out the more in depth guide for [stream events](/docs/how_to/streaming/#using-stream-events) for more ways to parse and receive intermediate steps from your chains." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/callbacks_runtime.ipynb b/docs/core_docs/docs/how_to/callbacks_runtime.ipynb index 7f41ac7f403a..485072b5878a 100644 --- a/docs/core_docs/docs/how_to/callbacks_runtime.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_runtime.ipynb @@ -1,274 +1,274 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to pass callbacks in at runtime\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Callbacks](/docs/concepts/#callbacks)\n", - "\n", - ":::\n", - "\n", - "In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n", - "\n", - "This prevents us from having to manually attach the handlers to each individual nested object. 
Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to pass callbacks in at runtime\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Callbacks](/docs/concepts/callbacks)\n", + "\n", + ":::\n", + "\n", + "In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n", + "\n", + "This prevents us from having to manually attach the handlers to each individual nested object. Here's an example using LangChain's built-in [`ConsoleCallbackHandler`](https://api.js.langchain.com/classes/langchain_core.tracers_console.ConsoleCallbackHandler.html):" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[32m[chain/start]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] Entering Chain run with input: {\n", - " \"number\": \"2\"\n", - "}\n", - "\u001b[32m[chain/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] Entering Chain run with input: {\n", - " \"number\": \"2\"\n", - "}\n", - "\u001b[36m[chain/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] [1ms] Exiting Chain run with output: {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"prompt_values\",\n", - " \"ChatPromptValue\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"messages\": [\n", - " {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"HumanMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"What is 1 + 2?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "\u001b[32m[llm/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] Entering LLM run with input: {\n", - " \"messages\": [\n", - " [\n", - " {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"HumanMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"What is 1 + 2?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " }\n", - " }\n", - " ]\n", - " ]\n", - "}\n", - "\u001b[36m[llm/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] [766ms] Exiting LLM run with output: {\n", - " \"generations\": [\n", - " [\n", - " {\n", - " \"text\": \"1 + 2 = 3\",\n", - " \"message\": {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " 
\"AIMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"1 + 2 = 3\",\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - " }\n", - " }\n", - " }\n", - " ]\n", - " ],\n", - " \"llmOutput\": {\n", - " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - "}\n", - "\u001b[36m[chain/end]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] [778ms] Exiting Chain run with output: {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"AIMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"1 + 2 = 3\",\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 13\n", - " },\n", - " \"stop_reason\": \"end_turn\"\n", - " }\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[32m[chain/start]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] Entering Chain run with input: {\n", + " \"number\": \"2\"\n", + "}\n", + "\u001b[32m[chain/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] Entering Chain run with input: {\n", + " \"number\": \"2\"\n", + "}\n", + "\u001b[36m[chain/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m2:prompt:ChatPromptTemplate\u001b[22m\u001b[39m] [1ms] Exiting Chain run with output: {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"prompt_values\",\n", + " \"ChatPromptValue\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"messages\": [\n", + " {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"HumanMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"What is 1 + 2?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + " }\n", + " ]\n", + " }\n", + 
"}\n", + "\u001b[32m[llm/start]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] Entering LLM run with input: {\n", + " \"messages\": [\n", + " [\n", + " {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"HumanMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"What is 1 + 2?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + " }\n", + " ]\n", + " ]\n", + "}\n", + "\u001b[36m[llm/end]\u001b[39m [\u001b[90m1:chain:RunnableSequence > \u001b[1m3:llm:ChatAnthropic\u001b[22m\u001b[39m] [766ms] Exiting LLM run with output: {\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"1 + 2 = 3\",\n", + " \"message\": {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"1 + 2 = 3\",\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llmOutput\": {\n", + " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + "}\n", + "\u001b[36m[chain/end]\u001b[39m [\u001b[90m\u001b[1m1:chain:RunnableSequence\u001b[22m\u001b[39m] [778ms] Exiting Chain run with output: {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"1 + 2 = 3\",\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01SGGkFVbUbH4fK7JS7agerD\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 13\n", + " },\n", + " \"stop_reason\": \"end_turn\"\n", + " }\n", + " }\n", + "}\n" + ] + }, + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {\n", + " id: 
\u001b[32m\"msg_01SGGkFVbUbH4fK7JS7agerD\"\u001b[39m,\n", + " type: \u001b[32m\"message\"\u001b[39m,\n", + " role: \u001b[32m\"assistant\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {\n", + " id: \u001b[32m\"msg_01SGGkFVbUbH4fK7JS7agerD\"\u001b[39m,\n", + " type: \u001b[32m\"message\"\u001b[39m,\n", + " role: \u001b[32m\"assistant\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " response_metadata: {\n", + " id: \u001b[32m\"msg_01SGGkFVbUbH4fK7JS7agerD\"\u001b[39m,\n", + " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", + " stop_sequence: \u001b[1mnull\u001b[22m,\n", + " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", + " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", + " },\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ConsoleCallbackHandler } from \"@langchain/core/tracers/console\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const handler = new ConsoleCallbackHandler();\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + "});\n", + "\n", + "const chain = prompt.pipe(model);\n", + "\n", + "await chain.invoke({ number: \"2\" }, { callbacks: [handler] });" + ] }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {\n", - " id: \u001b[32m\"msg_01SGGkFVbUbH4fK7JS7agerD\"\u001b[39m,\n", - " type: \u001b[32m\"message\"\u001b[39m,\n", - " role: \u001b[32m\"assistant\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"1 + 2 = 3\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {\n", - " id: \u001b[32m\"msg_01SGGkFVbUbH4fK7JS7agerD\"\u001b[39m,\n", - " type: \u001b[32m\"message\"\u001b[39m,\n", - " role: \u001b[32m\"assistant\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, 
output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " response_metadata: {\n", - " id: \u001b[32m\"msg_01SGGkFVbUbH4fK7JS7agerD\"\u001b[39m,\n", - " model: \u001b[32m\"claude-3-sonnet-20240229\"\u001b[39m,\n", - " stop_sequence: \u001b[1mnull\u001b[22m,\n", - " usage: { input_tokens: \u001b[33m16\u001b[39m, output_tokens: \u001b[33m13\u001b[39m },\n", - " stop_reason: \u001b[32m\"end_turn\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime.\n", + "\n", + "## Next steps\n", + "\n", + "You've now learned how to pass callbacks at runtime.\n", + "\n", + "Next, check out the other how-to guides in this section, such as how to create your own [custom callback handlers](/docs/how_to/custom_callbacks)." ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { ConsoleCallbackHandler } from \"@langchain/core/tracers/console\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const handler = new ConsoleCallbackHandler();\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - "});\n", - "\n", - "const chain = prompt.pipe(model);\n", - "\n", - "await chain.invoke({ number: \"2\" }, { callbacks: [handler] });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime.\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned how to pass callbacks at runtime.\n", - "\n", - "Next, check out the other how-to guides in this section, such as how to create your own [custom callback handlers](/docs/how_to/custom_callbacks)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/callbacks_serverless.ipynb b/docs/core_docs/docs/how_to/callbacks_serverless.ipynb index 90b4fdd2a77e..a2156577bcaf 100644 --- a/docs/core_docs/docs/how_to/callbacks_serverless.ipynb +++ b/docs/core_docs/docs/how_to/callbacks_serverless.ipynb @@ -1,132 +1,132 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to await callbacks in serverless environments\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Callbacks](/docs/concepts/#callbacks)\n", - "\n", - ":::\n", - "\n", - "As of `@langchain/core@0.3.0`, LangChain.js callbacks run in the background. This means that execution will **not** wait for the callback to either return before continuing. Prior to `0.3.0`, this behavior was the opposite.\n", - "\n", - "If you are running code in [serverless environments](https://en.wikipedia.org/wiki/Serverless_computing) such as [AWS Lambda](https://aws.amazon.com/pm/lambda/) or [Cloudflare Workers](https://workers.cloudflare.com/) you should set your callbacks to be blocking to allow them time to finish or timeout.\n", - "\n", - "To make callbacks blocking, set the `LANGCHAIN_CALLBACKS_BACKGROUND` environment variable to `\"false\"`. Alternatively, you can import the global [`awaitAllCallbacks`](https://api.js.langchain.com/functions/langchain_core.callbacks_promises.awaitAllCallbacks.html) method to ensure all callbacks finish if necessary.\n", - "\n", - "To illustrate this, we'll create a [custom callback handler](/docs/how_to/custom_callbacks) that takes some time to resolve, and show the timing with and without `LANGCHAIN_CALLBACKS_BACKGROUND` set to `\"false\"`. 
Here it is without the variable set along with the `awaitAllCallbacks` global:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Elapsed time: 1ms\n", - "Call finished\n", - "Final elapsed time: 2164ms\n" - ] - } - ], - "source": [ - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { awaitAllCallbacks } from \"@langchain/core/callbacks/promises\";\n", - "\n", - "const runnable = RunnableLambda.from(() => \"hello!\");\n", - "\n", - "const customHandler = {\n", - " handleChainEnd: async () => {\n", - " await new Promise((resolve) => setTimeout(resolve, 2000));\n", - " console.log(\"Call finished\");\n", - " },\n", - "};\n", - "\n", - "const startTime = new Date().getTime();\n", - "\n", - "await runnable.invoke({ number: \"2\" }, { callbacks: [customHandler] });\n", - "\n", - "console.log(`Elapsed time: ${new Date().getTime() - startTime}ms`);\n", - "\n", - "await awaitAllCallbacks();\n", - "\n", - "console.log(`Final elapsed time: ${new Date().getTime() - startTime}ms`);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that the initial `runnable.invoke()` call finishes in a short amount of time, and then roughly two seconds later, the callbacks finish.\n", - "\n", - "And here it is with backgrounding off:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to await callbacks in serverless environments\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Callbacks](/docs/concepts/callbacks)\n", + "\n", + ":::\n", + "\n", + "As of `@langchain/core@0.3.0`, LangChain.js callbacks run in the background. This means that execution will **not** wait for the callback to either return before continuing. Prior to `0.3.0`, this behavior was the opposite.\n", + "\n", + "If you are running code in [serverless environments](https://en.wikipedia.org/wiki/Serverless_computing) such as [AWS Lambda](https://aws.amazon.com/pm/lambda/) or [Cloudflare Workers](https://workers.cloudflare.com/) you should set your callbacks to be blocking to allow them time to finish or timeout.\n", + "\n", + "To make callbacks blocking, set the `LANGCHAIN_CALLBACKS_BACKGROUND` environment variable to `\"false\"`. Alternatively, you can import the global [`awaitAllCallbacks`](https://api.js.langchain.com/functions/langchain_core.callbacks_promises.awaitAllCallbacks.html) method to ensure all callbacks finish if necessary.\n", + "\n", + "To illustrate this, we'll create a [custom callback handler](/docs/how_to/custom_callbacks) that takes some time to resolve, and show the timing with and without `LANGCHAIN_CALLBACKS_BACKGROUND` set to `\"false\"`. 
Here it is without the variable set along with the `awaitAllCallbacks` global:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Elapsed time: 1ms\n", + "Call finished\n", + "Final elapsed time: 2164ms\n" + ] + } + ], + "source": [ + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "import { awaitAllCallbacks } from \"@langchain/core/callbacks/promises\";\n", + "\n", + "const runnable = RunnableLambda.from(() => \"hello!\");\n", + "\n", + "const customHandler = {\n", + " handleChainEnd: async () => {\n", + " await new Promise((resolve) => setTimeout(resolve, 2000));\n", + " console.log(\"Call finished\");\n", + " },\n", + "};\n", + "\n", + "const startTime = new Date().getTime();\n", + "\n", + "await runnable.invoke({ number: \"2\" }, { callbacks: [customHandler] });\n", + "\n", + "console.log(`Elapsed time: ${new Date().getTime() - startTime}ms`);\n", + "\n", + "await awaitAllCallbacks();\n", + "\n", + "console.log(`Final elapsed time: ${new Date().getTime() - startTime}ms`);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Call finished\n", - "Initial elapsed time: 2002ms\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that the initial `runnable.invoke()` call finishes in a short amount of time, and then roughly two seconds later, the callbacks finish.\n", + "\n", + "And here it is with backgrounding off:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Call finished\n", + "Initial elapsed time: 2002ms\n" + ] + } + ], + "source": [ + "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"false\";\n", + "\n", + "const startTimeBlocking = new Date().getTime();\n", + "\n", + "await runnable.invoke({ number: \"2\" }, { callbacks: [customHandler] });\n", + "\n", + "console.log(`Initial elapsed time: ${new Date().getTime() - startTimeBlocking}ms`);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This time, the initial call by itself takes two seconds because the `invoke()` call waits for the callback to return before returning.\n", + "\n", + "## Next steps\n", + "\n", + "You've now learned how to run callbacks in the background to reduce latency.\n", + "\n", + "Next, check out the other how-to guides in this section, such as [how to create custom callback handlers](/docs/how_to/custom_callbacks)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"false\";\n", - "\n", - "const startTimeBlocking = new Date().getTime();\n", - "\n", - "await runnable.invoke({ number: \"2\" }, { callbacks: [customHandler] });\n", - "\n", - "console.log(`Initial elapsed time: ${new Date().getTime() - startTimeBlocking}ms`);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This time, the initial call by itself takes two seconds because the `invoke()` call waits for the callback to return before returning.\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned how to run callbacks in the background to reduce latency.\n", - "\n", - "Next, check out the other how-to guides in this section, such as [how to create custom callback handlers](/docs/how_to/custom_callbacks)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/cancel_execution.ipynb b/docs/core_docs/docs/how_to/cancel_execution.ipynb index 6f35150492ee..bfa2e0a1b25b 100644 --- a/docs/core_docs/docs/how_to/cancel_execution.ipynb +++ b/docs/core_docs/docs/how_to/cancel_execution.ipynb @@ -1,297 +1,297 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to cancel execution\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Expression Language](/docs/concepts/#langchain-expression-language)\n", - "- [Chains](/docs/how_to/sequence/)\n", - "- [Streaming](/docs/how_to/streaming/)\n", - "\n", - ":::\n", - "```\n", - "\n", - "When building longer-running chains or [LangGraph](https://langchain-ai.github.io/langgraphjs/) agents, you may want to interrupt execution in situations such as a user leaving your app or submitting a new query.\n", - "\n", - "[LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language) supports aborting runnables that are in-progress via a runtime [signal](https://developer.mozilla.org/en-US/docs/Web/API/AbortController/signal) option.\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "\n", - "Built-in signal support requires `@langchain/core>=0.2.20`. Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", - "\n", - ":::\n", - "```\n", - "\n", - "**Note:** Individual integrations like chat models or retrievers may have missing or differing implementations for aborting execution. Signal support as described in this guide will apply in between steps of a chain.\n", - "\n", - "To see how this works, construct a chain such as the one below that performs [retrieval-augmented generation](/docs/tutorials/rag). 
It answers questions by first searching the web using [Tavily](/docs/integrations/retrievers/tavily), then passing the results to a chat model to generate a final answer:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const llm = new ChatAnthropic({\n", - " model: \"claude-3-5-sonnet-20240620\",\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import { TavilySearchAPIRetriever } from \"@langchain/community/retrievers/tavily_search_api\";\n", - "import type { Document } from \"@langchain/core/documents\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "\n", - "const formatDocsAsString = (docs: Document[]) => {\n", - " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\")\n", - "}\n", - "\n", - "const retriever = new TavilySearchAPIRetriever({\n", - " k: 3,\n", - "});\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`\n", - "Use the following context to answer questions to the best of your ability:\n", - "\n", - "\n", - "{context}\n", - "\n", - "\n", - "Question: {question}`)\n", - "\n", - "const chain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe(formatDocsAsString),\n", - " question: new RunnablePassthrough(),\n", - " },\n", - " prompt,\n", - " llm,\n", - " new StringOutputParser(),\n", - "]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you invoke it normally, you can see it returns up-to-date information:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Based on the provided context, the current weather in San Francisco is:\n", - "\n", - "Temperature: 17.6°C (63.7°F)\n", - "Condition: Sunny\n", - "Wind: 14.4 km/h (8.9 mph) from WSW direction\n", - "Humidity: 74%\n", - "Cloud cover: 15%\n", - "\n", - "The information indicates it's a sunny day with mild temperatures and light winds. The data appears to be from August 2, 2024, at 17:00 local time.\n" - ] - } - ], - "source": [ - "await chain.invoke(\"what is the current weather in SF?\");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, let's interrupt it early. Initialize an [`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController) and pass its `signal` property into the chain execution. 
To illustrate the fact that the cancellation occurs as soon as possible, set a timeout of 100ms:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to cancel execution\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Expression Language](/docs/concepts/lcel)\n", + "- [Chains](/docs/how_to/sequence/)\n", + "- [Streaming](/docs/how_to/streaming/)\n", + "\n", + ":::\n", + "```\n", + "\n", + "When building longer-running chains or [LangGraph](https://langchain-ai.github.io/langgraphjs/) agents, you may want to interrupt execution in situations such as a user leaving your app or submitting a new query.\n", + "\n", + "[LangChain Expression Language (LCEL)](/docs/concepts/lcel) supports aborting runnables that are in-progress via a runtime [signal](https://developer.mozilla.org/en-US/docs/Web/API/AbortController/signal) option.\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "\n", + "Built-in signal support requires `@langchain/core>=0.2.20`. Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", + "\n", + ":::\n", + "```\n", + "\n", + "**Note:** Individual integrations like chat models or retrievers may have missing or differing implementations for aborting execution. Signal support as described in this guide will apply in between steps of a chain.\n", + "\n", + "To see how this works, construct a chain such as the one below that performs [retrieval-augmented generation](/docs/tutorials/rag). It answers questions by first searching the web using [Tavily](/docs/integrations/retrievers/tavily), then passing the results to a chat model to generate a final answer:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Error: Aborted\n", - " at EventTarget. (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/utils/signal.cjs:19:24)\n", - " at [nodejs.internal.kHybridDispatch] (node:internal/event_target:825:20)\n", - " at EventTarget.dispatchEvent (node:internal/event_target:760:26)\n", - " at abortSignal (node:internal/abort_controller:370:10)\n", - " at AbortController.abort (node:internal/abort_controller:392:5)\n", - " at Timeout._onTimeout (evalmachine.:7:29)\n", - " at listOnTimeout (node:internal/timers:573:17)\n", - " at process.processTimers (node:internal/timers:514:7)\n", - "timer1: 103.204ms\n" - ] - } - ], - "source": [ - "const controller = new AbortController();\n", - "\n", - "const startTimer = console.time(\"timer1\");\n", - "\n", - "setTimeout(() => controller.abort(), 100);\n", - "\n", - "try {\n", - " await chain.invoke(\"what is the current weather in SF?\", {\n", - " signal: controller.signal,\n", - " });\n", - "} catch (e) {\n", - " console.log(e);\n", - "}\n", - "\n", - "console.timeEnd(\"timer1\");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And you can see that execution ends after just over 100ms. Looking at [this LangSmith trace](https://smith.langchain.com/public/63c04c3b-2683-4b73-a4f7-fb12f5cb9180/r), you can see that the model is never called.\n", - "\n", - "## Streaming\n", - "\n", - "You can pass a `signal` when streaming too. This gives you more control over using a `break` statement within the `for await... 
of` loop to cancel the current run, which will only trigger after final output has already started streaming. The below example uses a `break` statement - note the time elapsed before cancellation occurs:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const llm = new ChatAnthropic({\n", + " model: \"claude-3-5-sonnet-20240620\",\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "chunk \n", - "timer2: 3.990s\n" - ] - } - ], - "source": [ - "const startTimer2 = console.time(\"timer2\");\n", - "\n", - "const stream = await chain.stream(\"what is the current weather in SF?\");\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(\"chunk\", chunk);\n", - " break;\n", - "}\n", - "\n", - "console.timeEnd(\"timer2\");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now compare this to using a signal. Note that you will need to wrap the stream in a `try/catch` block:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { TavilySearchAPIRetriever } from \"@langchain/community/retrievers/tavily_search_api\";\n", + "import type { Document } from \"@langchain/core/documents\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "\n", + "const formatDocsAsString = (docs: Document[]) => {\n", + " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\")\n", + "}\n", + "\n", + "const retriever = new TavilySearchAPIRetriever({\n", + " k: 3,\n", + "});\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`\n", + "Use the following context to answer questions to the best of your ability:\n", + "\n", + "\n", + "{context}\n", + "\n", + "\n", + "Question: {question}`)\n", + "\n", + "const chain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocsAsString),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser(),\n", + "]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you invoke it normally, you can see it returns up-to-date information:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Based on the provided context, the current weather in San Francisco is:\n", + "\n", + "Temperature: 17.6°C (63.7°F)\n", + "Condition: Sunny\n", + "Wind: 14.4 km/h (8.9 mph) from WSW direction\n", + "Humidity: 74%\n", + "Cloud cover: 15%\n", + "\n", + "The information indicates it's a sunny day with mild temperatures and light winds. The data appears to be from August 2, 2024, at 17:00 local time.\n" + ] + } + ], + "source": [ + "await chain.invoke(\"what is the current weather in SF?\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's interrupt it early. 
Initialize an [`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController) and pass its `signal` property into the chain execution. To illustrate the fact that the cancellation occurs as soon as possible, set a timeout of 100ms:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Error: Aborted\n", + " at EventTarget. (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/utils/signal.cjs:19:24)\n", + " at [nodejs.internal.kHybridDispatch] (node:internal/event_target:825:20)\n", + " at EventTarget.dispatchEvent (node:internal/event_target:760:26)\n", + " at abortSignal (node:internal/abort_controller:370:10)\n", + " at AbortController.abort (node:internal/abort_controller:392:5)\n", + " at Timeout._onTimeout (evalmachine.:7:29)\n", + " at listOnTimeout (node:internal/timers:573:17)\n", + " at process.processTimers (node:internal/timers:514:7)\n", + "timer1: 103.204ms\n" + ] + } + ], + "source": [ + "const controller = new AbortController();\n", + "\n", + "const startTimer = console.time(\"timer1\");\n", + "\n", + "setTimeout(() => controller.abort(), 100);\n", + "\n", + "try {\n", + " await chain.invoke(\"what is the current weather in SF?\", {\n", + " signal: controller.signal,\n", + " });\n", + "} catch (e) {\n", + " console.log(e);\n", + "}\n", + "\n", + "console.timeEnd(\"timer1\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And you can see that execution ends after just over 100ms. Looking at [this LangSmith trace](https://smith.langchain.com/public/63c04c3b-2683-4b73-a4f7-fb12f5cb9180/r), you can see that the model is never called.\n", + "\n", + "## Streaming\n", + "\n", + "You can pass a `signal` when streaming too. This gives you more control over using a `break` statement within the `for await... of` loop to cancel the current run, which will only trigger after final output has already started streaming. The below example uses a `break` statement - note the time elapsed before cancellation occurs:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Error: Aborted\n", - " at EventTarget. (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/utils/signal.cjs:19:24)\n", - " at [nodejs.internal.kHybridDispatch] (node:internal/event_target:825:20)\n", - " at EventTarget.dispatchEvent (node:internal/event_target:760:26)\n", - " at abortSignal (node:internal/abort_controller:370:10)\n", - " at AbortController.abort (node:internal/abort_controller:392:5)\n", - " at Timeout._onTimeout (evalmachine.:7:38)\n", - " at listOnTimeout (node:internal/timers:573:17)\n", - " at process.processTimers (node:internal/timers:514:7)\n", - "timer3: 100.684ms\n" - ] + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "chunk \n", + "timer2: 3.990s\n" + ] + } + ], + "source": [ + "const startTimer2 = console.time(\"timer2\");\n", + "\n", + "const stream = await chain.stream(\"what is the current weather in SF?\");\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(\"chunk\", chunk);\n", + " break;\n", + "}\n", + "\n", + "console.timeEnd(\"timer2\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now compare this to using a signal. 
Note that you will need to wrap the stream in a `try/catch` block:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Error: Aborted\n", + " at EventTarget. (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/utils/signal.cjs:19:24)\n", + " at [nodejs.internal.kHybridDispatch] (node:internal/event_target:825:20)\n", + " at EventTarget.dispatchEvent (node:internal/event_target:760:26)\n", + " at abortSignal (node:internal/abort_controller:370:10)\n", + " at AbortController.abort (node:internal/abort_controller:392:5)\n", + " at Timeout._onTimeout (evalmachine.:7:38)\n", + " at listOnTimeout (node:internal/timers:573:17)\n", + " at process.processTimers (node:internal/timers:514:7)\n", + "timer3: 100.684ms\n" + ] + } + ], + "source": [ + "const controllerForStream = new AbortController();\n", + "\n", + "const startTimer3 = console.time(\"timer3\");\n", + "\n", + "setTimeout(() => controllerForStream.abort(), 100);\n", + "\n", + "try {\n", + " const streamWithSignal = await chain.stream(\"what is the current weather in SF?\", {\n", + " signal: controllerForStream.signal\n", + " });\n", + " for await (const chunk of streamWithSignal) {\n", + " console.log(chunk);\n", + " break;\n", + " } \n", + "} catch (e) {\n", + " console.log(e); \n", + "}\n", + "\n", + "console.timeEnd(\"timer3\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "- [Pass through arguments from one step to the next](/docs/how_to/passthrough)\n", + "- [Dispatching custom events](/docs/how_to/callbacks_custom_events)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const controllerForStream = new AbortController();\n", - "\n", - "const startTimer3 = console.time(\"timer3\");\n", - "\n", - "setTimeout(() => controllerForStream.abort(), 100);\n", - "\n", - "try {\n", - " const streamWithSignal = await chain.stream(\"what is the current weather in SF?\", {\n", - " signal: controllerForStream.signal\n", - " });\n", - " for await (const chunk of streamWithSignal) {\n", - " console.log(chunk);\n", - " break;\n", - " } \n", - "} catch (e) {\n", - " console.log(e); \n", - "}\n", - "\n", - "console.timeEnd(\"timer3\");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "- [Pass through arguments from one step to the next](/docs/how_to/passthrough)\n", - "- [Dispatching custom events](/docs/how_to/callbacks_custom_events)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/character_text_splitter.ipynb b/docs/core_docs/docs/how_to/character_text_splitter.ipynb index f956da4b251a..e5085e86a690 100644 --- 
a/docs/core_docs/docs/how_to/character_text_splitter.ipynb +++ b/docs/core_docs/docs/how_to/character_text_splitter.ipynb @@ -1,158 +1,158 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "c3ee8d00", - "metadata": {}, - "source": [ - "# How to split by character\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Text splitters](/docs/concepts#text-splitters)\n", - "\n", - ":::\n", - "\n", - "This is the simplest method for splitting text. This splits based on a given character sequence, which defaults to `\"\\n\\n\"`. Chunk length is measured by number of characters.\n", - "\n", - "1. How the text is split: by single character separator.\n", - "2. How the chunk size is measured: by number of characters.\n", - "\n", - "To obtain the string content directly, use `.splitText()`.\n", - "\n", - "To create LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments()`." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "313fb032", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: \"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"... 839 more characters,\n", - " metadata: { loc: { lines: { from: 1, to: 17 } } }\n", - "}\n" - ] - } - ], - "source": [ - "import { CharacterTextSplitter } from \"@langchain/textsplitters\";\n", - "import * as fs from \"node:fs\";\n", - "\n", - "// Load an example document\n", - "const rawData = await fs.readFileSync(\"../../../../examples/state_of_the_union.txt\");\n", - "const stateOfTheUnion = rawData.toString();\n", - "\n", - "const textSplitter = new CharacterTextSplitter({\n", - " separator: \"\\n\\n\",\n", - " chunkSize: 1000,\n", - " chunkOverlap: 200,\n", - "});\n", - "const texts = await textSplitter.createDocuments([stateOfTheUnion]);\n", - "console.log(texts[0])" - ] - }, - { - "cell_type": "markdown", - "id": "dadcb9d6", - "metadata": {}, - "source": [ - "You can also propagate metadata associated with each document to the output chunks:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "1affda60", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c3ee8d00", + "metadata": {}, + "source": [ + "# How to split by character\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Text splitters](/docs/concepts/text_splitters)\n", + "\n", + ":::\n", + "\n", + "This is the simplest method for splitting text. This splits based on a given character sequence, which defaults to `\"\\n\\n\"`. Chunk length is measured by number of characters.\n", + "\n", + "1. How the text is split: by single character separator.\n", + "2. How the chunk size is measured: by number of characters.\n", + "\n", + "To obtain the string content directly, use `.splitText()`.\n", + "\n", + "To create LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments()`." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: \"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"... 
839 more characters,\n", - " metadata: { document: 1, loc: { lines: { from: 1, to: 17 } } }\n", - "}\n" - ] - } - ], - "source": [ - "const metadatas = [{ document: 1 }, { document: 2 }];\n", - "\n", - "const documents = await textSplitter.createDocuments(\n", - " [stateOfTheUnion, stateOfTheUnion], metadatas\n", - ")\n", - "\n", - "console.log(documents[0])" - ] - }, - { - "cell_type": "markdown", - "id": "ee080e12-6f44-4311-b1ef-302520a41d66", - "metadata": {}, - "source": [ - "To obtain the string content directly, use `.splitText()`:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "2a830a9f", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "313fb032", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: \"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"... 839 more characters,\n", + " metadata: { loc: { lines: { from: 1, to: 17 } } }\n", + "}\n" + ] + } + ], + "source": [ + "import { CharacterTextSplitter } from \"@langchain/textsplitters\";\n", + "import * as fs from \"node:fs\";\n", + "\n", + "// Load an example document\n", + "const rawData = await fs.readFileSync(\"../../../../examples/state_of_the_union.txt\");\n", + "const stateOfTheUnion = rawData.toString();\n", + "\n", + "const textSplitter = new CharacterTextSplitter({\n", + " separator: \"\\n\\n\",\n", + " chunkSize: 1000,\n", + " chunkOverlap: 200,\n", + "});\n", + "const texts = await textSplitter.createDocuments([stateOfTheUnion]);\n", + "console.log(texts[0])" + ] + }, { - "data": { - "text/plain": [ - "\u001b[32m\"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"\u001b[39m... 839 more characters" + "cell_type": "markdown", + "id": "dadcb9d6", + "metadata": {}, + "source": [ + "You can also propagate metadata associated with each document to the output chunks:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "1affda60", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: \"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"... 839 more characters,\n", + " metadata: { document: 1, loc: { lines: { from: 1, to: 17 } } }\n", + "}\n" + ] + } + ], + "source": [ + "const metadatas = [{ document: 1 }, { document: 2 }];\n", + "\n", + "const documents = await textSplitter.createDocuments(\n", + " [stateOfTheUnion, stateOfTheUnion], metadatas\n", + ")\n", + "\n", + "console.log(documents[0])" + ] + }, + { + "cell_type": "markdown", + "id": "ee080e12-6f44-4311-b1ef-302520a41d66", + "metadata": {}, + "source": [ + "To obtain the string content directly, use `.splitText()`:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2a830a9f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and th\"\u001b[39m... 
839 more characters" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const chunks = await textSplitter.splitText(stateOfTheUnion);\n", + "\n", + "chunks[0];" + ] + }, + { + "cell_type": "markdown", + "id": "cd4dd67a", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned a method for splitting text by character.\n", + "\n", + "Next, check out a [more advanced way of splitting by character](/docs/how_to/recursive_text_splitter), or the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag)." ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "const chunks = await textSplitter.splitText(stateOfTheUnion);\n", - "\n", - "chunks[0];" - ] - }, - { - "cell_type": "markdown", - "id": "cd4dd67a", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned a method for splitting text by character.\n", - "\n", - "Next, check out a [more advanced way of splitting by character](/docs/how_to/recursive_text_splitter), or the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/chat_model_caching.mdx b/docs/core_docs/docs/how_to/chat_model_caching.mdx index 9c0c800e4ced..f8a8ad3caa87 100644 --- a/docs/core_docs/docs/how_to/chat_model_caching.mdx +++ b/docs/core_docs/docs/how_to/chat_model_caching.mdx @@ -8,8 +8,8 @@ sidebar_position: 3 This guide assumes familiarity with the following concepts: -- [Chat models](/docs/concepts/#chat-models) -- [LLMs](/docs/concepts/#llms) +- [Chat models](/docs/concepts/chat_models) +- [LLMs](/docs/concepts/text_llms) ::: diff --git a/docs/core_docs/docs/how_to/chat_models_universal_init.mdx b/docs/core_docs/docs/how_to/chat_models_universal_init.mdx index 46010ff67f61..f0cff1c86fa9 100644 --- a/docs/core_docs/docs/how_to/chat_models_universal_init.mdx +++ b/docs/core_docs/docs/how_to/chat_models_universal_init.mdx @@ -11,11 +11,11 @@ Keep in mind this feature is only for chat models. 
This guide assumes familiarity with the following concepts: -- [Chat models](/docs/concepts/#chat-models) +- [Chat models](/docs/concepts/chat_models) -- [LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language) +- [LangChain Expression Language (LCEL)](/docs/concepts/lcel) -- [Tool calling](/docs/concepts#tools) +- [Tool calling](/docs/concepts/tools) ::: diff --git a/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx b/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx index 644f3c2f5c0d..0ff0c2885fda 100644 --- a/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx +++ b/docs/core_docs/docs/how_to/chat_token_usage_tracking.mdx @@ -8,7 +8,7 @@ sidebar_position: 5 This guide assumes familiarity with the following concepts: -- [Chat models](/docs/concepts/#chat-models) +- [Chat models](/docs/concepts/chat_models) ::: diff --git a/docs/core_docs/docs/how_to/chatbots_tools.ipynb b/docs/core_docs/docs/how_to/chatbots_tools.ipynb index d9f8ff25e52f..f21b4efd1127 100644 --- a/docs/core_docs/docs/how_to/chatbots_tools.ipynb +++ b/docs/core_docs/docs/how_to/chatbots_tools.ipynb @@ -1,511 +1,511 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to add tools to chatbots\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chatbots](/docs/concepts/#messages)\n", - "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/)\n", - "- [Chat history](/docs/concepts/#chat-history)\n", - "\n", - ":::\n", - "\n", - "This section will cover how to create conversational agents: chatbots that can interact with other systems and APIs using tools.\n", - "\n", - ":::note\n", - "\n", - "This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/chatbots_tools/).\n", - "\n", - "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", - "\n", - ":::\n", - "\n", - "## Setup\n", - "\n", - "For this guide, we'll be using a [tool calling agent](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#tool-calling-agent) with a single tool for searching the web. The default will be powered by [Tavily](/docs/integrations/tools/tavily_search), but you can switch it out for any similar tool. 
The rest of this section will assume you're using Tavily.\n", - "\n", - "You'll need to [sign up for an account](https://tavily.com/) on the Tavily website, and install the following packages:\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - " @langchain/core @langchain/langgraph @langchain/community\n", - "\n", - "```\n", - "\n", - "Let’s also set up a chat model that we’ll use for the below examples.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```\n", - "\n", - "```typescript\n", - "process.env.TAVILY_API_KEY = \"YOUR_API_KEY\";\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Creating an agent\n", - "\n", - "Our end goal is to create an agent that can respond conversationally to user questions while looking up information as needed.\n", - "\n", - "First, let's initialize Tavily and an OpenAI chat model capable of tool calling:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\";\n", - "\n", - "const tools = [\n", - " new TavilySearchResults({\n", - " maxResults: 1,\n", - " }),\n", - "];" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To make our agent conversational, we can also specify a prompt. Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import {\n", - " ChatPromptTemplate,\n", - "} from \"@langchain/core/prompts\";\n", - "\n", - "// Adapted from https://smith.langchain.com/hub/jacob/tool-calling-agent\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant. You may not need to use tools for every query - the user may just want to chat!\",\n", - " ],\n", - "]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Great! Now let's assemble our agent using LangGraph's prebuilt [createReactAgent](https://langchain-ai.github.io/langgraphjs/reference/functions/langgraph_prebuilt.createReactAgent.html), which allows you to create a [tool-calling agent](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#tool-calling-agent):" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", - "\n", - "// messageModifier allows you to preprocess the inputs to the model inside ReAct agent\n", - "// in this case, since we're passing a prompt string, we'll just always add a SystemMessage\n", - "// with this prompt string before any other messages sent to the model\n", - "const agent = createReactAgent({ llm, tools, messageModifier: prompt })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running the agent\n", - "\n", - "Now that we've set up our agent, let's try interacting with it! 
It can handle both trivial queries that require no lookup:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " \"id\": \"8c5fa465-e8d8-472a-9434-f574bf74537f\",\n", - " \"content\": \"I'm Nemo!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABTKLLriRcZin65zLAMB3WUf9Sg1t\",\n", - " \"content\": \"How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 93,\n", - " \"totalTokens\": 101\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_3537616b13\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 93,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 101\n", - " }\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "await agent.invoke({ messages: [{ role: \"user\", content: \"I'm Nemo!\" }]})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Or, it can use of the passed search tool to get up to date information if needed:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to add tools to chatbots\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chatbots](/docs/concepts/messages)\n", + "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/)\n", + "- [Chat history](/docs/concepts/chat_history)\n", + "\n", + ":::\n", + "\n", + "This section will cover how to create conversational agents: chatbots that can interact with other systems and APIs using tools.\n", + "\n", + ":::note\n", + "\n", + "This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/chatbots_tools/).\n", + "\n", + "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", + "\n", + ":::\n", + "\n", + "## Setup\n", + "\n", + "For this guide, we'll be using a [tool calling agent](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#tool-calling-agent) with a single tool for searching the web. The default will be powered by [Tavily](/docs/integrations/tools/tavily_search), but you can switch it out for any similar tool. 
The rest of this section will assume you're using Tavily.\n", + "\n", + "You'll need to [sign up for an account](https://tavily.com/) on the Tavily website, and install the following packages:\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + " @langchain/core @langchain/langgraph @langchain/community\n", + "\n", + "```\n", + "\n", + "Let’s also set up a chat model that we’ll use for the below examples.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n", + "\n", + "```typescript\n", + "process.env.TAVILY_API_KEY = \"YOUR_API_KEY\";\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " \"id\": \"65c315b6-2433-4cb1-97c7-b60b5546f518\",\n", - " \"content\": \"What is the current conservation status of the Great Barrier Reef?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABTKLQn1e4axRhqIhpKMyzWWTGauO\",\n", - " \"content\": \"How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 93,\n", - " \"totalTokens\": 101\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_3537616b13\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 93,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 101\n", - " }\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "await agent.invoke({ messages: [{ role: \"user\", content: \"What is the current conservation status of the Great Barrier Reef?\" }]})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Conversational responses\n", - "\n", - "Because our prompt contains a placeholder for chat history messages, our agent can also take previous interactions into account and respond conversationally like a standard chatbot:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating an agent\n", + "\n", + "Our end goal is to create an agent that can respond conversationally to user questions while looking up information as needed.\n", + "\n", + "First, let's initialize Tavily and an OpenAI chat model capable of tool calling:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " \"id\": \"6433afc5-31bd-44b3-b34c-f11647e1677d\",\n", - " \"content\": \"I'm Nemo!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " HumanMessage {\n", - " \"id\": \"f163b5f1-ea29-4d7a-9965-7c7c563d9cea\",\n", - " \"content\": \"Hello Nemo! 
How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " HumanMessage {\n", - " \"id\": \"382c3354-d02b-4888-98d8-44d75d045044\",\n", - " \"content\": \"What is my name?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABTKMKu7ThZDZW09yMIPTq2N723Cj\",\n", - " \"content\": \"How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 93,\n", - " \"totalTokens\": 101\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 93,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 101\n", - " }\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "await agent.invoke({\n", - " messages: [\n", - " { role: \"user\", content: \"I'm Nemo!\" },\n", - " { role: \"user\", content: \"Hello Nemo! How can I assist you today?\" },\n", - " { role: \"user\", content: \"What is my name?\" }\n", - " ]\n", - "})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If preferred, you can also add memory to the LangGraph agent to manage the history of messages. Let's redeclare it this way:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "import { MemorySaver } from \"@langchain/langgraph\"\n", - "\n", - "// highlight-start\n", - "const memory = new MemorySaver()\n", - "const agent2 = createReactAgent({ llm, tools, messageModifier: prompt, checkpointSaver: memory })\n", - "// highlight-end" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " \"id\": \"a4a4f663-8192-4179-afcc-88d9d186aa80\",\n", - " \"content\": \"I'm Nemo!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABTKi4tBzOWMh3hgA46xXo7bJzb8r\",\n", - " \"content\": \"How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 93,\n", - " \"totalTokens\": 101\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 93,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 101\n", - " }\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "await agent2.invoke({ messages: [{ role: \"user\", content: \"I'm Nemo!\" }]}, { configurable: { thread_id: \"1\" } })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And then if we rerun our wrapped agent executor:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ + "cell_type": "code", + 
"execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\";\n", + "\n", + "const tools = [\n", + " new TavilySearchResults({\n", + " maxResults: 1,\n", + " }),\n", + "];" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To make our agent conversational, we can also specify a prompt. Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " ChatPromptTemplate,\n", + "} from \"@langchain/core/prompts\";\n", + "\n", + "// Adapted from https://smith.langchain.com/hub/jacob/tool-calling-agent\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant. You may not need to use tools for every query - the user may just want to chat!\",\n", + " ],\n", + "]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Great! Now let's assemble our agent using LangGraph's prebuilt [createReactAgent](https://langchain-ai.github.io/langgraphjs/reference/functions/langgraph_prebuilt.createReactAgent.html), which allows you to create a [tool-calling agent](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#tool-calling-agent):" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", + "\n", + "// messageModifier allows you to preprocess the inputs to the model inside ReAct agent\n", + "// in this case, since we're passing a prompt string, we'll just always add a SystemMessage\n", + "// with this prompt string before any other messages sent to the model\n", + "const agent = createReactAgent({ llm, tools, messageModifier: prompt })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running the agent\n", + "\n", + "Now that we've set up our agent, let's try interacting with it! 
It can handle both trivial queries that require no lookup:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"8c5fa465-e8d8-472a-9434-f574bf74537f\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKLLriRcZin65zLAMB3WUf9Sg1t\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_3537616b13\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "await agent.invoke({ messages: [{ role: \"user\", content: \"I'm Nemo!\" }]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Or, it can use of the passed search tool to get up to date information if needed:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"65c315b6-2433-4cb1-97c7-b60b5546f518\",\n", + " \"content\": \"What is the current conservation status of the Great Barrier Reef?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKLQn1e4axRhqIhpKMyzWWTGauO\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_3537616b13\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "await agent.invoke({ messages: [{ role: \"user\", content: \"What is the current conservation status of the Great Barrier Reef?\" }]})" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " \"id\": \"c5fd303c-eb49-41a0-868e-bc8c5aa02cf6\",\n", - " \"content\": \"I'm Nemo!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABTKi4tBzOWMh3hgA46xXo7bJzb8r\",\n", - " \"content\": \"How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 93,\n", - " \"totalTokens\": 101\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " },\n", - " HumanMessage {\n", - " \"id\": \"635b17b9-2ec7-412f-bf45-85d0e9944430\",\n", - " \"content\": \"What is my name?\",\n", - " 
\"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABTKjBbmFlPb5t37aJ8p4NtoHb8YG\",\n", - " \"content\": \"How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 93,\n", - " \"totalTokens\": 101\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 93,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 101\n", - " }\n", - " }\n", - " ]\n", - "}\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conversational responses\n", + "\n", + "Because our prompt contains a placeholder for chat history messages, our agent can also take previous interactions into account and respond conversationally like a standard chatbot:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"6433afc5-31bd-44b3-b34c-f11647e1677d\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"f163b5f1-ea29-4d7a-9965-7c7c563d9cea\",\n", + " \"content\": \"Hello Nemo! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"382c3354-d02b-4888-98d8-44d75d045044\",\n", + " \"content\": \"What is my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKMKu7ThZDZW09yMIPTq2N723Cj\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "await agent.invoke({\n", + " messages: [\n", + " { role: \"user\", content: \"I'm Nemo!\" },\n", + " { role: \"user\", content: \"Hello Nemo! How can I assist you today?\" },\n", + " { role: \"user\", content: \"What is my name?\" }\n", + " ]\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If preferred, you can also add memory to the LangGraph agent to manage the history of messages. 
Let's redeclare it this way:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "import { MemorySaver } from \"@langchain/langgraph\"\n", + "\n", + "// highlight-start\n", + "const memory = new MemorySaver()\n", + "const agent2 = createReactAgent({ llm, tools, messageModifier: prompt, checkpointSaver: memory })\n", + "// highlight-end" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"a4a4f663-8192-4179-afcc-88d9d186aa80\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKi4tBzOWMh3hgA46xXo7bJzb8r\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "await agent2.invoke({ messages: [{ role: \"user\", content: \"I'm Nemo!\" }]}, { configurable: { thread_id: \"1\" } })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And then if we rerun our wrapped agent executor:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"c5fd303c-eb49-41a0-868e-bc8c5aa02cf6\",\n", + " \"content\": \"I'm Nemo!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKi4tBzOWMh3hgA46xXo7bJzb8r\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"635b17b9-2ec7-412f-bf45-85d0e9944430\",\n", + " \"content\": \"What is my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTKjBbmFlPb5t37aJ8p4NtoHb8YG\",\n", + " \"content\": \"How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 101\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 101\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "await agent2.invoke({ messages: [{ role: \"user\", content: \"What is my 
name?\" }]}, { configurable: { thread_id: \"1\" } })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This [LangSmith trace](https://smith.langchain.com/public/16cbcfa5-5ef1-4d4c-92c9-538a6e71f23d/r) shows what's going on under the hood.\n", + "\n", + "## Further reading\n", + "\n", + "For more on how to build agents, check these [LangGraph](https://langchain-ai.github.io/langgraphjs/) guides:\n", + "\n", + "* [agents conceptual guide](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/)\n", + "* [agents tutorials](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/multi_agent_collaboration/)\n", + "* [createReactAgent](https://langchain-ai.github.io/langgraphjs/how-tos/create-react-agent/)\n", + "\n", + "For more on tool usage, you can also check out [this use case section](/docs/how_to#tools)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "await agent2.invoke({ messages: [{ role: \"user\", content: \"What is my name?\" }]}, { configurable: { thread_id: \"1\" } })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This [LangSmith trace](https://smith.langchain.com/public/16cbcfa5-5ef1-4d4c-92c9-538a6e71f23d/r) shows what's going on under the hood.\n", - "\n", - "## Further reading\n", - "\n", - "For more on how to build agents, check these [LangGraph](https://langchain-ai.github.io/langgraphjs/) guides:\n", - "\n", - "* [agents conceptual guide](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/)\n", - "* [agents tutorials](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/multi_agent_collaboration/)\n", - "* [createReactAgent](https://langchain-ai.github.io/langgraphjs/how-tos/create-react-agent/)\n", - "\n", - "For more on tool usage, you can also check out [this use case section](/docs/how_to#tools)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/code_splitter.ipynb b/docs/core_docs/docs/how_to/code_splitter.ipynb index 28726f2650ad..1bb91cb34baa 100644 --- a/docs/core_docs/docs/how_to/code_splitter.ipynb +++ b/docs/core_docs/docs/how_to/code_splitter.ipynb @@ -1,699 +1,699 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "44b9976d", - "metadata": {}, - "source": [ - "# How to split code\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Text splitters](/docs/concepts#text-splitters)\n", - "- [Recursively splitting text by character](/docs/how_to/recursive_text_splitter)\n", - "\n", - ":::\n", - "\n", - "\n", - "[RecursiveCharacterTextSplitter](https://api.js.langchain.com/classes/langchain_textsplitters.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language.\n", - "\n", - "Supported languages include:\n", - "\n", - "```\n", - "\"html\" | \"cpp\" | \"go\" | \"java\" | \"js\" | \"php\" | \"proto\" | \"python\" | \"rst\" | \"ruby\" | \"rust\" | \"scala\" | \"swift\" | \"markdown\" | \"latex\" | \"sol\"\n", - "```\n", - "\n", - "To view the list of separators for a given language, pass one of the values from the list above into the `getSeparatorsForLanguage()` static method" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "c92fb913", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "[\n", - " \u001b[32m\"\\nfunction \"\u001b[39m, \u001b[32m\"\\nconst \"\u001b[39m,\n", - " \u001b[32m\"\\nlet \"\u001b[39m, \u001b[32m\"\\nvar \"\u001b[39m,\n", - " \u001b[32m\"\\nclass \"\u001b[39m, \u001b[32m\"\\nif \"\u001b[39m,\n", - " \u001b[32m\"\\nfor \"\u001b[39m, \u001b[32m\"\\nwhile \"\u001b[39m,\n", - " \u001b[32m\"\\nswitch \"\u001b[39m, \u001b[32m\"\\ncase \"\u001b[39m,\n", - " \u001b[32m\"\\ndefault \"\u001b[39m, \u001b[32m\"\\n\\n\"\u001b[39m,\n", - " \u001b[32m\"\\n\"\u001b[39m, \u001b[32m\" \"\u001b[39m,\n", - " \u001b[32m\"\"\u001b[39m\n", - "]" + "cell_type": "markdown", + "id": "44b9976d", + "metadata": {}, + "source": [ + "# How to split code\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Text splitters](/docs/concepts/text_splitters)\n", + "- [Recursively splitting text by character](/docs/how_to/recursive_text_splitter)\n", + "\n", + ":::\n", + "\n", + "\n", + "[RecursiveCharacterTextSplitter](https://api.js.langchain.com/classes/langchain_textsplitters.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language.\n", + "\n", + "Supported languages include:\n", + "\n", + "```\n", + "\"html\" | \"cpp\" | \"go\" | \"java\" | \"js\" | \"php\" | \"proto\" | \"python\" | \"rst\" | \"ruby\" | \"rust\" | \"scala\" | \"swift\" | \"markdown\" | \"latex\" | \"sol\"\n", + "```\n", + "\n", + "To view the list of separators for a given language, pass one of the 
values from the list above into the `getSeparatorsForLanguage()` static method" ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import {\n", - " RecursiveCharacterTextSplitter,\n", - "} from \"@langchain/textsplitters\";\n", - "\n", - "RecursiveCharacterTextSplitter.getSeparatorsForLanguage(\"js\");" - ] - }, - { - "cell_type": "markdown", - "id": "354f60a5", - "metadata": {}, - "source": [ - "## JS\n", - "Here's an example using the JS text splitter:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "7db0d486", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m'function helloWorld() {\\n console.log(\"Hello, World!\");\\n}'\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m4\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"// Call the function\\nhelloWorld();\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m6\u001b[39m, to: \u001b[33m7\u001b[39m } } }\n", - " }\n", - "]" + "cell_type": "code", + "execution_count": 1, + "id": "c92fb913", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " \u001b[32m\"\\nfunction \"\u001b[39m, \u001b[32m\"\\nconst \"\u001b[39m,\n", + " \u001b[32m\"\\nlet \"\u001b[39m, \u001b[32m\"\\nvar \"\u001b[39m,\n", + " \u001b[32m\"\\nclass \"\u001b[39m, \u001b[32m\"\\nif \"\u001b[39m,\n", + " \u001b[32m\"\\nfor \"\u001b[39m, \u001b[32m\"\\nwhile \"\u001b[39m,\n", + " \u001b[32m\"\\nswitch \"\u001b[39m, \u001b[32m\"\\ncase \"\u001b[39m,\n", + " \u001b[32m\"\\ndefault \"\u001b[39m, \u001b[32m\"\\n\\n\"\u001b[39m,\n", + " \u001b[32m\"\\n\"\u001b[39m, \u001b[32m\" \"\u001b[39m,\n", + " \u001b[32m\"\"\u001b[39m\n", + "]" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import {\n", + " RecursiveCharacterTextSplitter,\n", + "} from \"@langchain/textsplitters\";\n", + "\n", + "RecursiveCharacterTextSplitter.getSeparatorsForLanguage(\"js\");" ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const JS_CODE = `\n", - "function helloWorld() {\n", - " console.log(\"Hello, World!\");\n", - "}\n", - "\n", - "// Call the function\n", - "helloWorld();\n", - "`\n", - "\n", - "const jsSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", - " \"js\", {\n", - " chunkSize: 60,\n", - " chunkOverlap: 0,\n", - " }\n", - ")\n", - "const jsDocs = await jsSplitter.createDocuments([JS_CODE]);\n", - "\n", - "jsDocs" - ] - }, - { - "cell_type": "markdown", - "id": "dcb8931b", - "metadata": {}, - "source": [ - "## Python\n", - "\n", - "Here's an example for Python:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "a58512b9", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m'def hello_world():\\n print(\"Hello, World!\")'\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m3\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"# Call the function\\nhello_world()\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m5\u001b[39m, to: \u001b[33m6\u001b[39m } } }\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "354f60a5", + "metadata": {}, + "source": [ + "## JS\n", + "Here's an example using the JS text splitter:" ] - }, - 
"execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const PYTHON_CODE = `\n", - "def hello_world():\n", - " print(\"Hello, World!\")\n", - "\n", - "# Call the function\n", - "hello_world()\n", - "`\n", - "\n", - "const pythonSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", - " \"python\", {\n", - " chunkSize: 50,\n", - " chunkOverlap: 0,\n", - " }\n", - ")\n", - "const pythonDocs = await pythonSplitter.createDocuments([PYTHON_CODE])\n", - "pythonDocs" - ] - }, - { - "cell_type": "markdown", - "id": "ee2361f8", - "metadata": {}, - "source": [ - "## Markdown\n", - "\n", - "Here's an example of splitting on markdown separators:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "ac9295d3", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"# 🦜️🔗 LangChain\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m2\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"⚡ Building applications with LLMs through composability ⚡\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m4\u001b[39m, to: \u001b[33m4\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"## Quick Install\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m6\u001b[39m, to: \u001b[33m6\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"```bash\\n# Hopefully this code block isn't split\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m8\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"pip install langchain\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m10\u001b[39m, to: \u001b[33m10\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"```\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m11\u001b[39m, to: \u001b[33m11\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"As an open-source project in a rapidly developing field, we\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m13\u001b[39m, to: \u001b[33m13\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"are extremely open to contributions.\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m13\u001b[39m, to: \u001b[33m13\u001b[39m } } }\n", - " }\n", - "]" + "cell_type": "code", + "execution_count": 2, + "id": "7db0d486", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m'function helloWorld() {\\n console.log(\"Hello, World!\");\\n}'\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m4\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"// Call the function\\nhelloWorld();\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m6\u001b[39m, to: \u001b[33m7\u001b[39m } } }\n", + " }\n", + "]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const JS_CODE = `\n", + "function helloWorld() {\n", + " console.log(\"Hello, World!\");\n", + "}\n", + "\n", + "// Call the function\n", + "helloWorld();\n", + "`\n", + "\n", + "const jsSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", + " \"js\", {\n", + " chunkSize: 60,\n", + " chunkOverlap: 0,\n", + " }\n", + ")\n", + 
"const jsDocs = await jsSplitter.createDocuments([JS_CODE]);\n", + "\n", + "jsDocs" ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const markdownText = `\n", - "# 🦜️🔗 LangChain\n", - "\n", - "⚡ Building applications with LLMs through composability ⚡\n", - "\n", - "## Quick Install\n", - "\n", - "\\`\\`\\`bash\n", - "# Hopefully this code block isn't split\n", - "pip install langchain\n", - "\\`\\`\\`\n", - "\n", - "As an open-source project in a rapidly developing field, we are extremely open to contributions.\n", - "`;\n", - "\n", - "const mdSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", - " \"markdown\", {\n", - " chunkSize: 60,\n", - " chunkOverlap: 0,\n", - " }\n", - ")\n", - "const mdDocs = await mdSplitter.createDocuments([markdownText])\n", - "\n", - "mdDocs" - ] - }, - { - "cell_type": "markdown", - "id": "7aa306f6", - "metadata": {}, - "source": [ - "## Latex\n", - "\n", - "Here's an example on Latex text:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "77d1049d", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"documentclass{article}\\n\\n\\begin{document}\\n\\nmaketitle\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m6\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"section{Introduction}\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m8\u001b[39m, to: \u001b[33m8\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"Large language models (LLMs) are a type of machine learning\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"model that can be trained on vast amounts of text data to\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"generate human-like language. 
In recent years, LLMs have\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"made significant advances in a variety of natural language\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"processing tasks, including language translation, text\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"generation, and sentiment analysis.\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"subsection{History of LLMs}\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m11\u001b[39m, to: \u001b[33m11\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"The earliest LLMs were developed in the 1980s and 1990s,\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"but they were limited by the amount of data that could be\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"processed and the computational power available at the\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"time. In the past decade, however, advances in hardware and\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"software have made it possible to train LLMs on massive\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"datasets, leading to significant improvements in\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"performance.\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"subsection{Applications of LLMs}\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m14\u001b[39m, to: \u001b[33m14\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"LLMs have many applications in industry, including\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"chatbots, content creation, and virtual assistants. 
They\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"can also be used in academia for research in linguistics,\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"psychology, and computational linguistics.\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"end{document}\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m17\u001b[39m, to: \u001b[33m17\u001b[39m } } }\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "dcb8931b", + "metadata": {}, + "source": [ + "## Python\n", + "\n", + "Here's an example for Python:" ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const latexText = `\n", - "\\documentclass{article}\n", - "\n", - "\\begin{document}\n", - "\n", - "\\maketitle\n", - "\n", - "\\section{Introduction}\n", - "Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis.\n", - "\n", - "\\subsection{History of LLMs}\n", - "The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance.\n", - "\n", - "\\subsection{Applications of LLMs}\n", - "LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. 
They can also be used in academia for research in linguistics, psychology, and computational linguistics.\n", - "\n", - "\\end{document}\n", - "`\n", - "\n", - "const latexSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", - " \"latex\", {\n", - " chunkSize: 60,\n", - " chunkOverlap: 0,\n", - " }\n", - ")\n", - "const latexDocs = await latexSplitter.createDocuments([latexText])\n", - "\n", - "latexDocs" - ] - }, - { - "cell_type": "markdown", - "id": "c29adadf", - "metadata": {}, - "source": [ - "## HTML\n", - "\n", - "Here's an example using an HTML text splitter:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "0fc78794", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"\\n\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m3\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"\\n 🦜️🔗 LangChain\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m4\u001b[39m, to: \u001b[33m5\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m11\u001b[39m, to: \u001b[33m13\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m14\u001b[39m, to: \u001b[33m14\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"
\\n

🦜️🔗 LangChain

\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m16\u001b[39m, to: \u001b[33m17\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"

⚡ Building applications with LLMs through composability\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m18\u001b[39m, to: \u001b[33m18\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"⚡

\\n
\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m18\u001b[39m, to: \u001b[33m19\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"
\\n As an open-source project in a rapidly\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m20\u001b[39m, to: \u001b[33m21\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"developing field, we are extremely open to contributions.\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m21\u001b[39m, to: \u001b[33m21\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"
\\n \\n\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m22\u001b[39m, to: \u001b[33m24\u001b[39m } } }\n", - " }\n", - "]" + "cell_type": "code", + "execution_count": 3, + "id": "a58512b9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m'def hello_world():\\n print(\"Hello, World!\")'\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m3\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"# Call the function\\nhello_world()\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m5\u001b[39m, to: \u001b[33m6\u001b[39m } } }\n", + " }\n", + "]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const PYTHON_CODE = `\n", + "def hello_world():\n", + " print(\"Hello, World!\")\n", + "\n", + "# Call the function\n", + "hello_world()\n", + "`\n", + "\n", + "const pythonSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", + " \"python\", {\n", + " chunkSize: 50,\n", + " chunkOverlap: 0,\n", + " }\n", + ")\n", + "const pythonDocs = await pythonSplitter.createDocuments([PYTHON_CODE])\n", + "pythonDocs" ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const htmlText = `\n", - "\n", - "\n", - " \n", - " 🦜️🔗 LangChain\n", - " \n", - " \n", - " \n", - "
\n", - "

🦜️🔗 LangChain

\n", - "

⚡ Building applications with LLMs through composability ⚡

\n", - "
\n", - "
\n", - " As an open-source project in a rapidly developing field, we are extremely open to contributions.\n", - "
\n", - " \n", - "\n", - "`\n", - "\n", - "const htmlSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", - " \"html\", {\n", - " chunkSize: 60,\n", - " chunkOverlap: 0,\n", - " }\n", - ")\n", - "const htmlDocs = await htmlSplitter.createDocuments([htmlText])\n", - "htmlDocs" - ] - }, - { - "cell_type": "markdown", - "id": "fcaf7abf", - "metadata": {}, - "source": [ - "## Solidity\n", - "Here's an example using of splitting on [Solidity](https://soliditylang.org/) code:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "49a1df11", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"pragma solidity ^0.8.20;\"\u001b[39m,\n", - " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m2\u001b[39m } } }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"contract HelloWorld {\\n\"\u001b[39m +\n", - " \u001b[32m\" function add(uint a, uint b) pure public returns(uint) {\\n\"\u001b[39m +\n", - " \u001b[32m\" return a + \"\u001b[39m... 9 more characters,\n", - " metadata: { loc: { lines: { from: \u001b[33m3\u001b[39m, to: \u001b[33m7\u001b[39m } } }\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "ee2361f8", + "metadata": {}, + "source": [ + "## Markdown\n", + "\n", + "Here's an example of splitting on markdown separators:" ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const SOL_CODE = `\n", - "pragma solidity ^0.8.20;\n", - "contract HelloWorld {\n", - " function add(uint a, uint b) pure public returns(uint) {\n", - " return a + b;\n", - " }\n", - "}\n", - "`\n", - "\n", - "const solSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", - " \"sol\", {\n", - " chunkSize: 128,\n", - " chunkOverlap: 0,\n", - " }\n", - ")\n", - "const solDocs = await solSplitter.createDocuments([SOL_CODE])\n", - "solDocs" - ] - }, - { - "cell_type": "markdown", - "id": "4a11f7cd-cd85-430c-b307-5b5b5f07f8db", - "metadata": {}, - "source": [ - "## PHP\n", - "Here's an example of splitting on PHP code:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "90c66e7e-87a5-4a81-bece-7949aabf2369", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ac9295d3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m\"# 🦜️🔗 LangChain\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m2\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"⚡ Building applications with LLMs through composability ⚡\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m4\u001b[39m, to: \u001b[33m4\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"## Quick Install\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m6\u001b[39m, to: \u001b[33m6\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"```bash\\n# Hopefully this code block isn't split\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m8\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"pip install langchain\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m10\u001b[39m, to: \u001b[33m10\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"```\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: 
\u001b[33m11\u001b[39m, to: \u001b[33m11\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"As an open-source project in a rapidly developing field, we\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m13\u001b[39m, to: \u001b[33m13\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"are extremely open to contributions.\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m13\u001b[39m, to: \u001b[33m13\u001b[39m } } }\n", + " }\n", + "]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const markdownText = `\n", + "# 🦜️🔗 LangChain\n", + "\n", + "⚡ Building applications with LLMs through composability ⚡\n", + "\n", + "## Quick Install\n", + "\n", + "\\`\\`\\`bash\n", + "# Hopefully this code block isn't split\n", + "pip install langchain\n", + "\\`\\`\\`\n", + "\n", + "As an open-source project in a rapidly developing field, we are extremely open to contributions.\n", + "`;\n", + "\n", + "const mdSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", + " \"markdown\", {\n", + " chunkSize: 60,\n", + " chunkOverlap: 0,\n", + " }\n", + ")\n", + "const mdDocs = await mdSplitter.createDocuments([markdownText])\n", + "\n", + "mdDocs" + ] + }, + { + "cell_type": "markdown", + "id": "7aa306f6", + "metadata": {}, + "source": [ + "## Latex\n", + "\n", + "Here's an example on Latex text:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "77d1049d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m\"documentclass{article}\\n\\n\\begin{document}\\n\\nmaketitle\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m6\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"section{Introduction}\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m8\u001b[39m, to: \u001b[33m8\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Large language models (LLMs) are a type of machine learning\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"model that can be trained on vast amounts of text data to\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"generate human-like language. 
In recent years, LLMs have\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"made significant advances in a variety of natural language\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"processing tasks, including language translation, text\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"generation, and sentiment analysis.\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m9\u001b[39m, to: \u001b[33m9\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"subsection{History of LLMs}\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m11\u001b[39m, to: \u001b[33m11\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The earliest LLMs were developed in the 1980s and 1990s,\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"but they were limited by the amount of data that could be\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"processed and the computational power available at the\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"time. In the past decade, however, advances in hardware and\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"software have made it possible to train LLMs on massive\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"datasets, leading to significant improvements in\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"performance.\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m12\u001b[39m, to: \u001b[33m12\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"subsection{Applications of LLMs}\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m14\u001b[39m, to: \u001b[33m14\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"LLMs have many applications in industry, including\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"chatbots, content creation, and virtual assistants. 
They\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"can also be used in academia for research in linguistics,\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"psychology, and computational linguistics.\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"end{document}\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m17\u001b[39m, to: \u001b[33m17\u001b[39m } } }\n", + " }\n", + "]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const latexText = `\n", + "\\documentclass{article}\n", + "\n", + "\\begin{document}\n", + "\n", + "\\maketitle\n", + "\n", + "\\section{Introduction}\n", + "Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis.\n", + "\n", + "\\subsection{History of LLMs}\n", + "The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance.\n", + "\n", + "\\subsection{Applications of LLMs}\n", + "LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. 
They can also be used in academia for research in linguistics, psychology, and computational linguistics.\n", + "\n", + "\\end{document}\n", + "`\n", + "\n", + "const latexSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", + " \"latex\", {\n", + " chunkSize: 60,\n", + " chunkOverlap: 0,\n", + " }\n", + ")\n", + "const latexDocs = await latexSplitter.createDocuments([latexText])\n", + "\n", + "latexDocs" + ] + }, + { + "cell_type": "markdown", + "id": "c29adadf", + "metadata": {}, + "source": [ + "## HTML\n", + "\n", + "Here's an example using an HTML text splitter:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "0fc78794", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m\"\\n\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m2\u001b[39m, to: \u001b[33m3\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"\\n 🦜️🔗 LangChain\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m4\u001b[39m, to: \u001b[33m5\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m11\u001b[39m, to: \u001b[33m13\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m14\u001b[39m, to: \u001b[33m14\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m15\u001b[39m, to: \u001b[33m15\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"
\\n

🦜️🔗 LangChain

\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m16\u001b[39m, to: \u001b[33m17\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"

⚡ Building applications with LLMs through composability\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m18\u001b[39m, to: \u001b[33m18\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"⚡

\\n
\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m18\u001b[39m, to: \u001b[33m19\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"
\\n As an open-source project in a rapidly\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m20\u001b[39m, to: \u001b[33m21\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"developing field, we are extremely open to contributions.\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m21\u001b[39m, to: \u001b[33m21\u001b[39m } } }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"
\\n \\n\"\u001b[39m,\n", + " metadata: { loc: { lines: { from: \u001b[33m22\u001b[39m, to: \u001b[33m24\u001b[39m } } }\n", + " }\n", + "]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const htmlText = `\n", + "\n", + "\n", + " \n", + " 🦜️🔗 LangChain\n", + " \n", + " \n", + " \n", + "
\n", + "

🦜️🔗 LangChain

\n", + "

⚡ Building applications with LLMs through composability ⚡

\n", + "
\n", + "
\n", + " As an open-source project in a rapidly developing field, we are extremely open to contributions.\n", + "
\n", + " \n", + "\n", + "`\n", + "\n", + "const htmlSplitter = RecursiveCharacterTextSplitter.fromLanguage(\n", + " \"html\", {\n", + " chunkSize: 60,\n", + " chunkOverlap: 0,\n", + " }\n", + ")\n", + "const htmlDocs = await htmlSplitter.createDocuments([htmlText])\n", + "htmlDocs" + ] + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"=0.2.16`. Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", - "\n", - ":::\n", - "```\n", - "\n", - "## `asTool`\n", - "\n", - "Tools have some additional requirements over general Runnables:\n", - "\n", - "- Their inputs are constrained to be serializable, specifically strings and objects;\n", - "- They contain names and descriptions indicating how and when they should be used;\n", - "- They contain a detailed `schema` property for their arguments. That is, while a tool (as a `Runnable`) might accept a single object input, the specific keys and type information needed to populate an object should be specified in the `schema` field.\n", - "\n", - "The `asTool()` method therefore requires this additional information to create a tool from a runnable. Here's a basic example:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "b2cc4231-64a3-4733-a284-932dcbf2fcc3", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Explanation of when to use the tool.\n" - ] - } - ], - "source": [ - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { z } from \"zod\";\n", - "\n", - "const schema = z.object({\n", - " a: z.number(),\n", - " b: z.array(z.number()),\n", - "});\n", - "\n", - "\n", - "const runnable = RunnableLambda.from((input: z.infer) => {\n", - " return input.a * Math.max(...input.b);\n", - "});\n", - "\n", - "const asTool = runnable.asTool({\n", - " name: \"My tool\",\n", - " description: \"Explanation of when to use the tool.\",\n", - " schema,\n", - "});\n", - "\n", - "asTool.description" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "54ae7384-a03d-4fa4-8cdf-9604a4bc39ee", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a8bceb3-95bd-4496-bb9e-57655136e070", + "metadata": {}, + "source": [ + "# How to convert Runnables to Tools\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Runnables](/docs/concepts/runnables)\n", + "- [Tools](/docs/concepts/tools)\n", + "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/)\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "For convenience, `Runnables` that accept a string or object input can be converted to tools using the [`asTool`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#asTool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n", + "\n", + "Here we will demonstrate how to use this method to convert a LangChain `Runnable` into a tool that can be used by agents, chains, or chat models.\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "\n", + "This functionality requires `@langchain/core>=0.2.16`. 
Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", + "\n", + ":::\n", + "```\n", + "\n", + "## `asTool`\n", + "\n", + "Tools have some additional requirements over general Runnables:\n", + "\n", + "- Their inputs are constrained to be serializable, specifically strings and objects;\n", + "- They contain names and descriptions indicating how and when they should be used;\n", + "- They contain a detailed `schema` property for their arguments. That is, while a tool (as a `Runnable`) might accept a single object input, the specific keys and type information needed to populate an object should be specified in the `schema` field.\n", + "\n", + "The `asTool()` method therefore requires this additional information to create a tool from a runnable. Here's a basic example:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "6\n" - ] - } - ], - "source": [ - "await asTool.invoke({ a: 3, b: [1, 2] })" - ] - }, - { - "cell_type": "markdown", - "id": "7c474d85-4e01-4fae-9bba-0c6c8c26475c", - "metadata": {}, - "source": [ - "Runnables that take string inputs are also supported:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "c475282a-58d6-4c2b-af7d-99b73b7d8a13", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "b2cc4231-64a3-4733-a284-932dcbf2fcc3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Explanation of when to use the tool.\n" + ] + } + ], + "source": [ + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "import { z } from \"zod\";\n", + "\n", + "const schema = z.object({\n", + " a: z.number(),\n", + " b: z.array(z.number()),\n", + "});\n", + "\n", + "\n", + "const runnable = RunnableLambda.from((input: z.infer) => {\n", + " return input.a * Math.max(...input.b);\n", + "});\n", + "\n", + "const asTool = runnable.asTool({\n", + " name: \"My tool\",\n", + " description: \"Explanation of when to use the tool.\",\n", + " schema,\n", + "});\n", + "\n", + "asTool.description" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Adds letters to a string.\n" - ] - } - ], - "source": [ - "const firstRunnable = RunnableLambda.from((input) => {\n", - " return input + \"a\";\n", - "})\n", - "\n", - "const secondRunnable = RunnableLambda.from((input) => {\n", - " return input + \"z\";\n", - "})\n", - "\n", - "const runnable = firstRunnable.pipe(secondRunnable)\n", - "const asTool = runnable.asTool({\n", - " name: \"append_letters\",\n", - " description: \"Adds letters to a string.\",\n", - " schema: z.string(),\n", - "})\n", - "\n", - "asTool.description;" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "ad6d8d96-3a87-40bd-a2ac-44a8acde0a8e", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "id": "54ae7384-a03d-4fa4-8cdf-9604a4bc39ee", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "6\n" + ] + } + ], + "source": [ + "await asTool.invoke({ a: 3, b: [1, 2] })" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "baz\n" - ] - } - ], - "source": [ - "await asTool.invoke(\"b\")" - ] - }, - { - "cell_type": "markdown", - "id": "89fdb3a7-d228-48f0-8f73-262af4febb58", - "metadata": {}, - "source": [ - "## In an agents\n", - "\n", - "Below we will incorporate LangChain Runnables as tools in an [agent](/docs/concepts/#agents) application. 
We will demonstrate with:\n", - "\n", - "- a document [retriever](/docs/concepts/#retrievers);\n", - "- a simple [RAG](/docs/tutorials/rag/) chain, allowing an agent to delegate relevant queries to it.\n", - "\n", - "We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n", - "\n", - "```{=mdx}\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "e8a2038a-d762-4196-b5e3-fdb89c11e71d", - "metadata": {}, - "source": [ - "Following the [RAG tutorial](/docs/tutorials/rag/), let's first construct a retriever:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "23d2a47e-6712-4294-81c8-2c1d76b4bb81", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({ model: \"gpt-3.5-turbo-0125\", temperature: 0 })\n", - "\n", - "import { Document } from \"@langchain/core/documents\"\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const documents = [\n", - " new Document({\n", - " pageContent: \"Dogs are great companions, known for their loyalty and friendliness.\",\n", - " }),\n", - " new Document({\n", - " pageContent: \"Cats are independent pets that often enjoy their own space.\",\n", - " }),\n", - "]\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " documents, new OpenAIEmbeddings(),\n", - ");\n", - "\n", - "const retriever = vectorstore.asRetriever({\n", - " k: 1,\n", - " searchType: \"similarity\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1", - "metadata": {}, - "source": [ - "We next create a pre-built [LangGraph agent](/docs/how_to/migrate_agent/) and provide it with the tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "7c474d85-4e01-4fae-9bba-0c6c8c26475c", + "metadata": {}, + "source": [ + "Runnables that take string inputs are also supported:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AGENT: AIMessage {\n", - " \"id\": \"chatcmpl-9m9RIN1GQVeXcrVdp0lNBTcZFVHb9\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_n30LPDbegmytrj5GdUxZt9xn\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 17,\n", - " \"promptTokens\": 52,\n", - " \"totalTokens\": 69\n", - " },\n", - " \"finish_reason\": \"tool_calls\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"pet_info_retriever\",\n", - " \"args\": {\n", - " \"input\": \"dogs\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_n30LPDbegmytrj5GdUxZt9xn\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 52,\n", - " \"output_tokens\": 17,\n", - " \"total_tokens\": 69\n", - " }\n", - "}\n", - "----\n", - "TOOLS: ToolMessage {\n", - " \"content\": \"[{\\\"pageContent\\\":\\\"Dogs are great companions, known for their loyalty and friendliness.\\\",\\\"metadata\\\":{}}]\",\n", - " \"name\": \"pet_info_retriever\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_n30LPDbegmytrj5GdUxZt9xn\"\n", - "}\n", 
- "----\n", - "AGENT: AIMessage {\n", - " \"id\": \"chatcmpl-9m9RJ3TT3ITfv6R0Tb7pcrNOUtnm8\",\n", - " \"content\": \"Dogs are known for being great companions, known for their loyalty and friendliness.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 18,\n", - " \"promptTokens\": 104,\n", - " \"totalTokens\": 122\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 104,\n", - " \"output_tokens\": 18,\n", - " \"total_tokens\": 122\n", - " }\n", - "}\n", - "----\n" - ] - } - ], - "source": [ - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", - "\n", - "const tools = [\n", - " retriever.asTool({\n", - " name: \"pet_info_retriever\",\n", - " description: \"Get information about pets.\",\n", - " schema: z.string(),\n", - " })\n", - "];\n", - "\n", - "const agent = createReactAgent({ llm: llm, tools });\n", - "\n", - "const stream = await agent.stream({\"messages\": [[\"human\", \"What are dogs known for?\"]]});\n", - "\n", - "for await (const chunk of stream) {\n", - " // Log output from the agent or tools node\n", - " if (chunk.agent) {\n", - " console.log(\"AGENT:\", chunk.agent.messages[0]);\n", - " } else if (chunk.tools) {\n", - " console.log(\"TOOLS:\", chunk.tools.messages[0]);\n", - " }\n", - " console.log(\"----\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa", - "metadata": {}, - "source": [ - "This [LangSmith trace](https://smith.langchain.com/public/5e141617-ae82-44af-8fe0-b64dbd007826/r) shows what's going on under the hood for the above run.\n", - "\n", - "Going further, we can even create a tool from a full [RAG chain](/docs/tutorials/rag/):" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "bea518c9-c711-47c2-b8cc-dbd102f71f09", - "metadata": {}, - "outputs": [], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnableSequence } from \"@langchain/core/runnables\";\n", - "\n", - "const SYSTEM_TEMPLATE = `\n", - "You are an assistant for question-answering tasks.\n", - "Use the below context to answer the question. If\n", - "you don't know the answer, say you don't know.\n", - "Use three sentences maximum and keep the answer\n", - "concise.\n", - "\n", - "Answer in the style of {answer_style}.\n", - "\n", - "Context: {context}`;\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", SYSTEM_TEMPLATE],\n", - " [\"human\", \"{question}\"],\n", - "]);\n", - "\n", - "const ragChain = RunnableSequence.from([\n", - " {\n", - " context: (input, config) => retriever.invoke(input.question, config),\n", - " question: (input) => input.question,\n", - " answer_style: (input) => input.answer_style,\n", - " },\n", - " prompt,\n", - " llm,\n", - " new StringOutputParser(),\n", - "]);" - ] - }, - { - "cell_type": "markdown", - "id": "4570615b-8f96-4d97-ae01-1c08b14be584", - "metadata": {}, - "source": [ - "Below we again invoke the agent. 
Note that the agent populates the required parameters in its `tool_calls`:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "06409913-a2ad-400f-a202-7b8dd2ef483a", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "id": "c475282a-58d6-4c2b-af7d-99b73b7d8a13", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Adds letters to a string.\n" + ] + } + ], + "source": [ + "const firstRunnable = RunnableLambda.from((input) => {\n", + " return input + \"a\";\n", + "})\n", + "\n", + "const secondRunnable = RunnableLambda.from((input) => {\n", + " return input + \"z\";\n", + "})\n", + "\n", + "const runnable = firstRunnable.pipe(secondRunnable)\n", + "const asTool = runnable.asTool({\n", + " name: \"append_letters\",\n", + " description: \"Adds letters to a string.\",\n", + " schema: z.string(),\n", + "})\n", + "\n", + "asTool.description;" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ad6d8d96-3a87-40bd-a2ac-44a8acde0a8e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "baz\n" + ] + } + ], + "source": [ + "await asTool.invoke(\"b\")" + ] + }, + { + "cell_type": "markdown", + "id": "89fdb3a7-d228-48f0-8f73-262af4febb58", + "metadata": {}, + "source": [ + "## In an agents\n", + "\n", + "Below we will incorporate LangChain Runnables as tools in an [agent](/docs/concepts/agents) application. We will demonstrate with:\n", + "\n", + "- a document [retriever](/docs/concepts/retrievers);\n", + "- a simple [RAG](/docs/tutorials/rag/) chain, allowing an agent to delegate relevant queries to it.\n", + "\n", + "We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n", + "\n", + "```{=mdx}\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "e8a2038a-d762-4196-b5e3-fdb89c11e71d", + "metadata": {}, + "source": [ + "Following the [RAG tutorial](/docs/tutorials/rag/), let's first construct a retriever:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "23d2a47e-6712-4294-81c8-2c1d76b4bb81", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-3.5-turbo-0125\", temperature: 0 })\n", + "\n", + "import { Document } from \"@langchain/core/documents\"\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const documents = [\n", + " new Document({\n", + " pageContent: \"Dogs are great companions, known for their loyalty and friendliness.\",\n", + " }),\n", + " new Document({\n", + " pageContent: \"Cats are independent pets that often enjoy their own space.\",\n", + " }),\n", + "]\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " documents, new OpenAIEmbeddings(),\n", + ");\n", + "\n", + "const retriever = vectorstore.asRetriever({\n", + " k: 1,\n", + " searchType: \"similarity\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1", + "metadata": {}, + "source": [ + "We next create a pre-built [LangGraph agent](/docs/how_to/migrate_agent/) and provide it with the tool:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AGENT: AIMessage {\n", - " \"id\": \"chatcmpl-9m9RKY2nAa8LeGoBiO7N1SR4nAoED\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " 
\"tool_calls\": [\n", - " {\n", - " \"id\": \"call_ukzivO4jRn1XdDpuVTI6CvtU\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 30,\n", - " \"promptTokens\": 63,\n", - " \"totalTokens\": 93\n", - " },\n", - " \"finish_reason\": \"tool_calls\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"pet_expert\",\n", - " \"args\": {\n", - " \"context\": \"pirate\",\n", - " \"question\": \"What are dogs known for?\",\n", - " \"answer_style\": \"short\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_ukzivO4jRn1XdDpuVTI6CvtU\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 63,\n", - " \"output_tokens\": 30,\n", - " \"total_tokens\": 93\n", - " }\n", - "}\n", - "----\n", - "TOOLS: ToolMessage {\n", - " \"content\": \"Dogs are known for their loyalty, companionship, and ability to provide emotional support to their owners.\",\n", - " \"name\": \"pet_expert\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_ukzivO4jRn1XdDpuVTI6CvtU\"\n", - "}\n", - "----\n", - "AGENT: AIMessage {\n", - " \"id\": \"chatcmpl-9m9RMwAEc14TTKtitq3CH2x9wpGik\",\n", - " \"content\": \"A pirate would say that dogs are known for their loyalty, companionship, and ability to provide emotional support to their owners.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 26,\n", - " \"promptTokens\": 123,\n", - " \"totalTokens\": 149\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 123,\n", - " \"output_tokens\": 26,\n", - " \"total_tokens\": 149\n", - " }\n", - "}\n", - "----\n" - ] + "cell_type": "code", + "execution_count": 6, + "id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AGENT: AIMessage {\n", + " \"id\": \"chatcmpl-9m9RIN1GQVeXcrVdp0lNBTcZFVHb9\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_n30LPDbegmytrj5GdUxZt9xn\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 17,\n", + " \"promptTokens\": 52,\n", + " \"totalTokens\": 69\n", + " },\n", + " \"finish_reason\": \"tool_calls\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"pet_info_retriever\",\n", + " \"args\": {\n", + " \"input\": \"dogs\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_n30LPDbegmytrj5GdUxZt9xn\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 52,\n", + " \"output_tokens\": 17,\n", + " \"total_tokens\": 69\n", + " }\n", + "}\n", + "----\n", + "TOOLS: ToolMessage {\n", + " \"content\": \"[{\\\"pageContent\\\":\\\"Dogs are great companions, known for their loyalty and friendliness.\\\",\\\"metadata\\\":{}}]\",\n", + " \"name\": \"pet_info_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_n30LPDbegmytrj5GdUxZt9xn\"\n", + "}\n", + "----\n", + "AGENT: AIMessage {\n", + " \"id\": 
\"chatcmpl-9m9RJ3TT3ITfv6R0Tb7pcrNOUtnm8\",\n", + " \"content\": \"Dogs are known for being great companions, known for their loyalty and friendliness.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 18,\n", + " \"promptTokens\": 104,\n", + " \"totalTokens\": 122\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 104,\n", + " \"output_tokens\": 18,\n", + " \"total_tokens\": 122\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "\n", + "const tools = [\n", + " retriever.asTool({\n", + " name: \"pet_info_retriever\",\n", + " description: \"Get information about pets.\",\n", + " schema: z.string(),\n", + " })\n", + "];\n", + "\n", + "const agent = createReactAgent({ llm: llm, tools });\n", + "\n", + "const stream = await agent.stream({\"messages\": [[\"human\", \"What are dogs known for?\"]]});\n", + "\n", + "for await (const chunk of stream) {\n", + " // Log output from the agent or tools node\n", + " if (chunk.agent) {\n", + " console.log(\"AGENT:\", chunk.agent.messages[0]);\n", + " } else if (chunk.tools) {\n", + " console.log(\"TOOLS:\", chunk.tools.messages[0]);\n", + " }\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa", + "metadata": {}, + "source": [ + "This [LangSmith trace](https://smith.langchain.com/public/5e141617-ae82-44af-8fe0-b64dbd007826/r) shows what's going on under the hood for the above run.\n", + "\n", + "Going further, we can even create a tool from a full [RAG chain](/docs/tutorials/rag/):" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "bea518c9-c711-47c2-b8cc-dbd102f71f09", + "metadata": {}, + "outputs": [], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnableSequence } from \"@langchain/core/runnables\";\n", + "\n", + "const SYSTEM_TEMPLATE = `\n", + "You are an assistant for question-answering tasks.\n", + "Use the below context to answer the question. If\n", + "you don't know the answer, say you don't know.\n", + "Use three sentences maximum and keep the answer\n", + "concise.\n", + "\n", + "Answer in the style of {answer_style}.\n", + "\n", + "Context: {context}`;\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", SYSTEM_TEMPLATE],\n", + " [\"human\", \"{question}\"],\n", + "]);\n", + "\n", + "const ragChain = RunnableSequence.from([\n", + " {\n", + " context: (input, config) => retriever.invoke(input.question, config),\n", + " question: (input) => input.question,\n", + " answer_style: (input) => input.answer_style,\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser(),\n", + "]);" + ] + }, + { + "cell_type": "markdown", + "id": "4570615b-8f96-4d97-ae01-1c08b14be584", + "metadata": {}, + "source": [ + "Below we again invoke the agent. 
Note that the agent populates the required parameters in its `tool_calls`:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "06409913-a2ad-400f-a202-7b8dd2ef483a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AGENT: AIMessage {\n", + " \"id\": \"chatcmpl-9m9RKY2nAa8LeGoBiO7N1SR4nAoED\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_ukzivO4jRn1XdDpuVTI6CvtU\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 30,\n", + " \"promptTokens\": 63,\n", + " \"totalTokens\": 93\n", + " },\n", + " \"finish_reason\": \"tool_calls\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"pet_expert\",\n", + " \"args\": {\n", + " \"context\": \"pirate\",\n", + " \"question\": \"What are dogs known for?\",\n", + " \"answer_style\": \"short\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_ukzivO4jRn1XdDpuVTI6CvtU\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 63,\n", + " \"output_tokens\": 30,\n", + " \"total_tokens\": 93\n", + " }\n", + "}\n", + "----\n", + "TOOLS: ToolMessage {\n", + " \"content\": \"Dogs are known for their loyalty, companionship, and ability to provide emotional support to their owners.\",\n", + " \"name\": \"pet_expert\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_ukzivO4jRn1XdDpuVTI6CvtU\"\n", + "}\n", + "----\n", + "AGENT: AIMessage {\n", + " \"id\": \"chatcmpl-9m9RMwAEc14TTKtitq3CH2x9wpGik\",\n", + " \"content\": \"A pirate would say that dogs are known for their loyalty, companionship, and ability to provide emotional support to their owners.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 26,\n", + " \"promptTokens\": 123,\n", + " \"totalTokens\": 149\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 123,\n", + " \"output_tokens\": 26,\n", + " \"total_tokens\": 149\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const ragTool = ragChain.asTool({\n", + " name: \"pet_expert\",\n", + " description: \"Get information about pets.\",\n", + " schema: z.object({\n", + " context: z.string(),\n", + " question: z.string(),\n", + " answer_style: z.string(),\n", + " }),\n", + "});\n", + "\n", + "const agent = createReactAgent({ llm: llm, tools: [ragTool] });\n", + "\n", + "const stream = await agent.stream({\n", + " messages: [\n", + " [\"human\", \"What would a pirate say dogs are known for?\"]\n", + " ]\n", + "});\n", + "\n", + "for await (const chunk of stream) {\n", + " // Log output from the agent or tools node\n", + " if (chunk.agent) {\n", + " console.log(\"AGENT:\", chunk.agent.messages[0]);\n", + " } else if (chunk.tools) {\n", + " console.log(\"TOOLS:\", chunk.tools.messages[0]);\n", + " }\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "96cc9bc3-e79e-49a8-9915-428ea225358b", + "metadata": {}, + "source": [ + "See this [LangSmith trace](https://smith.langchain.com/public/147ae4e6-4dfb-4dd9-8ca0-5c5b954f08ac/r) for the above run to see what's going on internally.\n", + "\n", + "## 
Related\n", + "\n", + "- [How to: create custom tools](/docs/how_to/custom_tools)\n", + "- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model/)\n", + "- [How to: stream events from child runs within a custom tool](/docs/how_to/tool_stream_events)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const ragTool = ragChain.asTool({\n", - " name: \"pet_expert\",\n", - " description: \"Get information about pets.\",\n", - " schema: z.object({\n", - " context: z.string(),\n", - " question: z.string(),\n", - " answer_style: z.string(),\n", - " }),\n", - "});\n", - "\n", - "const agent = createReactAgent({ llm: llm, tools: [ragTool] });\n", - "\n", - "const stream = await agent.stream({\n", - " messages: [\n", - " [\"human\", \"What would a pirate say dogs are known for?\"]\n", - " ]\n", - "});\n", - "\n", - "for await (const chunk of stream) {\n", - " // Log output from the agent or tools node\n", - " if (chunk.agent) {\n", - " console.log(\"AGENT:\", chunk.agent.messages[0]);\n", - " } else if (chunk.tools) {\n", - " console.log(\"TOOLS:\", chunk.tools.messages[0]);\n", - " }\n", - " console.log(\"----\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "96cc9bc3-e79e-49a8-9915-428ea225358b", - "metadata": {}, - "source": [ - "See this [LangSmith trace](https://smith.langchain.com/public/147ae4e6-4dfb-4dd9-8ca0-5c5b954f08ac/r) for the above run to see what's going on internally.\n", - "\n", - "## Related\n", - "\n", - "- [How to: create custom tools](/docs/how_to/custom_tools)\n", - "- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model/)\n", - "- [How to: stream events from child runs within a custom tool](/docs/how_to/tool_stream_events)\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/custom_callbacks.ipynb b/docs/core_docs/docs/how_to/custom_callbacks.ipynb index fbd7c6465104..29205cca074a 100644 --- a/docs/core_docs/docs/how_to/custom_callbacks.ipynb +++ b/docs/core_docs/docs/how_to/custom_callbacks.ipynb @@ -1,149 +1,149 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to create custom callback handlers\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Callbacks](/docs/concepts/#callbacks)\n", - "\n", - ":::\n", - "\n", - "LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n", - "\n", - "To create a custom callback handler, we need to determine the [event(s)](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) we want our callback handler to handle as well as what we want our callback handler to do when the 
event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n", - "\n", - "An easy way to construct a custom callback handler is to initialize it as an object whose keys are functions with names matching the events we want to handle. Here's an example that only handles the start of a chat model and streamed tokens from the model run:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to create custom callback handlers\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Callbacks](/docs/concepts/callbacks)\n", + "\n", + ":::\n", + "\n", + "LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n", + "\n", + "To create a custom callback handler, we need to determine the [event(s)](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n", + "\n", + "An easy way to construct a custom callback handler is to initialize it as an object whose keys are functions with names matching the events we want to handle. Here's an example that only handles the start of a chat model and streamed tokens from the model run:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chat model start: {\n", + " lc: 1,\n", + " type: \"constructor\",\n", + " id: [ \"langchain\", \"chat_models\", \"anthropic\", \"ChatAnthropic\" ],\n", + " kwargs: {\n", + " callbacks: undefined,\n", + " model: \"claude-3-sonnet-20240229\",\n", + " verbose: undefined,\n", + " anthropic_api_key: { lc: 1, type: \"secret\", id: [ \"ANTHROPIC_API_KEY\" ] },\n", + " api_key: { lc: 1, type: \"secret\", id: [ \"ANTHROPIC_API_KEY\" ] }\n", + " }\n", + "} [\n", + " [\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"What is 1 + 2?\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"What is 1 + 2?\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " }\n", + " ]\n", + "] b6e3b7ad-c602-4cef-9652-d51781a657b7\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chat model new token The\n", + "Chat model new token sum\n", + "Chat model new token of\n", + "Chat model new token \n", + "Chat model new token 1\n", + "Chat model new token \n", + "Chat model new token an\n", + "Chat model new token d \n", + "Chat model new token 2\n", + "Chat model new token \n", + "Chat model new token is\n", + "Chat model new token \n", + "Chat model new token 3\n", + "Chat model new token .\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const prompt = 
ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + "});\n", + "\n", + "const chain = prompt.pipe(model);\n", + "\n", + "const customHandler = {\n", + " handleChatModelStart: async (llm, inputMessages, runId) => {\n", + " console.log(\"Chat model start:\", llm, inputMessages, runId)\n", + " },\n", + " handleLLMNewToken: async (token) => {\n", + " console.log(\"Chat model new token\", token);\n", + " }\n", + "};\n", + "\n", + "const stream = await chain.stream({ number: \"2\" }, { callbacks: [customHandler] });\n", + "\n", + "for await (const _ of stream) {\n", + " // Just consume the stream so the callbacks run\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Chat model start: {\n", - " lc: 1,\n", - " type: \"constructor\",\n", - " id: [ \"langchain\", \"chat_models\", \"anthropic\", \"ChatAnthropic\" ],\n", - " kwargs: {\n", - " callbacks: undefined,\n", - " model: \"claude-3-sonnet-20240229\",\n", - " verbose: undefined,\n", - " anthropic_api_key: { lc: 1, type: \"secret\", id: [ \"ANTHROPIC_API_KEY\" ] },\n", - " api_key: { lc: 1, type: \"secret\", id: [ \"ANTHROPIC_API_KEY\" ] }\n", - " }\n", - "} [\n", - " [\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"What is 1 + 2?\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"What is 1 + 2?\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ]\n", - "] b6e3b7ad-c602-4cef-9652-d51781a657b7\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see [this reference page](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) for a list of events you can handle. Note that the `handleChain*` events run for most LCEL runnables.\n", + "\n", + "## Next steps\n", + "\n", + "You've now learned how to create your own custom callback handlers.\n", + "\n", + "Next, check out the other how-to guides in this section, such as [how to await callbacks in serverless environments](/docs/how_to/callbacks_serverless)." 
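If your handler needs to keep state across events, you can also subclass `BaseCallbackHandler` instead of using a plain object. A minimal sketch, reusing the `chain` from the example above (the token-counting logic is just an illustration):

```typescript
import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
import type { LLMResult } from "@langchain/core/outputs";

// A stateful, class-based callback handler.
class TokenCounterHandler extends BaseCallbackHandler {
  name = "token_counter_handler";

  tokenCount = 0;

  async handleLLMNewToken(_token: string) {
    this.tokenCount += 1;
  }

  async handleLLMEnd(_output: LLMResult) {
    console.log(`Received ${this.tokenCount} streamed tokens.`);
  }
}

// Attach it exactly like the object-style handler shown earlier.
const countingStream = await chain.stream(
  { number: "2" },
  { callbacks: [new TokenCounterHandler()] }
);

for await (const _ of countingStream) {
  // Consume the stream so the callbacks fire.
}
```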
+ ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Chat model new token The\n", - "Chat model new token sum\n", - "Chat model new token of\n", - "Chat model new token \n", - "Chat model new token 1\n", - "Chat model new token \n", - "Chat model new token an\n", - "Chat model new token d \n", - "Chat model new token 2\n", - "Chat model new token \n", - "Chat model new token is\n", - "Chat model new token \n", - "Chat model new token 3\n", - "Chat model new token .\n" - ] + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`What is 1 + {number}?`);\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - "});\n", - "\n", - "const chain = prompt.pipe(model);\n", - "\n", - "const customHandler = {\n", - " handleChatModelStart: async (llm, inputMessages, runId) => {\n", - " console.log(\"Chat model start:\", llm, inputMessages, runId)\n", - " },\n", - " handleLLMNewToken: async (token) => {\n", - " console.log(\"Chat model new token\", token);\n", - " }\n", - "};\n", - "\n", - "const stream = await chain.stream({ number: \"2\" }, { callbacks: [customHandler] });\n", - "\n", - "for await (const _ of stream) {\n", - " // Just consume the stream so the callbacks run\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see [this reference page](https://api.js.langchain.com/interfaces/langchain_core.callbacks_base.CallbackHandlerMethods.html) for a list of events you can handle. Note that the `handleChain*` events run for most LCEL runnables.\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned how to create your own custom callback handlers.\n", - "\n", - "Next, check out the other how-to guides in this section, such as [how to await callbacks in serverless environments](/docs/how_to/callbacks_serverless)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/custom_chat.ipynb b/docs/core_docs/docs/how_to/custom_chat.ipynb index 0566a2e8339f..e93f13664ad7 100644 --- a/docs/core_docs/docs/how_to/custom_chat.ipynb +++ b/docs/core_docs/docs/how_to/custom_chat.ipynb @@ -1,508 +1,508 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 4\n", - "---\n", - "\n", - "# How to create a custom chat model class\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "\n", - ":::\n", - "```\n", - "\n", - "This notebook goes over how to create a custom chat model wrapper, in case you want to use your own chat model or a different wrapper than one that is directly supported in LangChain.\n", - "\n", - "There are a few required things that a chat model needs to implement after extending the [`SimpleChatModel` class](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.SimpleChatModel.html):\n", - "\n", - "- A `_call` method that takes in a list of messages and call options (which includes things like `stop` sequences), and returns a string.\n", - "- A `_llmType` method that returns a string. Used for logging purposes only.\n", - "\n", - "You can also implement the following optional method:\n", - "\n", - "- A `_streamResponseChunks` method that returns an `AsyncGenerator` and yields [`ChatGenerationChunks`](https://api.js.langchain.com/classes/langchain_core.outputs.ChatGenerationChunk.html). This allows the LLM to support streaming outputs.\n", - "\n", - "Let's implement a very simple custom chat model that just echoes back the first `n` characters of the input." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import {\n", - " SimpleChatModel,\n", - " type BaseChatModelParams,\n", - "} from \"@langchain/core/language_models/chat_models\";\n", - "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", - "import { AIMessageChunk, type BaseMessage } from \"@langchain/core/messages\";\n", - "import { ChatGenerationChunk } from \"@langchain/core/outputs\";\n", - "\n", - "interface CustomChatModelInput extends BaseChatModelParams {\n", - " n: number;\n", - "}\n", - "\n", - "class CustomChatModel extends SimpleChatModel {\n", - " n: number;\n", - "\n", - " constructor(fields: CustomChatModelInput) {\n", - " super(fields);\n", - " this.n = fields.n;\n", - " }\n", - "\n", - " _llmType() {\n", - " return \"custom\";\n", - " }\n", - "\n", - " async _call(\n", - " messages: BaseMessage[],\n", - " options: this[\"ParsedCallOptions\"],\n", - " runManager?: CallbackManagerForLLMRun\n", - " ): Promise {\n", - " if (!messages.length) {\n", - " throw new Error(\"No messages provided.\");\n", - " }\n", - " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", - " // await subRunnable.invoke(params, runManager?.getChild());\n", - " if (typeof messages[0].content !== \"string\") {\n", - " throw new Error(\"Multimodal messages are not supported.\");\n", - " }\n", - " return messages[0].content.slice(0, this.n);\n", - " }\n", - "\n", - " async *_streamResponseChunks(\n", - " messages: BaseMessage[],\n", - " options: this[\"ParsedCallOptions\"],\n", - " runManager?: CallbackManagerForLLMRun\n", - " ): AsyncGenerator {\n", - " if (!messages.length) {\n", - " throw new Error(\"No messages provided.\");\n", - " }\n", - " if (typeof messages[0].content !== \"string\") {\n", - " throw new Error(\"Multimodal messages are not supported.\");\n", - " }\n", - " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", - " // await subRunnable.invoke(params, runManager?.getChild());\n", - " for (const letter of messages[0].content.slice(0, this.n)) {\n", - " yield new ChatGenerationChunk({\n", - " message: new AIMessageChunk({\n", - " content: letter,\n", - " }),\n", - " text: letter,\n", - " });\n", - " // Trigger the appropriate callback for new chunks\n", - " await runManager?.handleLLMNewToken(letter);\n", - " }\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now use this as any other chat model:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'I am',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'I am',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: undefined\n", - "}\n" - ] - } - ], - "source": [ - "const chatModel = new CustomChatModel({ n: 4 });\n", - "\n", - "await chatModel.invoke([[\"human\", \"I am an LLM\"]]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And support streaming:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - 
"metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 4\n", + "---\n", + "\n", + "# How to create a custom chat model class\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "\n", + ":::\n", + "```\n", + "\n", + "This notebook goes over how to create a custom chat model wrapper, in case you want to use your own chat model or a different wrapper than one that is directly supported in LangChain.\n", + "\n", + "There are a few required things that a chat model needs to implement after extending the [`SimpleChatModel` class](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.SimpleChatModel.html):\n", + "\n", + "- A `_call` method that takes in a list of messages and call options (which includes things like `stop` sequences), and returns a string.\n", + "- A `_llmType` method that returns a string. Used for logging purposes only.\n", + "\n", + "You can also implement the following optional method:\n", + "\n", + "- A `_streamResponseChunks` method that returns an `AsyncGenerator` and yields [`ChatGenerationChunks`](https://api.js.langchain.com/classes/langchain_core.outputs.ChatGenerationChunk.html). This allows the LLM to support streaming outputs.\n", + "\n", + "Let's implement a very simple custom chat model that just echoes back the first `n` characters of the input." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'I',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'I',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - "}\n", - "AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: ' ',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: ' ',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - "}\n", - "AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'a',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'a',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - "}\n", - "AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'm',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " additional_kwargs: {},\n", - " 
response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'm',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - "}\n" - ] - } - ], - "source": [ - "const stream = await chatModel.stream([[\"human\", \"I am an LLM\"]]);\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Richer outputs\n", - "\n", - "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseChatModel`](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html) class and implement the lower level\n", - "`_generate` method. It also takes a list of `BaseMessage`s as input, but requires you to construct and return a `ChatGeneration` object that permits additional metadata.\n", - "Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "import { AIMessage, BaseMessage } from \"@langchain/core/messages\";\n", - "import { ChatResult } from \"@langchain/core/outputs\";\n", - "import {\n", - " BaseChatModel,\n", - " BaseChatModelCallOptions,\n", - " BaseChatModelParams,\n", - "} from \"@langchain/core/language_models/chat_models\";\n", - "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", - "\n", - "interface AdvancedCustomChatModelOptions\n", - " extends BaseChatModelCallOptions {}\n", - "\n", - "interface AdvancedCustomChatModelParams extends BaseChatModelParams {\n", - " n: number;\n", - "}\n", - "\n", - "class AdvancedCustomChatModel extends BaseChatModel {\n", - " n: number;\n", - "\n", - " static lc_name(): string {\n", - " return \"AdvancedCustomChatModel\";\n", - " }\n", - "\n", - " constructor(fields: AdvancedCustomChatModelParams) {\n", - " super(fields);\n", - " this.n = fields.n;\n", - " }\n", - "\n", - " async _generate(\n", - " messages: BaseMessage[],\n", - " options: this[\"ParsedCallOptions\"],\n", - " runManager?: CallbackManagerForLLMRun\n", - " ): Promise {\n", - " if (!messages.length) {\n", - " throw new Error(\"No messages provided.\");\n", - " }\n", - " if (typeof messages[0].content !== \"string\") {\n", - " throw new Error(\"Multimodal messages are not supported.\");\n", - " }\n", - " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", - " // await subRunnable.invoke(params, runManager?.getChild());\n", - " const content = messages[0].content.slice(0, this.n);\n", - " const tokenUsage = {\n", - " usedTokens: this.n,\n", - " };\n", - " return {\n", - " generations: [{ message: new AIMessage({ content }), text: content }],\n", - " llmOutput: { tokenUsage },\n", - " };\n", - " }\n", - "\n", - " _llmType(): string {\n", - " return \"advanced_custom_chat_model\";\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This will pass the additional returned information in callback events and in the `streamEvents method:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " SimpleChatModel,\n", + " type BaseChatModelParams,\n", + 
"} from \"@langchain/core/language_models/chat_models\";\n", + "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", + "import { AIMessageChunk, type BaseMessage } from \"@langchain/core/messages\";\n", + "import { ChatGenerationChunk } from \"@langchain/core/outputs\";\n", + "\n", + "interface CustomChatModelInput extends BaseChatModelParams {\n", + " n: number;\n", + "}\n", + "\n", + "class CustomChatModel extends SimpleChatModel {\n", + " n: number;\n", + "\n", + " constructor(fields: CustomChatModelInput) {\n", + " super(fields);\n", + " this.n = fields.n;\n", + " }\n", + "\n", + " _llmType() {\n", + " return \"custom\";\n", + " }\n", + "\n", + " async _call(\n", + " messages: BaseMessage[],\n", + " options: this[\"ParsedCallOptions\"],\n", + " runManager?: CallbackManagerForLLMRun\n", + " ): Promise {\n", + " if (!messages.length) {\n", + " throw new Error(\"No messages provided.\");\n", + " }\n", + " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", + " // await subRunnable.invoke(params, runManager?.getChild());\n", + " if (typeof messages[0].content !== \"string\") {\n", + " throw new Error(\"Multimodal messages are not supported.\");\n", + " }\n", + " return messages[0].content.slice(0, this.n);\n", + " }\n", + "\n", + " async *_streamResponseChunks(\n", + " messages: BaseMessage[],\n", + " options: this[\"ParsedCallOptions\"],\n", + " runManager?: CallbackManagerForLLMRun\n", + " ): AsyncGenerator {\n", + " if (!messages.length) {\n", + " throw new Error(\"No messages provided.\");\n", + " }\n", + " if (typeof messages[0].content !== \"string\") {\n", + " throw new Error(\"Multimodal messages are not supported.\");\n", + " }\n", + " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", + " // await subRunnable.invoke(params, runManager?.getChild());\n", + " for (const letter of messages[0].content.slice(0, this.n)) {\n", + " yield new ChatGenerationChunk({\n", + " message: new AIMessageChunk({\n", + " content: letter,\n", + " }),\n", + " text: letter,\n", + " });\n", + " // Trigger the appropriate callback for new chunks\n", + " await runManager?.handleLLMNewToken(letter);\n", + " }\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now use this as any other chat model:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"event\": \"on_chat_model_end\",\n", - " \"data\": {\n", - " \"output\": {\n", - " \"lc\": 1,\n", - " \"type\": \"constructor\",\n", - " \"id\": [\n", - " \"langchain_core\",\n", - " \"messages\",\n", - " \"AIMessage\"\n", - " ],\n", - " \"kwargs\": {\n", - " \"content\": \"I am\",\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"usedTokens\": 4\n", - " }\n", - " }\n", - " }\n", - " }\n", - " },\n", - " \"run_id\": \"11dbdef6-1b91-407e-a497-1a1ce2974788\",\n", - " \"name\": \"AdvancedCustomChatModel\",\n", - " \"tags\": [],\n", - " \"metadata\": {\n", - " \"ls_model_type\": \"chat\"\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'I am',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " 
},\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'I am',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const chatModel = new CustomChatModel({ n: 4 });\n", + "\n", + "await chatModel.invoke([[\"human\", \"I am an LLM\"]]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And support streaming:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'I',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'I',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + "}\n", + "AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: ' ',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: ' ',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + "}\n", + "AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'a',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'a',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + "}\n", + "AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'm',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'm',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const stream = await chatModel.stream([[\"human\", \"I am an LLM\"]]);\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(chunk);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Richer outputs\n", + "\n", + "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseChatModel`](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html) class and implement the 
lower level\n", + "`_generate` method. It also takes a list of `BaseMessage`s as input, but requires you to construct and return a `ChatGeneration` object that permits additional metadata.\n", + "Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { AIMessage, BaseMessage } from \"@langchain/core/messages\";\n", + "import { ChatResult } from \"@langchain/core/outputs\";\n", + "import {\n", + " BaseChatModel,\n", + " BaseChatModelCallOptions,\n", + " BaseChatModelParams,\n", + "} from \"@langchain/core/language_models/chat_models\";\n", + "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", + "\n", + "interface AdvancedCustomChatModelOptions\n", + " extends BaseChatModelCallOptions {}\n", + "\n", + "interface AdvancedCustomChatModelParams extends BaseChatModelParams {\n", + " n: number;\n", + "}\n", + "\n", + "class AdvancedCustomChatModel extends BaseChatModel {\n", + " n: number;\n", + "\n", + " static lc_name(): string {\n", + " return \"AdvancedCustomChatModel\";\n", + " }\n", + "\n", + " constructor(fields: AdvancedCustomChatModelParams) {\n", + " super(fields);\n", + " this.n = fields.n;\n", + " }\n", + "\n", + " async _generate(\n", + " messages: BaseMessage[],\n", + " options: this[\"ParsedCallOptions\"],\n", + " runManager?: CallbackManagerForLLMRun\n", + " ): Promise {\n", + " if (!messages.length) {\n", + " throw new Error(\"No messages provided.\");\n", + " }\n", + " if (typeof messages[0].content !== \"string\") {\n", + " throw new Error(\"Multimodal messages are not supported.\");\n", + " }\n", + " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", + " // await subRunnable.invoke(params, runManager?.getChild());\n", + " const content = messages[0].content.slice(0, this.n);\n", + " const tokenUsage = {\n", + " usedTokens: this.n,\n", + " };\n", + " return {\n", + " generations: [{ message: new AIMessage({ content }), text: content }],\n", + " llmOutput: { tokenUsage },\n", + " };\n", + " }\n", + "\n", + " _llmType(): string {\n", + " return \"advanced_custom_chat_model\";\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will pass the additional returned information in callback events and in the `streamEvents method:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"event\": \"on_chat_model_end\",\n", + " \"data\": {\n", + " \"output\": {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain_core\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"I am\",\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"usedTokens\": 4\n", + " }\n", + " }\n", + " }\n", + " }\n", + " },\n", + " \"run_id\": \"11dbdef6-1b91-407e-a497-1a1ce2974788\",\n", + " \"name\": \"AdvancedCustomChatModel\",\n", + " \"tags\": [],\n", + " \"metadata\": {\n", + " \"ls_model_type\": \"chat\"\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const chatModel = new AdvancedCustomChatModel({ n: 4 });\n", + "\n", + "const eventStream = await chatModel.streamEvents([[\"human\", \"I am an LLM\"]], {\n", + " version: \"v2\",\n", + "});\n", + "\n", + "for await (const event of eventStream) 
{\n", + " if (event.event === \"on_chat_model_end\") {\n", + " console.log(JSON.stringify(event, null, 2));\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Tracing (advanced)\n", + "\n", + "If you are implementing a custom chat model and want to use it with a tracing service like [LangSmith](https://smith.langchain.com/),\n", + "you can automatically log params used for a given invocation by implementing the `invocationParams()` method on the model.\n", + "\n", + "This method is purely optional, but anything it returns will be logged as metadata for the trace.\n", + "\n", + "Here's one pattern you might use:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", + "import { BaseChatModel, type BaseChatModelCallOptions, type BaseChatModelParams } from \"@langchain/core/language_models/chat_models\";\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { ChatResult } from \"@langchain/core/outputs\";\n", + "\n", + "interface CustomChatModelOptions extends BaseChatModelCallOptions {\n", + " // Some required or optional inner args\n", + " tools: Record[];\n", + "}\n", + "\n", + "interface CustomChatModelParams extends BaseChatModelParams {\n", + " temperature: number;\n", + " n: number;\n", + "}\n", + "\n", + "class CustomChatModel extends BaseChatModel {\n", + " temperature: number;\n", + "\n", + " n: number;\n", + "\n", + " static lc_name(): string {\n", + " return \"CustomChatModel\";\n", + " }\n", + "\n", + " constructor(fields: CustomChatModelParams) {\n", + " super(fields);\n", + " this.temperature = fields.temperature;\n", + " this.n = fields.n;\n", + " }\n", + "\n", + " // Anything returned in this method will be logged as metadata in the trace.\n", + " // It is common to pass it any options used to invoke the function.\n", + " invocationParams(options?: this[\"ParsedCallOptions\"]) {\n", + " return {\n", + " tools: options?.tools,\n", + " n: this.n,\n", + " };\n", + " }\n", + "\n", + " async _generate(\n", + " messages: BaseMessage[],\n", + " options: this[\"ParsedCallOptions\"],\n", + " runManager?: CallbackManagerForLLMRun\n", + " ): Promise {\n", + " if (!messages.length) {\n", + " throw new Error(\"No messages provided.\");\n", + " }\n", + " if (typeof messages[0].content !== \"string\") {\n", + " throw new Error(\"Multimodal messages are not supported.\");\n", + " }\n", + " const additionalParams = this.invocationParams(options);\n", + " const content = await someAPIRequest(messages, additionalParams);\n", + " return {\n", + " generations: [{ message: new AIMessage({ content }), text: content }],\n", + " llmOutput: {},\n", + " };\n", + " }\n", + "\n", + " _llmType(): string {\n", + " return \"advanced_custom_chat_model\";\n", + " }\n", + "}" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const chatModel = new AdvancedCustomChatModel({ n: 4 });\n", - "\n", - "const eventStream = await chatModel.streamEvents([[\"human\", \"I am an LLM\"]], {\n", - " version: \"v2\",\n", - "});\n", - "\n", - "for await (const event of eventStream) {\n", - " if 
(event.event === \"on_chat_model_end\") {\n", - " console.log(JSON.stringify(event, null, 2));\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Tracing (advanced)\n", - "\n", - "If you are implementing a custom chat model and want to use it with a tracing service like [LangSmith](https://smith.langchain.com/),\n", - "you can automatically log params used for a given invocation by implementing the `invocationParams()` method on the model.\n", - "\n", - "This method is purely optional, but anything it returns will be logged as metadata for the trace.\n", - "\n", - "Here's one pattern you might use:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", - "import { BaseChatModel, type BaseChatModelCallOptions, type BaseChatModelParams } from \"@langchain/core/language_models/chat_models\";\n", - "import { BaseMessage } from \"@langchain/core/messages\";\n", - "import { ChatResult } from \"@langchain/core/outputs\";\n", - "\n", - "interface CustomChatModelOptions extends BaseChatModelCallOptions {\n", - " // Some required or optional inner args\n", - " tools: Record[];\n", - "}\n", - "\n", - "interface CustomChatModelParams extends BaseChatModelParams {\n", - " temperature: number;\n", - " n: number;\n", - "}\n", - "\n", - "class CustomChatModel extends BaseChatModel {\n", - " temperature: number;\n", - "\n", - " n: number;\n", - "\n", - " static lc_name(): string {\n", - " return \"CustomChatModel\";\n", - " }\n", - "\n", - " constructor(fields: CustomChatModelParams) {\n", - " super(fields);\n", - " this.temperature = fields.temperature;\n", - " this.n = fields.n;\n", - " }\n", - "\n", - " // Anything returned in this method will be logged as metadata in the trace.\n", - " // It is common to pass it any options used to invoke the function.\n", - " invocationParams(options?: this[\"ParsedCallOptions\"]) {\n", - " return {\n", - " tools: options?.tools,\n", - " n: this.n,\n", - " };\n", - " }\n", - "\n", - " async _generate(\n", - " messages: BaseMessage[],\n", - " options: this[\"ParsedCallOptions\"],\n", - " runManager?: CallbackManagerForLLMRun\n", - " ): Promise {\n", - " if (!messages.length) {\n", - " throw new Error(\"No messages provided.\");\n", - " }\n", - " if (typeof messages[0].content !== \"string\") {\n", - " throw new Error(\"Multimodal messages are not supported.\");\n", - " }\n", - " const additionalParams = this.invocationParams(options);\n", - " const content = await someAPIRequest(messages, additionalParams);\n", - " return {\n", - " generations: [{ message: new AIMessage({ content }), text: content }],\n", - " llmOutput: {},\n", - " };\n", - " }\n", - "\n", - " _llmType(): string {\n", - " return \"advanced_custom_chat_model\";\n", - " }\n", - "}" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/custom_llm.ipynb b/docs/core_docs/docs/how_to/custom_llm.ipynb index fd47376dad2a..3aa2238f743e 100644 --- 
a/docs/core_docs/docs/how_to/custom_llm.ipynb +++ b/docs/core_docs/docs/how_to/custom_llm.ipynb @@ -1,299 +1,299 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 3\n", - "---\n", - "\n", - "# How to create a custom LLM class\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LLMs](/docs/concepts/#llms)\n", - "\n", - ":::\n", - "```\n", - "\n", - "This notebook goes over how to create a custom LLM wrapper, in case you want to use your own LLM or a different wrapper than one that is directly supported in LangChain.\n", - "\n", - "There are a few required things that a custom LLM needs to implement after extending the [`LLM` class](https://api.js.langchain.com/classes/langchain_core.language_models_llms.LLM.html):\n", - "\n", - "- A `_call` method that takes in a string and call options (which includes things like `stop` sequences), and returns a string.\n", - "- A `_llmType` method that returns a string. Used for logging purposes only.\n", - "\n", - "You can also implement the following optional method:\n", - "\n", - "- A `_streamResponseChunks` method that returns an `AsyncIterator` and yields [`GenerationChunks`](https://api.js.langchain.com/classes/langchain_core.outputs.GenerationChunk.html). This allows the LLM to support streaming outputs.\n", - "\n", - "Let's implement a very simple custom LLM that just echoes back the first `n` characters of the input." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { LLM, type BaseLLMParams } from \"@langchain/core/language_models/llms\";\n", - "import type { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", - "import { GenerationChunk } from \"@langchain/core/outputs\";\n", - "\n", - "interface CustomLLMInput extends BaseLLMParams {\n", - " n: number;\n", - "}\n", - "\n", - "class CustomLLM extends LLM {\n", - " n: number;\n", - "\n", - " constructor(fields: CustomLLMInput) {\n", - " super(fields);\n", - " this.n = fields.n;\n", - " }\n", - "\n", - " _llmType() {\n", - " return \"custom\";\n", - " }\n", - "\n", - " async _call(\n", - " prompt: string,\n", - " options: this[\"ParsedCallOptions\"],\n", - " runManager: CallbackManagerForLLMRun\n", - " ): Promise {\n", - " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", - " // await subRunnable.invoke(params, runManager?.getChild());\n", - " return prompt.slice(0, this.n);\n", - " }\n", - "\n", - " async *_streamResponseChunks(\n", - " prompt: string,\n", - " options: this[\"ParsedCallOptions\"],\n", - " runManager?: CallbackManagerForLLMRun\n", - " ): AsyncGenerator {\n", - " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", - " // await subRunnable.invoke(params, runManager?.getChild());\n", - " for (const letter of prompt.slice(0, this.n)) {\n", - " yield new GenerationChunk({\n", - " text: letter,\n", - " });\n", - " // Trigger the appropriate callback\n", - " await runManager?.handleLLMNewToken(letter);\n", - " }\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now use this as any other LLM:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "I am\n" - ] - } - ], - "source": [ - "const llm = new 
CustomLLM({ n: 4 });\n", - "\n", - "await llm.invoke(\"I am an LLM\");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And support streaming:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 3\n", + "---\n", + "\n", + "# How to create a custom LLM class\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LLMs](/docs/concepts/text_llms)\n", + "\n", + ":::\n", + "```\n", + "\n", + "This notebook goes over how to create a custom LLM wrapper, in case you want to use your own LLM or a different wrapper than one that is directly supported in LangChain.\n", + "\n", + "There are a few required things that a custom LLM needs to implement after extending the [`LLM` class](https://api.js.langchain.com/classes/langchain_core.language_models_llms.LLM.html):\n", + "\n", + "- A `_call` method that takes in a string and call options (which includes things like `stop` sequences), and returns a string.\n", + "- A `_llmType` method that returns a string. Used for logging purposes only.\n", + "\n", + "You can also implement the following optional method:\n", + "\n", + "- A `_streamResponseChunks` method that returns an `AsyncIterator` and yields [`GenerationChunks`](https://api.js.langchain.com/classes/langchain_core.outputs.GenerationChunk.html). This allows the LLM to support streaming outputs.\n", + "\n", + "Let's implement a very simple custom LLM that just echoes back the first `n` characters of the input." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "I\n", - " \n", - "a\n", - "m\n" - ] - } - ], - "source": [ - "const stream = await llm.stream(\"I am an LLM\");\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Richer outputs\n", - "\n", - "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseLLM`](https://api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html) class and implement the lower level\n", - "`_generate` method. Rather than taking a single string as input and a single string output, it can take multiple input strings and map each to multiple string outputs.\n", - "Additionally, it returns a `Generation` output with fields for additional metadata rather than just a string." 
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", - "import { LLMResult } from \"@langchain/core/outputs\";\n", - "import {\n", - " BaseLLM,\n", - " BaseLLMCallOptions,\n", - " BaseLLMParams,\n", - "} from \"@langchain/core/language_models/llms\";\n", - "\n", - "interface AdvancedCustomLLMCallOptions extends BaseLLMCallOptions {}\n", - "\n", - "interface AdvancedCustomLLMParams extends BaseLLMParams {\n", - " n: number;\n", - "}\n", - "\n", - "class AdvancedCustomLLM extends BaseLLM {\n", - " n: number;\n", - "\n", - " constructor(fields: AdvancedCustomLLMParams) {\n", - " super(fields);\n", - " this.n = fields.n;\n", - " }\n", - "\n", - " _llmType() {\n", - " return \"advanced_custom_llm\";\n", - " }\n", - "\n", - " async _generate(\n", - " inputs: string[],\n", - " options: this[\"ParsedCallOptions\"],\n", - " runManager?: CallbackManagerForLLMRun\n", - " ): Promise {\n", - " const outputs = inputs.map((input) => input.slice(0, this.n));\n", - " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", - " // await subRunnable.invoke(params, runManager?.getChild());\n", - "\n", - " // One input could generate multiple outputs.\n", - " const generations = outputs.map((output) => [\n", - " {\n", - " text: output,\n", - " // Optional additional metadata for the generation\n", - " generationInfo: { outputCount: 1 },\n", - " },\n", - " ]);\n", - " const tokenUsage = {\n", - " usedTokens: this.n,\n", - " };\n", - " return {\n", - " generations,\n", - " llmOutput: { tokenUsage },\n", - " };\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This will pass the additional returned information in callback events and in the `streamEvents method:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { LLM, type BaseLLMParams } from \"@langchain/core/language_models/llms\";\n", + "import type { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", + "import { GenerationChunk } from \"@langchain/core/outputs\";\n", + "\n", + "interface CustomLLMInput extends BaseLLMParams {\n", + " n: number;\n", + "}\n", + "\n", + "class CustomLLM extends LLM {\n", + " n: number;\n", + "\n", + " constructor(fields: CustomLLMInput) {\n", + " super(fields);\n", + " this.n = fields.n;\n", + " }\n", + "\n", + " _llmType() {\n", + " return \"custom\";\n", + " }\n", + "\n", + " async _call(\n", + " prompt: string,\n", + " options: this[\"ParsedCallOptions\"],\n", + " runManager: CallbackManagerForLLMRun\n", + " ): Promise {\n", + " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", + " // await subRunnable.invoke(params, runManager?.getChild());\n", + " return prompt.slice(0, this.n);\n", + " }\n", + "\n", + " async *_streamResponseChunks(\n", + " prompt: string,\n", + " options: this[\"ParsedCallOptions\"],\n", + " runManager?: CallbackManagerForLLMRun\n", + " ): AsyncGenerator {\n", + " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", + " // await subRunnable.invoke(params, runManager?.getChild());\n", + " for (const letter of prompt.slice(0, this.n)) {\n", + " yield new GenerationChunk({\n", + " text: letter,\n", + " });\n", + " // Trigger the appropriate 
callback\n", + " await runManager?.handleLLMNewToken(letter);\n", + " }\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now use this as any other LLM:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I am\n" + ] + } + ], + "source": [ + "const llm = new CustomLLM({ n: 4 });\n", + "\n", + "await llm.invoke(\"I am an LLM\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And support streaming:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I\n", + " \n", + "a\n", + "m\n" + ] + } + ], + "source": [ + "const stream = await llm.stream(\"I am an LLM\");\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(chunk);\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"event\": \"on_llm_end\",\n", - " \"data\": {\n", - " \"output\": {\n", - " \"generations\": [\n", - " [\n", - " {\n", - " \"text\": \"I am\",\n", - " \"generationInfo\": {\n", - " \"outputCount\": 1\n", - " }\n", - " }\n", - " ]\n", - " ],\n", - " \"llmOutput\": {\n", - " \"tokenUsage\": {\n", - " \"usedTokens\": 4\n", - " }\n", - " }\n", - " }\n", - " },\n", - " \"run_id\": \"a9ce50e4-f85b-41eb-bcbe-793efc52f9d8\",\n", - " \"name\": \"AdvancedCustomLLM\",\n", - " \"tags\": [],\n", - " \"metadata\": {}\n", - "}\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Richer outputs\n", + "\n", + "If you want to take advantage of LangChain's callback system for functionality like token tracking, you can extend the [`BaseLLM`](https://api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html) class and implement the lower level\n", + "`_generate` method. Rather than taking a single string as input and a single string output, it can take multiple input strings and map each to multiple string outputs.\n", + "Additionally, it returns a `Generation` output with fields for additional metadata rather than just a string." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\n", + "import { LLMResult } from \"@langchain/core/outputs\";\n", + "import {\n", + " BaseLLM,\n", + " BaseLLMCallOptions,\n", + " BaseLLMParams,\n", + "} from \"@langchain/core/language_models/llms\";\n", + "\n", + "interface AdvancedCustomLLMCallOptions extends BaseLLMCallOptions {}\n", + "\n", + "interface AdvancedCustomLLMParams extends BaseLLMParams {\n", + " n: number;\n", + "}\n", + "\n", + "class AdvancedCustomLLM extends BaseLLM {\n", + " n: number;\n", + "\n", + " constructor(fields: AdvancedCustomLLMParams) {\n", + " super(fields);\n", + " this.n = fields.n;\n", + " }\n", + "\n", + " _llmType() {\n", + " return \"advanced_custom_llm\";\n", + " }\n", + "\n", + " async _generate(\n", + " inputs: string[],\n", + " options: this[\"ParsedCallOptions\"],\n", + " runManager?: CallbackManagerForLLMRun\n", + " ): Promise {\n", + " const outputs = inputs.map((input) => input.slice(0, this.n));\n", + " // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing\n", + " // await subRunnable.invoke(params, runManager?.getChild());\n", + "\n", + " // One input could generate multiple outputs.\n", + " const generations = outputs.map((output) => [\n", + " {\n", + " text: output,\n", + " // Optional additional metadata for the generation\n", + " generationInfo: { outputCount: 1 },\n", + " },\n", + " ]);\n", + " const tokenUsage = {\n", + " usedTokens: this.n,\n", + " };\n", + " return {\n", + " generations,\n", + " llmOutput: { tokenUsage },\n", + " };\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This will pass the additional returned information in callback events and in the `streamEvents method:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"event\": \"on_llm_end\",\n", + " \"data\": {\n", + " \"output\": {\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"I am\",\n", + " \"generationInfo\": {\n", + " \"outputCount\": 1\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llmOutput\": {\n", + " \"tokenUsage\": {\n", + " \"usedTokens\": 4\n", + " }\n", + " }\n", + " }\n", + " },\n", + " \"run_id\": \"a9ce50e4-f85b-41eb-bcbe-793efc52f9d8\",\n", + " \"name\": \"AdvancedCustomLLM\",\n", + " \"tags\": [],\n", + " \"metadata\": {}\n", + "}\n" + ] + } + ], + "source": [ + "const llm = new AdvancedCustomLLM({ n: 4 });\n", + "\n", + "const eventStream = await llm.streamEvents(\"I am an LLM\", {\n", + " version: \"v2\",\n", + "});\n", + "\n", + "for await (const event of eventStream) {\n", + " if (event.event === \"on_llm_end\") {\n", + " console.log(JSON.stringify(event, null, 2));\n", + " }\n", + "}" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const llm = new AdvancedCustomLLM({ n: 4 });\n", - "\n", - "const eventStream = await llm.streamEvents(\"I am an LLM\", {\n", - " version: \"v2\",\n", - "});\n", - "\n", - "for await (const event of eventStream) {\n", - " if (event.event 
=== \"on_llm_end\") {\n", - " console.log(JSON.stringify(event, null, 2));\n", - " }\n", - "}" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/custom_retriever.mdx b/docs/core_docs/docs/how_to/custom_retriever.mdx index 28edfff6bbf9..acf7906242c9 100644 --- a/docs/core_docs/docs/how_to/custom_retriever.mdx +++ b/docs/core_docs/docs/how_to/custom_retriever.mdx @@ -4,7 +4,7 @@ This guide assumes familiarity with the following concepts: -- [Retrievers](/docs/concepts/#retrievers) +- [Retrievers](/docs/concepts/retrievers) ::: diff --git a/docs/core_docs/docs/how_to/custom_tools.ipynb b/docs/core_docs/docs/how_to/custom_tools.ipynb index 95f68259ac03..fe47c3b21368 100644 --- a/docs/core_docs/docs/how_to/custom_tools.ipynb +++ b/docs/core_docs/docs/how_to/custom_tools.ipynb @@ -1,411 +1,411 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "04171ad7", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "keywords: [custom tool, custom tools]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "5436020b", - "metadata": {}, - "source": [ - "# How to create Tools\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain tools](/docs/concepts#tools)\n", - "- [Agents](/docs/concepts/#agents)\n", - "\n", - ":::\n", - "\n", - "When constructing your own agent, you will need to provide it with a list of Tools that it can use. While LangChain includes some prebuilt tools, it can often be more useful to use tools that use custom logic. This guide will walk you through some ways you can create custom tools.\n", - "\n", - "The biggest difference here is that the first function requires an object with multiple input fields, while the second one only accepts an object with a single field. Some older agents only work with functions that require single inputs, so it's important to understand the distinction.\n", - "\n", - "LangChain has a handful of ways to construct tools for different applications. Below I'll show the two most common ways to create tools, and where you might use each." - ] - }, - { - "cell_type": "markdown", - "id": "82bb159d", - "metadata": {}, - "source": [ - "## Tool schema\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "Only available in `@langchain/core` version 0.2.19 and above.\n", - ":::\n", - "```\n", - "\n", - "The simplest way to create a tool is through the [`StructuredToolParams`](https://api.js.langchain.com/interfaces/_langchain_core.tools.StructuredToolParams.html) schema. Every chat model which supports tool calling in LangChain accepts binding tools to the model through this schema. 
This schema has only three fields\n", - "\n", - "- `name` - The name of the tool.\n", - "- `schema` - The schema of the tool, defined with a Zod object.\n", - "- `description` (optional) - A description of the tool.\n", - "\n", - "This schema does not include a function to pair with the tool, and for this reason it should only be used in situations where the generated output does not need to be passed as the input argument to a function." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4d129789", - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { StructuredToolParams } from \"@langchain/core/tools\";\n", - "\n", - "const simpleToolSchema: StructuredToolParams = {\n", - " name: \"get_current_weather\",\n", - " description: \"Get the current weather for a location\",\n", - " schema: z.object({\n", - " city: z.string().describe(\"The city to get the weather for\"),\n", - " state: z.string().optional().describe(\"The state to get the weather for\"),\n", - " })\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "f6ec6ee8", - "metadata": {}, - "source": [ - "## `tool` function\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "Only available in `@langchain/core` version 0.2.7 and above.\n", - ":::\n", - "```\n", - "\n", - "The [`tool`](https://api.js.langchain.com/classes/langchain_core.tools.Tool.html) wrapper function is a convenience method for turning a JavaScript function into a tool. It requires the function itself along with some additional arguments that define your tool. You should use this over `StructuredToolParams` tools when the resulting tool call executes a function. The most important are:\n", - "\n", - "- The tool's `name`, which the LLM will use as context as well as to reference the tool\n", - "- An optional, but recommended `description`, which the LLM will use as context to know when to use the tool\n", - "- A `schema`, which defines the shape of the tool's input\n", - "\n", - "The `tool` function will return an instance of the [`StructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.StructuredTool.html) class, so it is compatible with all the existing tool calling infrastructure in the LangChain library." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "ecc1ce9d", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "\u001b[32m\"The sum of 1 and 2 is 3\"\u001b[39m" + "cell_type": "raw", + "id": "04171ad7", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [custom tool, custom tools]\n", + "---" ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const adderSchema = z.object({\n", - " a: z.number(),\n", - " b: z.number(),\n", - "});\n", - "const adderTool = tool(async (input): Promise => {\n", - " const sum = input.a + input.b;\n", - " return `The sum of ${input.a} and ${input.b} is ${sum}`;\n", - "}, {\n", - " name: \"adder\",\n", - " description: \"Adds two numbers together\",\n", - " schema: adderSchema,\n", - "});\n", - "\n", - "await adderTool.invoke({ a: 1, b: 2 });" - ] - }, - { - "cell_type": "markdown", - "id": "213ee344", - "metadata": {}, - "source": [ - "## `DynamicStructuredTool`\n", - "\n", - "You can also use the [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) class to declare tools. Here's an example - note that tools must always return strings!" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "833dda4a", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "\u001b[32m\"72\"\u001b[39m" + "cell_type": "markdown", + "id": "5436020b", + "metadata": {}, + "source": [ + "# How to create Tools\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain tools](/docs/concepts/tools)\n", + "- [Agents](/docs/concepts/agents)\n", + "\n", + ":::\n", + "\n", + "When constructing your own agent, you will need to provide it with a list of Tools that it can use. While LangChain includes some prebuilt tools, it can often be more useful to use tools that use custom logic. This guide will walk you through some ways you can create custom tools.\n", + "\n", + "The biggest difference here is that the first function requires an object with multiple input fields, while the second one only accepts an object with a single field. Some older agents only work with functions that require single inputs, so it's important to understand the distinction.\n", + "\n", + "LangChain has a handful of ways to construct tools for different applications. Below I'll show the two most common ways to create tools, and where you might use each." 
] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { DynamicStructuredTool } from \"@langchain/core/tools\";\n", - "import { z } from \"zod\";\n", - "\n", - "const multiplyTool = new DynamicStructuredTool({\n", - " name: \"multiply\",\n", - " description: \"multiply two numbers together\",\n", - " schema: z.object({\n", - " a: z.number().describe(\"the first number to multiply\"),\n", - " b: z.number().describe(\"the second number to multiply\"),\n", - " }),\n", - " func: async ({ a, b }: { a: number; b: number; }) => {\n", - " return (a * b).toString();\n", - " },\n", - "});\n", - "\n", - "await multiplyTool.invoke({ a: 8, b: 9, });" - ] - }, - { - "cell_type": "markdown", - "id": "c7326b23", - "metadata": {}, - "source": [ - "## `DynamicTool`\n", - "\n", - "For older agents that require tools which accept only a single input, you can pass the relevant parameters to the [`DynamicTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicTool.html) class. This is useful when working with older agents that only support tools that accept a single input. In this case, no schema is required:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "b0ce7de8", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "\u001b[32m\"LangChain\"\u001b[39m" + "cell_type": "markdown", + "id": "82bb159d", + "metadata": {}, + "source": [ + "## Tool schema\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "Only available in `@langchain/core` version 0.2.19 and above.\n", + ":::\n", + "```\n", + "\n", + "The simplest way to create a tool is through the [`StructuredToolParams`](https://api.js.langchain.com/interfaces/_langchain_core.tools.StructuredToolParams.html) schema. Every chat model which supports tool calling in LangChain accepts binding tools to the model through this schema. This schema has only three fields\n", + "\n", + "- `name` - The name of the tool.\n", + "- `schema` - The schema of the tool, defined with a Zod object.\n", + "- `description` (optional) - A description of the tool.\n", + "\n", + "This schema does not include a function to pair with the tool, and for this reason it should only be used in situations where the generated output does not need to be passed as the input argument to a function." ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { DynamicTool } from \"@langchain/core/tools\";\n", - "\n", - "const searchTool = new DynamicTool({\n", - " name: \"search\",\n", - " description: \"look things up online\",\n", - " func: async (_input: string) => {\n", - " return \"LangChain\";\n", - " },\n", - "});\n", - "\n", - "await searchTool.invoke(\"foo\");" - ] - }, - { - "cell_type": "markdown", - "id": "d4093dea", - "metadata": {}, - "source": [ - "# Returning artifacts of Tool execution\n", - "\n", - "Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output to the model. 
At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n", - "\n", - "The Tool and `ToolMessage` interfaces make it possible to distinguish between the parts of the tool output meant for the model (`ToolMessage.content`) and those parts which are meant for use outside the model (`ToolMessage.artifact`).\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "This functionality was added in `@langchain/core>=0.2.16`. Please make sure your package is up to date.\n", - ":::\n", - "```\n", - "\n", - "If you want your tool to distinguish between message content and other artifacts, we need to do three things:\n", - "\n", - "- Set the `response_format` parameter to `\"content_and_artifact\"` when defining the tool.\n", - "- Make sure that we return a tuple of `[content, artifact]`.\n", - "- Call the tool with a a [`ToolCall`](https://api.js.langchain.com/types/langchain_core.messages_tool.ToolCall.html) (like the ones generated by tool-calling models) rather than with the required schema directly.\n", - "\n", - "Here's an example of what this looks like. First, create a new tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "ecf15c35", - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const randomIntToolSchema = z.object({\n", - " min: z.number(),\n", - " max: z.number(),\n", - " size: z.number(),\n", - "});\n", - "\n", - "const generateRandomInts = tool(async ({ min, max, size }) => {\n", - " const array: number[] = [];\n", - " for (let i = 0; i < size; i++) {\n", - " array.push(Math.floor(Math.random() * (max - min + 1)) + min);\n", - " }\n", - " return [\n", - " `Successfully generated array of ${size} random ints in [${min}, ${max}].`,\n", - " array,\n", - " ];\n", - "}, {\n", - " name: \"generateRandomInts\",\n", - " description: \"Generate size random ints in the range [min, max].\",\n", - " schema: randomIntToolSchema,\n", - " responseFormat: \"content_and_artifact\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "5775e686", - "metadata": {}, - "source": [ - "If you invoke our tool directly with the tool arguments, you'll get back just the `content` part of the output:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "ecbde6de", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "\u001b[32m\"Successfully generated array of 10 random ints in [0, 9].\"\u001b[39m" + "cell_type": "code", + "execution_count": null, + "id": "4d129789", + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { StructuredToolParams } from \"@langchain/core/tools\";\n", + "\n", + "const simpleToolSchema: StructuredToolParams = {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather for a location\",\n", + " schema: z.object({\n", + " city: z.string().describe(\"The city to get the weather for\"),\n", + " state: z.string().optional().describe(\"The state to get the weather for\"),\n", + " })\n", + "}" ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await generateRandomInts.invoke({ min: 0, max: 9, size: 10 });" - ] - }, - { - "cell_type": "markdown", - "id": "6299ef60", - "metadata": {}, - "source": [ - "But if you invoke our tool with a `ToolCall`, you'll get back a ToolMessage that contains both the content and artifact generated by the `Tool`:" - 
] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "05209573", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "id": "f6ec6ee8", + "metadata": {}, + "source": [ + "## `tool` function\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "Only available in `@langchain/core` version 0.2.7 and above.\n", + ":::\n", + "```\n", + "\n", + "The [`tool`](https://api.js.langchain.com/classes/langchain_core.tools.Tool.html) wrapper function is a convenience method for turning a JavaScript function into a tool. It requires the function itself along with some additional arguments that define your tool. You should use this over `StructuredToolParams` tools when the resulting tool call executes a function. The most important are:\n", + "\n", + "- The tool's `name`, which the LLM will use as context as well as to reference the tool\n", + "- An optional, but recommended `description`, which the LLM will use as context to know when to use the tool\n", + "- A `schema`, which defines the shape of the tool's input\n", + "\n", + "The `tool` function will return an instance of the [`StructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.StructuredTool.html) class, so it is compatible with all the existing tool calling infrastructure in the LangChain library." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "ecc1ce9d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"The sum of 1 and 2 is 3\"\u001b[39m" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const adderSchema = z.object({\n", + " a: z.number(),\n", + " b: z.number(),\n", + "});\n", + "const adderTool = tool(async (input): Promise => {\n", + " const sum = input.a + input.b;\n", + " return `The sum of ${input.a} and ${input.b} is ${sum}`;\n", + "}, {\n", + " name: \"adder\",\n", + " description: \"Adds two numbers together\",\n", + " schema: adderSchema,\n", + "});\n", + "\n", + "await adderTool.invoke({ a: 1, b: 2 });" + ] + }, { - "data": { - "text/plain": [ - "ToolMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Successfully generated array of 10 random ints in [0, 9].\"\u001b[39m,\n", - " artifact: [\n", - " \u001b[33m7\u001b[39m, \u001b[33m7\u001b[39m, \u001b[33m1\u001b[39m, \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m,\n", - " \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m, \u001b[33m3\u001b[39m, \u001b[33m0\u001b[39m, \u001b[33m9\u001b[39m\n", - " ],\n", - " tool_call_id: \u001b[32m\"123\"\u001b[39m,\n", - " name: \u001b[32m\"generateRandomInts\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Successfully generated array of 10 random ints in [0, 9].\"\u001b[39m,\n", - " name: \u001b[32m\"generateRandomInts\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: \u001b[90mundefined\u001b[39m,\n", - " tool_call_id: \u001b[32m\"123\"\u001b[39m,\n", - " artifact: [\n", - " \u001b[33m7\u001b[39m, \u001b[33m7\u001b[39m, \u001b[33m1\u001b[39m, \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m,\n", - " \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m, \u001b[33m3\u001b[39m, \u001b[33m0\u001b[39m, \u001b[33m9\u001b[39m\n", - " ]\n", - "}" + 
"cell_type": "markdown", + "id": "213ee344", + "metadata": {}, + "source": [ + "## `DynamicStructuredTool`\n", + "\n", + "You can also use the [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) class to declare tools. Here's an example - note that tools must always return strings!" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "833dda4a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"72\"\u001b[39m" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { DynamicStructuredTool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "\n", + "const multiplyTool = new DynamicStructuredTool({\n", + " name: \"multiply\",\n", + " description: \"multiply two numbers together\",\n", + " schema: z.object({\n", + " a: z.number().describe(\"the first number to multiply\"),\n", + " b: z.number().describe(\"the second number to multiply\"),\n", + " }),\n", + " func: async ({ a, b }: { a: number; b: number; }) => {\n", + " return (a * b).toString();\n", + " },\n", + "});\n", + "\n", + "await multiplyTool.invoke({ a: 8, b: 9, });" + ] + }, + { + "cell_type": "markdown", + "id": "c7326b23", + "metadata": {}, + "source": [ + "## `DynamicTool`\n", + "\n", + "For older agents that require tools which accept only a single input, you can pass the relevant parameters to the [`DynamicTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicTool.html) class. This is useful when working with older agents that only support tools that accept a single input. In this case, no schema is required:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "b0ce7de8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"LangChain\"\u001b[39m" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { DynamicTool } from \"@langchain/core/tools\";\n", + "\n", + "const searchTool = new DynamicTool({\n", + " name: \"search\",\n", + " description: \"look things up online\",\n", + " func: async (_input: string) => {\n", + " return \"LangChain\";\n", + " },\n", + "});\n", + "\n", + "await searchTool.invoke(\"foo\");" + ] + }, + { + "cell_type": "markdown", + "id": "d4093dea", + "metadata": {}, + "source": [ + "# Returning artifacts of Tool execution\n", + "\n", + "Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n", + "\n", + "The Tool and `ToolMessage` interfaces make it possible to distinguish between the parts of the tool output meant for the model (`ToolMessage.content`) and those parts which are meant for use outside the model (`ToolMessage.artifact`).\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "This functionality was added in `@langchain/core>=0.2.16`. 
Please make sure your package is up to date.\n", + ":::\n", + "```\n", + "\n", + "If you want your tool to distinguish between message content and other artifacts, we need to do three things:\n", + "\n", + "- Set the `response_format` parameter to `\"content_and_artifact\"` when defining the tool.\n", + "- Make sure that we return a tuple of `[content, artifact]`.\n", + "- Call the tool with a a [`ToolCall`](https://api.js.langchain.com/types/langchain_core.messages_tool.ToolCall.html) (like the ones generated by tool-calling models) rather than with the required schema directly.\n", + "\n", + "Here's an example of what this looks like. First, create a new tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ecf15c35", + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const randomIntToolSchema = z.object({\n", + " min: z.number(),\n", + " max: z.number(),\n", + " size: z.number(),\n", + "});\n", + "\n", + "const generateRandomInts = tool(async ({ min, max, size }) => {\n", + " const array: number[] = [];\n", + " for (let i = 0; i < size; i++) {\n", + " array.push(Math.floor(Math.random() * (max - min + 1)) + min);\n", + " }\n", + " return [\n", + " `Successfully generated array of ${size} random ints in [${min}, ${max}].`,\n", + " array,\n", + " ];\n", + "}, {\n", + " name: \"generateRandomInts\",\n", + " description: \"Generate size random ints in the range [min, max].\",\n", + " schema: randomIntToolSchema,\n", + " responseFormat: \"content_and_artifact\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "5775e686", + "metadata": {}, + "source": [ + "If you invoke our tool directly with the tool arguments, you'll get back just the `content` part of the output:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "ecbde6de", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Successfully generated array of 10 random ints in [0, 9].\"\u001b[39m" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await generateRandomInts.invoke({ min: 0, max: 9, size: 10 });" + ] + }, + { + "cell_type": "markdown", + "id": "6299ef60", + "metadata": {}, + "source": [ + "But if you invoke our tool with a `ToolCall`, you'll get back a ToolMessage that contains both the content and artifact generated by the `Tool`:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "05209573", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ToolMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"Successfully generated array of 10 random ints in [0, 9].\"\u001b[39m,\n", + " artifact: [\n", + " \u001b[33m7\u001b[39m, \u001b[33m7\u001b[39m, \u001b[33m1\u001b[39m, \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m,\n", + " \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m, \u001b[33m3\u001b[39m, \u001b[33m0\u001b[39m, \u001b[33m9\u001b[39m\n", + " ],\n", + " tool_call_id: \u001b[32m\"123\"\u001b[39m,\n", + " name: \u001b[32m\"generateRandomInts\"\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"Successfully generated array of 10 random ints in [0, 9].\"\u001b[39m,\n", + " name: \u001b[32m\"generateRandomInts\"\u001b[39m,\n", + " additional_kwargs: 
{},\n", + " response_metadata: {},\n", + " id: \u001b[90mundefined\u001b[39m,\n", + " tool_call_id: \u001b[32m\"123\"\u001b[39m,\n", + " artifact: [\n", + " \u001b[33m7\u001b[39m, \u001b[33m7\u001b[39m, \u001b[33m1\u001b[39m, \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m,\n", + " \u001b[33m4\u001b[39m, \u001b[33m8\u001b[39m, \u001b[33m3\u001b[39m, \u001b[33m0\u001b[39m, \u001b[33m9\u001b[39m\n", + " ]\n", + "}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await generateRandomInts.invoke({\n", + " name: \"generateRandomInts\",\n", + " args: { min: 0, max: 9, size: 10 },\n", + " id: \"123\", // required\n", + " type: \"tool_call\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "8eceaf09", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "You've now seen a few ways to create custom tools in LangChain.\n", + "\n", + "Next, you might be interested in learning [how to use a chat model to call tools](/docs/how_to/tool_calling/).\n", + "\n", + "You can also check out how to create your own [custom versions of other modules](/docs/how_to/#custom)." ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "await generateRandomInts.invoke({\n", - " name: \"generateRandomInts\",\n", - " args: { min: 0, max: 9, size: 10 },\n", - " id: \"123\", // required\n", - " type: \"tool_call\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "8eceaf09", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "You've now seen a few ways to create custom tools in LangChain.\n", - "\n", - "Next, you might be interested in learning [how to use a chat model to call tools](/docs/how_to/tool_calling/).\n", - "\n", - "You can also check out how to create your own [custom versions of other modules](/docs/how_to/#custom)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" - }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + }, + "vscode": { + "interpreter": { + "hash": "e90c8aa204a57276aa905271aff2d11799d0acb3547adabc5892e639a5e45e34" + } + } }, - "vscode": { - "interpreter": { - "hash": "e90c8aa204a57276aa905271aff2d11799d0acb3547adabc5892e639a5e45e34" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/document_loader_html.ipynb b/docs/core_docs/docs/how_to/document_loader_html.ipynb index 177518ec6c7f..f4d020072658 100644 --- a/docs/core_docs/docs/how_to/document_loader_html.ipynb +++ b/docs/core_docs/docs/how_to/document_loader_html.ipynb @@ -1,176 +1,176 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "0c6c50fc-15e1-4767-925a-53a37c430b9b", - "metadata": {}, - "source": [ - "# How to load HTML\n", - "\n", - "The HyperText Markup Language or [HTML](https://en.wikipedia.org/wiki/HTML) is the standard markup language for documents designed to be displayed in a web browser.\n", - "\n", - "This covers how to load `HTML` documents into a LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects that we can use downstream.\n", - "\n", - "Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/). Head over to the integrations page to find integrations with additional services, such as [FireCrawl](/docs/integrations/document_loaders/web_loaders/firecrawl).\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Documents](/docs/concepts#document)\n", - "- [Document Loaders](/docs/concepts#document-loaders)\n", - "\n", - ":::\n", - "\n", - "## Installation\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "868cfb85", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "Although Unstructured has an open source offering, you're still required to provide an API key to access the service. To get everything up and running, follow these two steps:\n", - "\n", - "1. Download & start the Docker container:\n", - " \n", - "```bash\n", - "docker run -p 8000:8000 -d --rm --name unstructured-api downloads.unstructured.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0\n", - "```\n", - "\n", - "2. 
Get a free API key & API URL [here](https://unstructured.io/api-key), and set it in your environment (as per the Unstructured website, it may take up to an hour to allocate your API key & URL.):\n", - "\n", - "```bash\n", - "export UNSTRUCTURED_API_KEY=\"...\"\n", - "# Replace with your `Full URL` from the email\n", - "export UNSTRUCTURED_API_URL=\"https://-.api.unstructuredapp.io/general/v0/general\" \n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a4d93b2e", - "metadata": {}, - "source": [ - "## Loading HTML with Unstructured" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "7d167ca3-c7c7-4ef0-b509-080629f0f482", - "metadata": {}, - "outputs": [ + "cells": [ + { + "cell_type": "markdown", + "id": "0c6c50fc-15e1-4767-925a-53a37c430b9b", + "metadata": {}, + "source": [ + "# How to load HTML\n", + "\n", + "The HyperText Markup Language or [HTML](https://en.wikipedia.org/wiki/HTML) is the standard markup language for documents designed to be displayed in a web browser.\n", + "\n", + "This covers how to load `HTML` documents into a LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects that we can use downstream.\n", + "\n", + "Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/). Head over to the integrations page to find integrations with additional services, such as [FireCrawl](/docs/integrations/document_loaders/web_loaders/firecrawl).\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Documents](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html)\n", + "- [Document Loaders](/docs/concepts/document_loaders)\n", + "\n", + ":::\n", + "\n", + "## Installation\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'Word of the Day',\n", - " metadata: {\n", - " category_depth: 0,\n", - " languages: [Array],\n", - " filename: 'wordoftheday.html',\n", - " filetype: 'text/html',\n", - " category: 'Title'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: ': April 10, 2023',\n", - " metadata: {\n", - " emphasized_text_contents: [Array],\n", - " emphasized_text_tags: [Array],\n", - " languages: [Array],\n", - " parent_id: 'b845e60d85ff7d10abda4e5f9a37eec8',\n", - " filename: 'wordoftheday.html',\n", - " filetype: 'text/html',\n", - " category: 'UncategorizedText'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: 'foible',\n", - " metadata: {\n", - " category_depth: 1,\n", - " languages: [Array],\n", - " parent_id: 'b845e60d85ff7d10abda4e5f9a37eec8',\n", - " filename: 'wordoftheday.html',\n", - " filetype: 'text/html',\n", - " category: 'Title'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: 'play',\n", - " metadata: {\n", - " category_depth: 0,\n", - " link_texts: [Array],\n", - " link_urls: [Array],\n", - " link_start_indexes: [Array],\n", - " languages: [Array],\n", - " filename: 'wordoftheday.html',\n", - " filetype: 'text/html',\n", - " category: 'Title'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: 'noun',\n", - " metadata: {\n", - " category_depth: 0,\n", - " emphasized_text_contents: [Array],\n", - " emphasized_text_tags: [Array],\n", - " languages: 
[Array],\n", - " filename: 'wordoftheday.html',\n", - " filetype: 'text/html',\n", - " category: 'Title'\n", - " }\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "868cfb85", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "Although Unstructured has an open source offering, you're still required to provide an API key to access the service. To get everything up and running, follow these two steps:\n", + "\n", + "1. Download & start the Docker container:\n", + " \n", + "```bash\n", + "docker run -p 8000:8000 -d --rm --name unstructured-api downloads.unstructured.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0\n", + "```\n", + "\n", + "2. Get a free API key & API URL [here](https://unstructured.io/api-key), and set it in your environment (as per the Unstructured website, it may take up to an hour to allocate your API key & URL.):\n", + "\n", + "```bash\n", + "export UNSTRUCTURED_API_KEY=\"...\"\n", + "# Replace with your `Full URL` from the email\n", + "export UNSTRUCTURED_API_URL=\"https://-.api.unstructuredapp.io/general/v0/general\" \n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a4d93b2e", + "metadata": {}, + "source": [ + "## Loading HTML with Unstructured" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "7d167ca3-c7c7-4ef0-b509-080629f0f482", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'Word of the Day',\n", + " metadata: {\n", + " category_depth: 0,\n", + " languages: [Array],\n", + " filename: 'wordoftheday.html',\n", + " filetype: 'text/html',\n", + " category: 'Title'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: ': April 10, 2023',\n", + " metadata: {\n", + " emphasized_text_contents: [Array],\n", + " emphasized_text_tags: [Array],\n", + " languages: [Array],\n", + " parent_id: 'b845e60d85ff7d10abda4e5f9a37eec8',\n", + " filename: 'wordoftheday.html',\n", + " filetype: 'text/html',\n", + " category: 'UncategorizedText'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: 'foible',\n", + " metadata: {\n", + " category_depth: 1,\n", + " languages: [Array],\n", + " parent_id: 'b845e60d85ff7d10abda4e5f9a37eec8',\n", + " filename: 'wordoftheday.html',\n", + " filetype: 'text/html',\n", + " category: 'Title'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: 'play',\n", + " metadata: {\n", + " category_depth: 0,\n", + " link_texts: [Array],\n", + " link_urls: [Array],\n", + " link_start_indexes: [Array],\n", + " languages: [Array],\n", + " filename: 'wordoftheday.html',\n", + " filetype: 'text/html',\n", + " category: 'Title'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: 'noun',\n", + " metadata: {\n", + " category_depth: 0,\n", + " emphasized_text_contents: [Array],\n", + " emphasized_text_tags: [Array],\n", + " languages: [Array],\n", + " filename: 'wordoftheday.html',\n", + " filetype: 'text/html',\n", + " category: 'Title'\n", + " }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { UnstructuredLoader } from \"@langchain/community/document_loaders/fs/unstructured\";\n", + "\n", + "const filePath = \"../../../../libs/langchain-community/src/tools/fixtures/wordoftheday.html\"\n", + "\n", + "const loader = new UnstructuredLoader(filePath, {\n", + " apiKey: process.env.UNSTRUCTURED_API_KEY,\n", + " apiUrl: process.env.UNSTRUCTURED_API_URL,\n", + "});\n", + "\n", + "const data = await loader.load()\n", + "console.log(data.slice(0, 5));" + ] + } + ], + 
"metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { UnstructuredLoader } from \"@langchain/community/document_loaders/fs/unstructured\";\n", - "\n", - "const filePath = \"../../../../libs/langchain-community/src/tools/fixtures/wordoftheday.html\"\n", - "\n", - "const loader = new UnstructuredLoader(filePath, {\n", - " apiKey: process.env.UNSTRUCTURED_API_KEY,\n", - " apiUrl: process.env.UNSTRUCTURED_API_URL,\n", - "});\n", - "\n", - "const data = await loader.load()\n", - "console.log(data.slice(0, 5));" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/document_loader_markdown.ipynb b/docs/core_docs/docs/how_to/document_loader_markdown.ipynb index 51b130e27d64..a667c0830408 100644 --- a/docs/core_docs/docs/how_to/document_loader_markdown.ipynb +++ b/docs/core_docs/docs/how_to/document_loader_markdown.ipynb @@ -1,295 +1,295 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "d836a98a-ad14-4bed-af76-e1877f7ef8a4", - "metadata": {}, - "source": [ - "# How to load Markdown\n", - "\n", - "[Markdown](https://en.wikipedia.org/wiki/Markdown) is a lightweight markup language for creating formatted text using a plain-text editor.\n", - "\n", - "Here we cover how to load `Markdown` documents into LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects that we can use downstream.\n", - "\n", - "We will cover:\n", - "\n", - "- Basic usage;\n", - "- Parsing of Markdown into elements such as titles, list items, and text.\n", - "\n", - "LangChain implements an [UnstructuredLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_unstructured.UnstructuredLoader.html) class.\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Documents](/docs/concepts#document)\n", - "- [Document Loaders](/docs/concepts#document-loaders)\n", - "\n", - ":::\n", - "\n", - "## Installation\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "897a69e9", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "Although Unstructured has an open source offering, you're still required to provide an API key to access the service. To get everything up and running, follow these two steps:\n", - "\n", - "1. Download & start the Docker container:\n", - " \n", - "```bash\n", - "docker run -p 8000:8000 -d --rm --name unstructured-api downloads.unstructured.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0\n", - "```\n", - "\n", - "2. 
Get a free API key & API URL [here](https://unstructured.io/api-key), and set it in your environment (as per the Unstructured website, it may take up to an hour to allocate your API key & URL.):\n", - "\n", - "```bash\n", - "export UNSTRUCTURED_API_KEY=\"...\"\n", - "# Replace with your `Full URL` from the email\n", - "export UNSTRUCTURED_API_URL=\"https://-.api.unstructuredapp.io/general/v0/general\" \n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "ea8c41f8-a8dc-48cc-b78d-7b3e2427a34c", - "metadata": {}, - "source": [ - "Basic usage will ingest a Markdown file to a single document. Here we demonstrate on LangChain's readme:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "80c50cc4-7ce9-4418-81b9-29c52c7b3627", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: '🦜️🔗 LangChain.js',\n", - " metadata: {\n", - " languages: [Array],\n", - " filename: 'README.md',\n", - " filetype: 'text/markdown',\n", - " category: 'Title'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: '⚡ Building applications with LLMs through composability ⚡',\n", - " metadata: {\n", - " languages: [Array],\n", - " filename: 'README.md',\n", - " filetype: 'text/markdown',\n", - " category: 'Title'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: 'Looking for the Python version? Check out LangChain.',\n", - " metadata: {\n", - " languages: [Array],\n", - " parent_id: '7ea17bcb17b10f303cbb93b4cb95de93',\n", - " filename: 'README.md',\n", - " filetype: 'text/markdown',\n", - " category: 'NarrativeText'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: 'To help you ship LangChain apps to production faster, check out LangSmith.\\n' +\n", - " 'LangSmith is a unified developer platform for building, testing, and monitoring LLM applications.\\n' +\n", - " 'Fill out this form to get on the waitlist or speak with our sales team.',\n", - " metadata: {\n", - " languages: [Array],\n", - " parent_id: '7ea17bcb17b10f303cbb93b4cb95de93',\n", - " filename: 'README.md',\n", - " filetype: 'text/markdown',\n", - " category: 'NarrativeText'\n", - " }\n", - " },\n", - " Document {\n", - " pageContent: '⚡️ Quick Install',\n", - " metadata: {\n", - " languages: [Array],\n", - " filename: 'README.md',\n", - " filetype: 'text/markdown',\n", - " category: 'Title'\n", - " }\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { UnstructuredLoader } from \"@langchain/community/document_loaders/fs/unstructured\";\n", - "\n", - "const markdownPath = \"../../../../README.md\";\n", - "\n", - "const loader = new UnstructuredLoader(markdownPath, {\n", - " apiKey: process.env.UNSTRUCTURED_API_KEY,\n", - " apiUrl: process.env.UNSTRUCTURED_API_URL,\n", - "});\n", - "\n", - "const data = await loader.load()\n", - "console.log(data.slice(0, 5));" - ] - }, - { - "cell_type": "markdown", - "id": "b7560a6e-ca5d-47e1-b176-a9c40e763ff3", - "metadata": {}, - "source": [ - "## Retain Elements\n", - "\n", - "Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `chunkingStrategy: \"by_title\"`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a986bbce-7fd3-41d1-bc47-49f9f57c7cd1", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "d836a98a-ad14-4bed-af76-e1877f7ef8a4", + "metadata": {}, + "source": [ + "# How to load Markdown\n", + "\n", + "[Markdown](https://en.wikipedia.org/wiki/Markdown) is a lightweight markup language for creating formatted text using a plain-text editor.\n", + "\n", + "Here we cover how to load `Markdown` documents into LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects that we can use downstream.\n", + "\n", + "We will cover:\n", + "\n", + "- Basic usage;\n", + "- Parsing of Markdown into elements such as titles, list items, and text.\n", + "\n", + "LangChain implements an [UnstructuredLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_unstructured.UnstructuredLoader.html) class.\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Documents](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html)\n", + "- [Document Loaders](/docs/concepts/document_loaders)\n", + "\n", + ":::\n", + "\n", + "## Installation\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Number of documents: 13\n", - "\n", - "Document {\n", - " pageContent: '🦜️🔗 LangChain.js\\n' +\n", - " '\\n' +\n", - " '⚡ Building applications with LLMs through composability ⚡\\n' +\n", - " '\\n' +\n", - " 'Looking for the Python version? Check out LangChain.\\n' +\n", - " '\\n' +\n", - " 'To help you ship LangChain apps to production faster, check out LangSmith.\\n' +\n", - " 'LangSmith is a unified developer platform for building, testing, and monitoring LLM applications.\\n' +\n", - " 'Fill out this form to get on the waitlist or speak with our sales team.',\n", - " metadata: {\n", - " filename: 'README.md',\n", - " filetype: 'text/markdown',\n", - " languages: [ 'eng' ],\n", - " orig_elements: 'eJzNUtuO0zAQ/ZVRnquSS3PjBcGyPHURgr5tV2hijxNTJ45ip0u14t8Zp1y6CCF4ACFLlufuc+bcPkRkqKfBv9cyegpREWNZosxS0RRVzmeTCiFlnmRUFZmQ0QqinjxK9Mj5D5HShgbsKRS/vX7+8uZ63S9ZIeBP4xLw9NE/6XxvQsDg0M7YkuPIbURDG919Wp1zQu5+llVGfMta7GdFsVo8MniSErZcfdWhHtYfXOj2dcROe0MRN/oRUUmYlI1o+EpilcWZaJo6azaiqXNJdfYvEKUFJvBi1kbqoQUcR6MFem0HB/fad7Dd3jjw3WTntgNh+9E6bLTR/gTn4t9CmhHFTc1w80oKSUlTpFWaFKWsVR5nFf0dpOwdcfoDvi+p2Vp7CJQoOzF+gjcn39kBjjQ5ZucZXHUkDmBnf7H3Sy5e4zQxkUfahYY/4UQqVcZJpSpspKqSMslVllWJzDdMC6XVf8jJzkJHZoSTncF1evwOPSiHdWJhnKycRRAQKHSephWIR0y961lW6/3w7Q3aAcI8aKVJgqQjGTvSBKNBz+T3ywaaLwpdgSfnlwcOEno7aG+nsCcW6iP58ohX2phlru94xtKLf9iSB/5d2Ok9smC1Y3sCNxIezpq3M5toiAER9r/a6t1n6BJ/zg==',\n", - " category: 'CompositeElement'\n", - " }\n", - "}\n", - "\n", - "\n", - "Document {\n", - " pageContent: '⚡️ Quick Install\\n' +\n", - " '\\n' +\n", - " 'You can use npm, yarn, or pnpm to install LangChain.js\\n' +\n", - " '\\n' +\n", - " 'npm install -S langchain or yarn add langchain or pnpm add langchain\\n' +\n", - " '\\n' +\n", - " 'typescript\\n' +\n", - " 'import { ChatOpenAI } from \"langchain/chat_models/openai\";\\n' +\n", - " '\\n' +\n", - " '🌐 Supported Environments\\n' +\n", - " '\\n' +\n", - " 'LangChain is written in TypeScript and can be used in:\\n' +\n", - " '\\n' +\n", - " 'Node.js (ESM and CommonJS) - 18.x, 19.x, 20.x\\n' +\n", - " '\\n' +\n", - " 
'Cloudflare Workers\\n' +\n", - " '\\n' +\n", - " 'Vercel / Next.js (Browser, Serverless and Edge functions)\\n' +\n", - " '\\n' +\n", - " 'Supabase Edge Functions\\n' +\n", - " '\\n' +\n", - " 'Browser\\n' +\n", - " '\\n' +\n", - " 'Deno',\n", - " metadata: {\n", - " filename: 'README.md',\n", - " filetype: 'text/markdown',\n", - " languages: [ 'eng' ],\n", - " orig_elements: 'eJzNlm1v2zYQx7/KQa9WwE1Iik/qXnWpB2RoM2wOOgx1URzJY6pVogyJTlME/e6j3KZIhgBzULjIG0Li3VH+/e/BfHNdUUc9pfyuDdUzqGzUjUUda1ZbL7R1UQetnNdMK9swVy2g6iljwIzF/7qKbUcJe5qD/1w+f/FqedSH2Ws25E+bnSHTVT5+n/tuNnSYLrZ4QVOxvKkoXVRvPy+++My+663QyNfbSCzCH9vWf4DTNGXsdsE3J563uaOqxP0XIDSxCdobSZIYd9w7JpQlLU3TaKf4YQDK7gbHB8h4m/jvYQseE2wngrTpF/AJx7SAYYRNeYU8QPtFAHhZvnzyHtt09M90W40zHEfM7SWdz0fep0otuUISLBqMjfNFjMYzI6SWFFWQj1CVGf2G++kK5uP9jD7rMgsEGMLd3Z1ad3YfpJHWsubSchGQeNRItUGPElF7wck2hy/9OWbyY7vJ69T2m2HMcA0l3/n3DaXnp/AZ4jj0sK6+AR6XNb/rh0DddDwUL2zX1c97NUpjVAEOxkh0tbOaN1qU1vG8VtYGe6CSuNvpwda+rJEzWG03MzAFWKbLdhzS/FOnvUhcdChlNC6iKBWuJVrCGMhxIaKMP6i4/1fP2+jfGhnaCT6Obc5UHhOcl4+vdhUAmMJuKjiaB0Mo1mcPKmdBvlFWK6ZMaXfNI2ojIvNORMsUHWiSf5cqZ6WOy2SDn5arVzv+k6Hvh/Tb6gk8BW6PrhbAm3kV7Ojqthgv2ymfZurvrQ4hvRLCSaUEj8YG77TzQTNriYv6B/0hPEiHk24oTdGVePhrGD/QOO0LyxRHKZivAxldS41akzXcxELPm/oxJv01jZ46OIazsrHL/i/j8HGicQErGi9p7GiadtWwDBcEcZt8boc0PdlXE9KlAoSkZh4PtUBZ5oRjTAbiSgd3oLn+XZqUYYgOy3Vgh/zrDfK+xA0rqY6GaQrGo5JM1azcgawzjeOa2CMk/przvXMayvXQEA8meEmCsxiDrkO54/iAVvtHSPiC0nA/3tt/AY+igwk=',\n", - " category: 'CompositeElement'\n", - " }\n", - "}\n", - "\n", - "\n" - ] - } - ], - "source": [ - "const loaderByTitle = new UnstructuredLoader(markdownPath, {\n", - " chunkingStrategy: \"by_title\"\n", - "});\n", - "\n", - "\n", - "const loadedDocs = await loaderByTitle.load()\n", - "\n", - "console.log(`Number of documents: ${loadedDocs.length}\\n`)\n", - "\n", - "for (const doc of loadedDocs.slice(0, 2)) {\n", - " console.log(doc);\n", - " console.log(\"\\n\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "117dc6b0-9baa-44a2-9d1d-fc38ecf7a233", - "metadata": {}, - "source": [ - "Note that in this case we recover just one distinct element type:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "75abc139-3ded-4e8e-9f21-d0c8ec40fdfc", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "897a69e9", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "Although Unstructured has an open source offering, you're still required to provide an API key to access the service. To get everything up and running, follow these two steps:\n", + "\n", + "1. Download & start the Docker container:\n", + " \n", + "```bash\n", + "docker run -p 8000:8000 -d --rm --name unstructured-api downloads.unstructured.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0\n", + "```\n", + "\n", + "2. Get a free API key & API URL [here](https://unstructured.io/api-key), and set it in your environment (as per the Unstructured website, it may take up to an hour to allocate your API key & URL.):\n", + "\n", + "```bash\n", + "export UNSTRUCTURED_API_KEY=\"...\"\n", + "# Replace with your `Full URL` from the email\n", + "export UNSTRUCTURED_API_URL=\"https://-.api.unstructuredapp.io/general/v0/general\" \n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "ea8c41f8-a8dc-48cc-b78d-7b3e2427a34c", + "metadata": {}, + "source": [ + "Basic usage will ingest a Markdown file to a single document. 
Here we demonstrate on LangChain's readme:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "80c50cc4-7ce9-4418-81b9-29c52c7b3627", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: '🦜️🔗 LangChain.js',\n", + " metadata: {\n", + " languages: [Array],\n", + " filename: 'README.md',\n", + " filetype: 'text/markdown',\n", + " category: 'Title'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: '⚡ Building applications with LLMs through composability ⚡',\n", + " metadata: {\n", + " languages: [Array],\n", + " filename: 'README.md',\n", + " filetype: 'text/markdown',\n", + " category: 'Title'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: 'Looking for the Python version? Check out LangChain.',\n", + " metadata: {\n", + " languages: [Array],\n", + " parent_id: '7ea17bcb17b10f303cbb93b4cb95de93',\n", + " filename: 'README.md',\n", + " filetype: 'text/markdown',\n", + " category: 'NarrativeText'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: 'To help you ship LangChain apps to production faster, check out LangSmith.\\n' +\n", + " 'LangSmith is a unified developer platform for building, testing, and monitoring LLM applications.\\n' +\n", + " 'Fill out this form to get on the waitlist or speak with our sales team.',\n", + " metadata: {\n", + " languages: [Array],\n", + " parent_id: '7ea17bcb17b10f303cbb93b4cb95de93',\n", + " filename: 'README.md',\n", + " filetype: 'text/markdown',\n", + " category: 'NarrativeText'\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: '⚡️ Quick Install',\n", + " metadata: {\n", + " languages: [Array],\n", + " filename: 'README.md',\n", + " filetype: 'text/markdown',\n", + " category: 'Title'\n", + " }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { UnstructuredLoader } from \"@langchain/community/document_loaders/fs/unstructured\";\n", + "\n", + "const markdownPath = \"../../../../README.md\";\n", + "\n", + "const loader = new UnstructuredLoader(markdownPath, {\n", + " apiKey: process.env.UNSTRUCTURED_API_KEY,\n", + " apiUrl: process.env.UNSTRUCTURED_API_URL,\n", + "});\n", + "\n", + "const data = await loader.load()\n", + "console.log(data.slice(0, 5));" + ] + }, + { + "cell_type": "markdown", + "id": "b7560a6e-ca5d-47e1-b176-a9c40e763ff3", + "metadata": {}, + "source": [ + "## Retain Elements\n", + "\n", + "Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `chunkingStrategy: \"by_title\"`." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Set(1) { 'CompositeElement' }\n" - ] + "cell_type": "code", + "execution_count": 2, + "id": "a986bbce-7fd3-41d1-bc47-49f9f57c7cd1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of documents: 13\n", + "\n", + "Document {\n", + " pageContent: '🦜️🔗 LangChain.js\\n' +\n", + " '\\n' +\n", + " '⚡ Building applications with LLMs through composability ⚡\\n' +\n", + " '\\n' +\n", + " 'Looking for the Python version? 
Check out LangChain.\\n' +\n", + " '\\n' +\n", + " 'To help you ship LangChain apps to production faster, check out LangSmith.\\n' +\n", + " 'LangSmith is a unified developer platform for building, testing, and monitoring LLM applications.\\n' +\n", + " 'Fill out this form to get on the waitlist or speak with our sales team.',\n", + " metadata: {\n", + " filename: 'README.md',\n", + " filetype: 'text/markdown',\n", + " languages: [ 'eng' ],\n", + " orig_elements: 'eJzNUtuO0zAQ/ZVRnquSS3PjBcGyPHURgr5tV2hijxNTJ45ip0u14t8Zp1y6CCF4ACFLlufuc+bcPkRkqKfBv9cyegpREWNZosxS0RRVzmeTCiFlnmRUFZmQ0QqinjxK9Mj5D5HShgbsKRS/vX7+8uZ63S9ZIeBP4xLw9NE/6XxvQsDg0M7YkuPIbURDG919Wp1zQu5+llVGfMta7GdFsVo8MniSErZcfdWhHtYfXOj2dcROe0MRN/oRUUmYlI1o+EpilcWZaJo6azaiqXNJdfYvEKUFJvBi1kbqoQUcR6MFem0HB/fad7Dd3jjw3WTntgNh+9E6bLTR/gTn4t9CmhHFTc1w80oKSUlTpFWaFKWsVR5nFf0dpOwdcfoDvi+p2Vp7CJQoOzF+gjcn39kBjjQ5ZucZXHUkDmBnf7H3Sy5e4zQxkUfahYY/4UQqVcZJpSpspKqSMslVllWJzDdMC6XVf8jJzkJHZoSTncF1evwOPSiHdWJhnKycRRAQKHSephWIR0y961lW6/3w7Q3aAcI8aKVJgqQjGTvSBKNBz+T3ywaaLwpdgSfnlwcOEno7aG+nsCcW6iP58ohX2phlru94xtKLf9iSB/5d2Ok9smC1Y3sCNxIezpq3M5toiAER9r/a6t1n6BJ/zg==',\n", + " category: 'CompositeElement'\n", + " }\n", + "}\n", + "\n", + "\n", + "Document {\n", + " pageContent: '⚡️ Quick Install\\n' +\n", + " '\\n' +\n", + " 'You can use npm, yarn, or pnpm to install LangChain.js\\n' +\n", + " '\\n' +\n", + " 'npm install -S langchain or yarn add langchain or pnpm add langchain\\n' +\n", + " '\\n' +\n", + " 'typescript\\n' +\n", + " 'import { ChatOpenAI } from \"langchain/chat_models/openai\";\\n' +\n", + " '\\n' +\n", + " '🌐 Supported Environments\\n' +\n", + " '\\n' +\n", + " 'LangChain is written in TypeScript and can be used in:\\n' +\n", + " '\\n' +\n", + " 'Node.js (ESM and CommonJS) - 18.x, 19.x, 20.x\\n' +\n", + " '\\n' +\n", + " 'Cloudflare Workers\\n' +\n", + " '\\n' +\n", + " 'Vercel / Next.js (Browser, Serverless and Edge functions)\\n' +\n", + " '\\n' +\n", + " 'Supabase Edge Functions\\n' +\n", + " '\\n' +\n", + " 'Browser\\n' +\n", + " '\\n' +\n", + " 'Deno',\n", + " metadata: {\n", + " filename: 'README.md',\n", + " filetype: 'text/markdown',\n", + " languages: [ 'eng' ],\n", + " orig_elements: 'eJzNlm1v2zYQx7/KQa9WwE1Iik/qXnWpB2RoM2wOOgx1URzJY6pVogyJTlME/e6j3KZIhgBzULjIG0Li3VH+/e/BfHNdUUc9pfyuDdUzqGzUjUUda1ZbL7R1UQetnNdMK9swVy2g6iljwIzF/7qKbUcJe5qD/1w+f/FqedSH2Ws25E+bnSHTVT5+n/tuNnSYLrZ4QVOxvKkoXVRvPy+++My+663QyNfbSCzCH9vWf4DTNGXsdsE3J563uaOqxP0XIDSxCdobSZIYd9w7JpQlLU3TaKf4YQDK7gbHB8h4m/jvYQseE2wngrTpF/AJx7SAYYRNeYU8QPtFAHhZvnzyHtt09M90W40zHEfM7SWdz0fep0otuUISLBqMjfNFjMYzI6SWFFWQj1CVGf2G++kK5uP9jD7rMgsEGMLd3Z1ad3YfpJHWsubSchGQeNRItUGPElF7wck2hy/9OWbyY7vJ69T2m2HMcA0l3/n3DaXnp/AZ4jj0sK6+AR6XNb/rh0DddDwUL2zX1c97NUpjVAEOxkh0tbOaN1qU1vG8VtYGe6CSuNvpwda+rJEzWG03MzAFWKbLdhzS/FOnvUhcdChlNC6iKBWuJVrCGMhxIaKMP6i4/1fP2+jfGhnaCT6Obc5UHhOcl4+vdhUAmMJuKjiaB0Mo1mcPKmdBvlFWK6ZMaXfNI2ojIvNORMsUHWiSf5cqZ6WOy2SDn5arVzv+k6Hvh/Tb6gk8BW6PrhbAm3kV7Ojqthgv2ymfZurvrQ4hvRLCSaUEj8YG77TzQTNriYv6B/0hPEiHk24oTdGVePhrGD/QOO0LyxRHKZivAxldS41akzXcxELPm/oxJv01jZ46OIazsrHL/i/j8HGicQErGi9p7GiadtWwDBcEcZt8boc0PdlXE9KlAoSkZh4PtUBZ5oRjTAbiSgd3oLn+XZqUYYgOy3Vgh/zrDfK+xA0rqY6GaQrGo5JM1azcgawzjeOa2CMk/przvXMayvXQEA8meEmCsxiDrkO54/iAVvtHSPiC0nA/3tt/AY+igwk=',\n", + " category: 'CompositeElement'\n", + " }\n", + "}\n", + "\n", + "\n" + ] + } + ], + "source": [ + "const loaderByTitle = new UnstructuredLoader(markdownPath, {\n", + " chunkingStrategy: \"by_title\"\n", + "});\n", + "\n", + "\n", + "const loadedDocs = await loaderByTitle.load()\n", + "\n", + "console.log(`Number 
of documents: ${loadedDocs.length}\\n`)\n", + "\n", + "for (const doc of loadedDocs.slice(0, 2)) {\n", + " console.log(doc);\n", + " console.log(\"\\n\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "117dc6b0-9baa-44a2-9d1d-fc38ecf7a233", + "metadata": {}, + "source": [ + "Note that in this case we recover just one distinct element type:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "75abc139-3ded-4e8e-9f21-d0c8ec40fdfc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Set(1) { 'CompositeElement' }\n" + ] + } + ], + "source": [ + "const categories = new Set(data.map((document) => document.metadata.category));\n", + "console.log(categories);" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const categories = new Set(data.map((document) => document.metadata.category));\n", - "console.log(categories);" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/embed_text.mdx b/docs/core_docs/docs/how_to/embed_text.mdx index 2a0c5c34cad5..e531663d5048 100644 --- a/docs/core_docs/docs/how_to/embed_text.mdx +++ b/docs/core_docs/docs/how_to/embed_text.mdx @@ -12,7 +12,7 @@ Head to [Integrations](/docs/integrations/text_embedding) for documentation on b This guide assumes familiarity with the following concepts: -- [Embeddings](/docs/concepts/#embedding-models) +- [Embeddings](/docs/concepts/embedding_models) ::: diff --git a/docs/core_docs/docs/how_to/ensemble_retriever.mdx b/docs/core_docs/docs/how_to/ensemble_retriever.mdx index 218a76bcff8c..e86b4ecdedbf 100644 --- a/docs/core_docs/docs/how_to/ensemble_retriever.mdx +++ b/docs/core_docs/docs/how_to/ensemble_retriever.mdx @@ -4,8 +4,8 @@ This guide assumes familiarity with the following concepts: -- [Documents](/docs/concepts#document) -- [Retrievers](/docs/concepts#retrievers) +- [Documents](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) +- [Retrievers](/docs/concepts/retrievers) ::: diff --git a/docs/core_docs/docs/how_to/example_selectors.ipynb b/docs/core_docs/docs/how_to/example_selectors.ipynb index ca82ad234880..fbda2184ecd1 100644 --- a/docs/core_docs/docs/how_to/example_selectors.ipynb +++ b/docs/core_docs/docs/how_to/example_selectors.ipynb @@ -1,286 +1,286 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "af408f61", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 1\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "1a65e4c9", - "metadata": {}, - "source": [ - "# How to use example selectors\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Few-shot 
examples](/docs/how_to/few_shot_examples)\n", - "\n", - ":::\n", - "\n", - "If you have a large number of examples, you may need to select which ones to include in the prompt. The Example Selector is the class responsible for doing so.\n", - "\n", - "The base interface is defined as below:\n", - "\n", - "```typescript\n", - "class BaseExampleSelector {\n", - " addExample(example: Example): Promise;\n", - "\n", - " selectExamples(input_variables: Example): Promise;\n", - "}\n", - "```\n", - "\n", - "The only method it needs to define is a `selectExamples` method. This takes in the input variables and then returns a list of examples. It is up to each specific implementation as to how those examples are selected.\n", - "\n", - "LangChain has a few different types of example selectors. For an overview of all these types, see the below table.\n", - "\n", - "In this guide, we will walk through creating a custom example selector." - ] - }, - { - "cell_type": "markdown", - "id": "638e9039", - "metadata": {}, - "source": [ - "## Examples\n", - "\n", - "In order to use an example selector, we need to create a list of examples. These should generally be example inputs and outputs. For this demo purpose, let's imagine we are selecting examples of how to translate English to Italian." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "48658d53", - "metadata": {}, - "outputs": [], - "source": [ - "const examples = [\n", - " { input: \"hi\", output: \"ciao\" },\n", - " { input: \"bye\", output: \"arrivaderci\" },\n", - " { input: \"soccer\", output: \"calcio\" },\n", - "];" - ] - }, - { - "cell_type": "markdown", - "id": "c2830b49", - "metadata": {}, - "source": [ - "## Custom Example Selector\n", - "\n", - "Let's write an example selector that chooses what example to pick based on the length of the word." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "56b740a1", - "metadata": {}, - "outputs": [], - "source": [ - "import { BaseExampleSelector } from \"@langchain/core/example_selectors\";\n", - "import { Example } from \"@langchain/core/prompts\";\n", - "\n", - "\n", - "class CustomExampleSelector extends BaseExampleSelector {\n", - " private examples: Example[];\n", - " \n", - " constructor(examples: Example[]) {\n", - " super();\n", - " this.examples = examples;\n", - " }\n", - " \n", - " async addExample(example: Example): Promise {\n", - " this.examples.push(example);\n", - " return;\n", - " }\n", - " \n", - " async selectExamples(inputVariables: Example): Promise {\n", - " // This assumes knowledge that part of the input will be a 'text' key\n", - " const newWord = inputVariables.input;\n", - " const newWordLength = newWord.length;\n", - " \n", - " // Initialize variables to store the best match and its length difference\n", - " let bestMatch: Example | null = null;\n", - " let smallestDiff = Infinity;\n", - " \n", - " // Iterate through each example\n", - " for (const example of this.examples) {\n", - " // Calculate the length difference with the first word of the example\n", - " const currentDiff = Math.abs(example.input.length - newWordLength);\n", - " \n", - " // Update the best match if the current one is closer in length\n", - " if (currentDiff < smallestDiff) {\n", - " smallestDiff = currentDiff;\n", - " bestMatch = example;\n", - " }\n", - " }\n", - " \n", - " return bestMatch ? 
[bestMatch] : [];\n", - " }\n", - " }" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "ce928187", - "metadata": {}, - "outputs": [], - "source": [ - "const exampleSelector = new CustomExampleSelector(examples)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "37ef3149", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "[ { input: \u001b[32m\"bye\"\u001b[39m, output: \u001b[32m\"arrivaderci\"\u001b[39m } ]" + "cell_type": "raw", + "id": "af408f61", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 1\n", + "---" ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await exampleSelector.selectExamples({ input: \"okay\" })" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "c5ad9f35", - "metadata": {}, - "outputs": [], - "source": [ - "await exampleSelector.addExample({ input: \"hand\", output: \"mano\" })" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "e4127fe0", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[ { input: \u001b[32m\"hand\"\u001b[39m, output: \u001b[32m\"mano\"\u001b[39m } ]" + "cell_type": "markdown", + "id": "1a65e4c9", + "metadata": {}, + "source": [ + "# How to use example selectors\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Few-shot examples](/docs/how_to/few_shot_examples)\n", + "\n", + ":::\n", + "\n", + "If you have a large number of examples, you may need to select which ones to include in the prompt. The Example Selector is the class responsible for doing so.\n", + "\n", + "The base interface is defined as below:\n", + "\n", + "```typescript\n", + "class BaseExampleSelector {\n", + " addExample(example: Example): Promise;\n", + "\n", + " selectExamples(input_variables: Example): Promise;\n", + "}\n", + "```\n", + "\n", + "The only method it needs to define is a `selectExamples` method. This takes in the input variables and then returns a list of examples. It is up to each specific implementation as to how those examples are selected.\n", + "\n", + "LangChain has a few different types of example selectors. For an overview of all these types, see the below table.\n", + "\n", + "In this guide, we will walk through creating a custom example selector." ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await exampleSelector.selectExamples({ input: \"okay\" })" - ] - }, - { - "cell_type": "markdown", - "id": "786c920c", - "metadata": {}, - "source": [ - "## Use in a Prompt\n", - "\n", - "We can now use this example selector in a prompt" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "619090e2", - "metadata": {}, - "outputs": [], - "source": [ - "import { PromptTemplate, FewShotPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const examplePrompt = PromptTemplate.fromTemplate(\"Input: {input} -> Output: {output}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "5934c415", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "id": "638e9039", + "metadata": {}, + "source": [ + "## Examples\n", + "\n", + "In order to use an example selector, we need to create a list of examples. These should generally be example inputs and outputs. 
For this demo purpose, let's imagine we are selecting examples of how to translate English to Italian." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "48658d53", + "metadata": {}, + "outputs": [], + "source": [ + "const examples = [\n", + " { input: \"hi\", output: \"ciao\" },\n", + " { input: \"bye\", output: \"arrivaderci\" },\n", + " { input: \"soccer\", output: \"calcio\" },\n", + "];" + ] + }, + { + "cell_type": "markdown", + "id": "c2830b49", + "metadata": {}, + "source": [ + "## Custom Example Selector\n", + "\n", + "Let's write an example selector that chooses what example to pick based on the length of the word." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Translate the following words from English to Italain:\n", - "\n", - "Input: hand -> Output: mano\n", - "\n", - "Input: word -> Output:\n" - ] + "cell_type": "code", + "execution_count": 4, + "id": "56b740a1", + "metadata": {}, + "outputs": [], + "source": [ + "import { BaseExampleSelector } from \"@langchain/core/example_selectors\";\n", + "import { Example } from \"@langchain/core/prompts\";\n", + "\n", + "\n", + "class CustomExampleSelector extends BaseExampleSelector {\n", + " private examples: Example[];\n", + " \n", + " constructor(examples: Example[]) {\n", + " super();\n", + " this.examples = examples;\n", + " }\n", + " \n", + " async addExample(example: Example): Promise {\n", + " this.examples.push(example);\n", + " return;\n", + " }\n", + " \n", + " async selectExamples(inputVariables: Example): Promise {\n", + " // This assumes knowledge that part of the input will be a 'text' key\n", + " const newWord = inputVariables.input;\n", + " const newWordLength = newWord.length;\n", + " \n", + " // Initialize variables to store the best match and its length difference\n", + " let bestMatch: Example | null = null;\n", + " let smallestDiff = Infinity;\n", + " \n", + " // Iterate through each example\n", + " for (const example of this.examples) {\n", + " // Calculate the length difference with the first word of the example\n", + " const currentDiff = Math.abs(example.input.length - newWordLength);\n", + " \n", + " // Update the best match if the current one is closer in length\n", + " if (currentDiff < smallestDiff) {\n", + " smallestDiff = currentDiff;\n", + " bestMatch = example;\n", + " }\n", + " }\n", + " \n", + " return bestMatch ? 
[bestMatch] : [];\n", + " }\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "ce928187", + "metadata": {}, + "outputs": [], + "source": [ + "const exampleSelector = new CustomExampleSelector(examples)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "37ef3149", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[ { input: \u001b[32m\"bye\"\u001b[39m, output: \u001b[32m\"arrivaderci\"\u001b[39m } ]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await exampleSelector.selectExamples({ input: \"okay\" })" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c5ad9f35", + "metadata": {}, + "outputs": [], + "source": [ + "await exampleSelector.addExample({ input: \"hand\", output: \"mano\" })" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "e4127fe0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[ { input: \u001b[32m\"hand\"\u001b[39m, output: \u001b[32m\"mano\"\u001b[39m } ]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await exampleSelector.selectExamples({ input: \"okay\" })" + ] + }, + { + "cell_type": "markdown", + "id": "786c920c", + "metadata": {}, + "source": [ + "## Use in a Prompt\n", + "\n", + "We can now use this example selector in a prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "619090e2", + "metadata": {}, + "outputs": [], + "source": [ + "import { PromptTemplate, FewShotPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const examplePrompt = PromptTemplate.fromTemplate(\"Input: {input} -> Output: {output}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "5934c415", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Translate the following words from English to Italain:\n", + "\n", + "Input: hand -> Output: mano\n", + "\n", + "Input: word -> Output:\n" + ] + } + ], + "source": [ + "const prompt = new FewShotPromptTemplate({\n", + " exampleSelector,\n", + " examplePrompt,\n", + " suffix: \"Input: {input} -> Output:\",\n", + " prefix: \"Translate the following words from English to Italain:\",\n", + " inputVariables: [\"input\"],\n", + "})\n", + "\n", + "console.log(await prompt.format({ input: \"word\" }))" + ] + }, + { + "cell_type": "markdown", + "id": "e767f69d", + "metadata": {}, + "source": [ + "## Example Selector Types\n", + "\n", + "| Name | Description |\n", + "|------------|---------------------------------------------------------------------------------------------|\n", + "| Similarity | Uses semantic similarity between inputs and examples to decide which examples to choose. 
|\n", + "| Length | Selects examples based on how many can fit within a certain length |\n", + "\n", + "## Next steps\n", + "\n", + "You've now learned a bit about using example selectors to few shot LLMs.\n", + "\n", + "Next, check out some guides on some other techniques for selecting examples:\n", + "\n", + "- [How to select examples by length](/docs/how_to/example_selectors_length_based)\n", + "- [How to select examples by similarity](/docs/how_to/example_selectors_similarity)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "const prompt = new FewShotPromptTemplate({\n", - " exampleSelector,\n", - " examplePrompt,\n", - " suffix: \"Input: {input} -> Output:\",\n", - " prefix: \"Translate the following words from English to Italain:\",\n", - " inputVariables: [\"input\"],\n", - "})\n", - "\n", - "console.log(await prompt.format({ input: \"word\" }))" - ] - }, - { - "cell_type": "markdown", - "id": "e767f69d", - "metadata": {}, - "source": [ - "## Example Selector Types\n", - "\n", - "| Name | Description |\n", - "|------------|---------------------------------------------------------------------------------------------|\n", - "| Similarity | Uses semantic similarity between inputs and examples to decide which examples to choose. |\n", - "| Length | Selects examples based on how many can fit within a certain length |\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned a bit about using example selectors to few shot LLMs.\n", - "\n", - "Next, check out some guides on some other techniques for selecting examples:\n", - "\n", - "- [How to select examples by length](/docs/how_to/example_selectors_length_based)\n", - "- [How to select examples by similarity](/docs/how_to/example_selectors_similarity)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/example_selectors_langsmith.ipynb b/docs/core_docs/docs/how_to/example_selectors_langsmith.ipynb index 1917d4b67f97..e798e370e518 100644 --- a/docs/core_docs/docs/how_to/example_selectors_langsmith.ipynb +++ b/docs/core_docs/docs/how_to/example_selectors_langsmith.ipynb @@ -1,553 +1,553 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "4f7e423b", - "metadata": {}, - "source": [ - "# How to select examples from a LangSmith dataset\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Prerequisites\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Few-shot-prompting](/docs/concepts/#few-shot-prompting)\n", - "- [LangSmith](/docs/concepts/#langsmith)\n", - "\n", - ":::\n", - "\n", - "\n", - ":::note Compatibility\n", - "\n", - "- `langsmith` >= 0.1.43\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "LangSmith datasets have built-in support for similarity search, making them a great tool for building and querying few-shot examples.\n", - "\n", - "In this guide we'll see how to use an indexed LangSmith dataset as a 
few-shot example selector.\n", - "\n", - "## Setup\n", - "\n", - "Before getting started make sure you've [created a LangSmith account](https://smith.langchain.com/) and set your credentials:\n", - "\n", - "```typescript\n", - "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", - "process.env.LANGSMITH_TRACING=\"true\"\n", - "```\n", - "\n", - "We'll need to install the `langsmith` SDK. In this example we'll also make use of `langchain` and `@langchain/anthropic`:\n", - "\n", - "```{=mdx}\n", - "\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", - "\n", - "\n", - " langsmith langchain @langchain/anthropic @langchain/core zod zod-to-json-schema\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "fc716e12", - "metadata": {}, - "source": [ - "Now we'll clone a public dataset and turn on indexing for the dataset. We can also turn on indexing via the [LangSmith UI](https://docs.smith.langchain.com/how_to_guides/datasets/index_datasets_for_dynamic_few_shot_example_selection).\n", - "\n", - "We'll create a clone the [Multiverse math few shot example dataset](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/).\n", - "\n", - "This enables searching over the dataset, and will make sure that anytime we update/add examples they are also indexed.\n", - "\n", - "The first step to creating a clone is to read the JSON file containing the examples and convert them to the format expected by LangSmith for creating examples:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "2bcc86a0", - "metadata": {}, - "outputs": [], - "source": [ - "import { Client as LangSmithClient } from 'langsmith';\n", - "import { z } from 'zod';\n", - "import { zodToJsonSchema } from 'zod-to-json-schema';\n", - "import fs from \"fs/promises\";\n", - "\n", - "// Read the example dataset and convert to the format expected by the LangSmith API\n", - "// for creating new examples\n", - "const examplesJson = JSON.parse(\n", - " await fs.readFile(\"../../data/ls_few_shot_example_dataset.json\", \"utf-8\")\n", - ");\n", - "\n", - "let inputs: Record[] = [];\n", - "let outputs: Record[] = [];\n", - "let metadata: Record[] = [];\n", - "\n", - "examplesJson.forEach((ex) => {\n", - " inputs.push(ex.inputs);\n", - " outputs.push(ex.outputs);\n", - " metadata.push(ex.metadata);\n", - "});\n", - "\n", - "// Define our input schema as this is required for indexing\n", - "const inputsSchema = zodToJsonSchema(z.object({\n", - " input: z.string(),\n", - " system: z.boolean().optional(),\n", - "}));\n", - "\n", - "const lsClient = new LangSmithClient();\n", - "\n", - "await lsClient.deleteDataset({ datasetName: \"multiverse-math-examples-for-few-shot-example\" })\n", - "\n", - "const dataset = await lsClient.createDataset(\"multiverse-math-examples-for-few-shot-example\", {\n", - " inputsSchema,\n", - "});\n", - "\n", - "const createdExamples = await lsClient.createExamples({\n", - " inputs,\n", - " outputs,\n", - " metadata,\n", - " datasetId: dataset.id,\n", - "})\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "01b5a8f3", - "metadata": {}, - "outputs": [], - "source": [ - "await lsClient.indexDataset({ datasetId: dataset.id });" - ] - }, - { - "cell_type": "markdown", - "id": "5767d171", - "metadata": {}, - "source": [ - "Once the dataset is indexed, we can search for similar examples like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "5013a56f", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - 
"output_type": "stream", - "text": [ - "3\n" - ] - } - ], - "source": [ - "const examples = await lsClient.similarExamples(\n", - " { input: \"whats the negation of the negation of the negation of 3\" },\n", - " dataset.id,\n", - " 3,\n", - ")\n", - "console.log(examples.length)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "a142db06", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "4f7e423b", + "metadata": {}, + "source": [ + "# How to select examples from a LangSmith dataset\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Prerequisites\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Few-shot-prompting](/docs/concepts/few_shot_prompting)\n", + "- [LangSmith](/docs/concepts/#langsmith)\n", + "\n", + ":::\n", + "\n", + "\n", + ":::note Compatibility\n", + "\n", + "- `langsmith` >= 0.1.43\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "LangSmith datasets have built-in support for similarity search, making them a great tool for building and querying few-shot examples.\n", + "\n", + "In this guide we'll see how to use an indexed LangSmith dataset as a few-shot example selector.\n", + "\n", + "## Setup\n", + "\n", + "Before getting started make sure you've [created a LangSmith account](https://smith.langchain.com/) and set your credentials:\n", + "\n", + "```typescript\n", + "process.env.LANGSMITH_API_KEY=\"your-api-key\"\n", + "process.env.LANGSMITH_TRACING=\"true\"\n", + "```\n", + "\n", + "We'll need to install the `langsmith` SDK. In this example we'll also make use of `langchain` and `@langchain/anthropic`:\n", + "\n", + "```{=mdx}\n", + "\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", + "\n", + "\n", + " langsmith langchain @langchain/anthropic @langchain/core zod zod-to-json-schema\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "evaluate the negation of -100\n" - ] - } - ], - "source": [ - "console.log(examples[0].inputs.input)" - ] - }, - { - "cell_type": "markdown", - "id": "d2627125", - "metadata": {}, - "source": [ - "For this dataset the outputs are an entire chat history:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "af5b9191", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "fc716e12", + "metadata": {}, + "source": [ + "Now we'll clone a public dataset and turn on indexing for the dataset. We can also turn on indexing via the [LangSmith UI](https://docs.smith.langchain.com/how_to_guides/datasets/index_datasets_for_dynamic_few_shot_example_selection).\n", + "\n", + "We'll create a clone the [Multiverse math few shot example dataset](https://blog.langchain.dev/few-shot-prompting-to-improve-tool-calling-performance/).\n", + "\n", + "This enables searching over the dataset, and will make sure that anytime we update/add examples they are also indexed.\n", + "\n", + "The first step to creating a clone is to read the JSON file containing the examples and convert them to the format expected by LangSmith for creating examples:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " id: 'cbe7ed83-86e1-4e46-89de-6646f8b55cef',\n", - " type: 'system',\n", - " content: 'You are requested to solve math questions in an alternate mathematical universe. The operations have been altered to yield different results than expected. Do not guess the answer or rely on your innate knowledge of math. Use the provided tools to answer the question. 
While associativity and commutativity apply, distributivity does not. Answer the question using the fewest possible tools. Only include the numeric response without any clarifications.',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " {\n", - " id: '04946246-09a8-4465-be95-037efd7dae55',\n", - " type: 'human',\n", - " content: 'if one gazoink is 4 badoinks, each of which is 6 foos, each of wich is 3 bars - how many bars in 3 gazoinks?',\n", - " example: false,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " {\n", - " id: 'run-d6f0954e-b21b-4ea8-ad98-0ee64cfc824e-0',\n", - " type: 'ai',\n", - " content: [ [Object] ],\n", - " example: false,\n", - " tool_calls: [ [Object] ],\n", - " usage_metadata: { input_tokens: 916, total_tokens: 984, output_tokens: 68 },\n", - " additional_kwargs: {},\n", - " response_metadata: {\n", - " id: 'msg_01MBWxgouUBzomwTvXhomGVq',\n", - " model: 'claude-3-sonnet-20240229',\n", - " usage: [Object],\n", - " stop_reason: 'tool_use',\n", - " stop_sequence: null\n", - " },\n", - " invalid_tool_calls: []\n", - " },\n", - " {\n", - " id: '3d4c72c4-f009-48ce-b739-1d3f28ee4803',\n", - " name: 'multiply',\n", - " type: 'tool',\n", - " content: '13.2',\n", - " tool_call_id: 'toolu_016RjRHSEyDZRqKhGrb8uvjJ',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " {\n", - " id: 'run-26dd7e83-f5fb-4c70-8ba1-271300ffeb25-0',\n", - " type: 'ai',\n", - " content: [ [Object] ],\n", - " example: false,\n", - " tool_calls: [ [Object] ],\n", - " usage_metadata: { input_tokens: 999, total_tokens: 1070, output_tokens: 71 },\n", - " additional_kwargs: {},\n", - " response_metadata: {\n", - " id: 'msg_01VTFvtCxtR3rN58hCmjt2oH',\n", - " model: 'claude-3-sonnet-20240229',\n", - " usage: [Object],\n", - " stop_reason: 'tool_use',\n", - " stop_sequence: null\n", - " },\n", - " invalid_tool_calls: []\n", - " },\n", - " {\n", - " id: 'ca4e0317-7b3a-4638-933c-1efd98bc4fda',\n", - " name: 'multiply',\n", - " type: 'tool',\n", - " content: '87.12',\n", - " tool_call_id: 'toolu_01PqvszxiuXrVJ9bwgTWaH3q',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " {\n", - " id: 'run-007794ac-3590-4b9e-b678-008f02e40042-0',\n", - " type: 'ai',\n", - " content: [ [Object] ],\n", - " example: false,\n", - " tool_calls: [ [Object] ],\n", - " usage_metadata: { input_tokens: 1084, total_tokens: 1155, output_tokens: 71 },\n", - " additional_kwargs: {},\n", - " response_metadata: {\n", - " id: 'msg_017BEkSqmTsmtJaTxAzfRMEh',\n", - " model: 'claude-3-sonnet-20240229',\n", - " usage: [Object],\n", - " stop_reason: 'tool_use',\n", - " stop_sequence: null\n", - " },\n", - " invalid_tool_calls: []\n", - " },\n", - " {\n", - " id: '7f58c121-6f21-4c7b-ba38-aa820e274ff8',\n", - " name: 'multiply',\n", - " type: 'tool',\n", - " content: '287.496',\n", - " tool_call_id: 'toolu_01LU3RqRUXZRLRoJ2AZNmPed',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " {\n", - " id: 'run-51e35afb-7ec6-4738-93e2-92f80b5c9377-0',\n", - " type: 'ai',\n", - " content: '287.496',\n", - " example: false,\n", - " tool_calls: [],\n", - " usage_metadata: { input_tokens: 1169, total_tokens: 1176, output_tokens: 7 },\n", - " additional_kwargs: {},\n", - " response_metadata: {\n", - " id: 'msg_01Tx9kSNapSg8aUbWZXiS1NL',\n", - " model: 'claude-3-sonnet-20240229',\n", - " usage: [Object],\n", - " stop_reason: 'end_turn',\n", - " stop_sequence: null\n", - " },\n", - " invalid_tool_calls: []\n", - " }\n", - 
"]\n" - ] - } - ], - "source": [ - "console.log(examples[1].outputs.output)" - ] - }, - { - "cell_type": "markdown", - "id": "e852c8ef", - "metadata": {}, - "source": [ - "The search returns the examples whose inputs are most similar to the query input. We can use this for few-shot prompting a model. The first step is to create a series of math tools we want to allow the model to call:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "53e03aa1", - "metadata": {}, - "outputs": [], - "source": [ - "import { tool } from '@langchain/core/tools';\n", - "import { z } from 'zod';\n", - "\n", - "const add = tool((input) => {\n", - " return (input.a + input.b).toString();\n", - "}, {\n", - " name: \"add\",\n", - " description: \"Add two numbers\",\n", - " schema: z.object({\n", - " a: z.number().describe(\"The first number to add\"),\n", - " b: z.number().describe(\"The second number to add\"),\n", - " }),\n", - "});\n", - "\n", - "const cos = tool((input) => {\n", - " return Math.cos(input.angle).toString();\n", - "}, {\n", - " name: \"cos\",\n", - " description: \"Calculate the cosine of an angle (in radians)\",\n", - " schema: z.object({\n", - " angle: z.number().describe(\"The angle in radians\"),\n", - " }),\n", - "});\n", - "\n", - "const divide = tool((input) => {\n", - " return (input.a / input.b).toString();\n", - "}, {\n", - " name: \"divide\",\n", - " description: \"Divide two numbers\",\n", - " schema: z.object({\n", - " a: z.number().describe(\"The dividend\"),\n", - " b: z.number().describe(\"The divisor\"),\n", - " }),\n", - "});\n", - "\n", - "const log = tool((input) => {\n", - " return Math.log(input.value).toString();\n", - "}, {\n", - " name: \"log\",\n", - " description: \"Calculate the natural logarithm of a number\",\n", - " schema: z.object({\n", - " value: z.number().describe(\"The number to calculate the logarithm of\"),\n", - " }),\n", - "});\n", - "\n", - "const multiply = tool((input) => {\n", - " return (input.a * input.b).toString();\n", - "}, {\n", - " name: \"multiply\",\n", - " description: \"Multiply two numbers\",\n", - " schema: z.object({\n", - " a: z.number().describe(\"The first number to multiply\"),\n", - " b: z.number().describe(\"The second number to multiply\"),\n", - " }),\n", - "});\n", - "\n", - "const negate = tool((input) => {\n", - " return (-input.a).toString();\n", - "}, {\n", - " name: \"negate\",\n", - " description: \"Negate a number\",\n", - " schema: z.object({\n", - " a: z.number().describe(\"The number to negate\"),\n", - " }),\n", - "});\n", - "\n", - "const pi = tool(() => {\n", - " return Math.PI.toString();\n", - "}, {\n", - " name: \"pi\",\n", - " description: \"Return the value of pi\",\n", - " schema: z.object({}),\n", - "});\n", - "\n", - "const power = tool((input) => {\n", - " return Math.pow(input.base, input.exponent).toString();\n", - "}, {\n", - " name: \"power\",\n", - " description: \"Raise a number to a power\",\n", - " schema: z.object({\n", - " base: z.number().describe(\"The base number\"),\n", - " exponent: z.number().describe(\"The exponent\"),\n", - " }),\n", - "});\n", - "\n", - "const sin = tool((input) => {\n", - " return Math.sin(input.angle).toString();\n", - "}, {\n", - " name: \"sin\",\n", - " description: \"Calculate the sine of an angle (in radians)\",\n", - " schema: z.object({\n", - " angle: z.number().describe(\"The angle in radians\"),\n", - " }),\n", - "});\n", - "\n", - "const subtract = tool((input) => {\n", - " return (input.a - input.b).toString();\n", - "}, {\n", - " name: 
\"subtract\",\n", - " description: \"Subtract two numbers\",\n", - " schema: z.object({\n", - " a: z.number().describe(\"The number to subtract from\"),\n", - " b: z.number().describe(\"The number to subtract\"),\n", - " }),\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "12cba1e1", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "import { HumanMessage, SystemMessage, BaseMessage, BaseMessageLike } from \"@langchain/core/messages\";\n", - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { Client as LangSmithClient, Example } from \"langsmith\";\n", - "import { coerceMessageLikeToMessage } from \"@langchain/core/messages\";\n", - "\n", - "const client = new LangSmithClient();\n", - "\n", - "async function similarExamples(input: Record): Promise> {\n", - " const examples = await client.similarExamples(input, dataset.id, 5);\n", - " return { ...input, examples };\n", - "}\n", - "\n", - "function constructPrompt(input: { examples: Example[], input: string }): BaseMessage[] {\n", - " const instructions = \"You are great at using mathematical tools.\";\n", - " let messages: BaseMessage[] = []\n", - " \n", - " for (const ex of input.examples) {\n", - " // Assuming ex.outputs.output is an array of message-like objects\n", - " messages = messages.concat(ex.outputs.output.flatMap((msg: BaseMessageLike) => coerceMessageLikeToMessage(msg)));\n", - " }\n", - " \n", - " const examples = messages.filter(msg => msg._getType() !== 'system');\n", - " examples.forEach((ex) => {\n", - " if (ex._getType() === 'human') {\n", - " ex.name = \"example_user\";\n", - " } else {\n", - " ex.name = \"example_assistant\";\n", - " }\n", - " });\n", - "\n", - " return [new SystemMessage(instructions), ...examples, new HumanMessage(input.input)];\n", - "}\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "});\n", - "const tools = [add, cos, divide, log, multiply, negate, pi, power, sin, subtract];\n", - "const llmWithTools = llm.bindTools(tools);\n", - "\n", - "const exampleSelector = new RunnableLambda(\n", - " { func: similarExamples }\n", - ").withConfig({ runName: \"similarExamples\" });\n", - "\n", - "const chain = exampleSelector.pipe(\n", - " new RunnableLambda({\n", - " func: constructPrompt\n", - " }).withConfig({\n", - " runName: \"constructPrompt\"\n", - " })\n", - ").pipe(llmWithTools);" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "c423b367", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "2bcc86a0", + "metadata": {}, + "outputs": [], + "source": [ + "import { Client as LangSmithClient } from 'langsmith';\n", + "import { z } from 'zod';\n", + "import { zodToJsonSchema } from 'zod-to-json-schema';\n", + "import fs from \"fs/promises\";\n", + "\n", + "// Read the example dataset and convert to the format expected by the LangSmith API\n", + "// for creating new examples\n", + "const examplesJson = JSON.parse(\n", + " await fs.readFile(\"../../data/ls_few_shot_example_dataset.json\", \"utf-8\")\n", + ");\n", + "\n", + "let inputs: Record[] = [];\n", + "let outputs: Record[] = [];\n", + "let metadata: Record[] = [];\n", + "\n", + "examplesJson.forEach((ex) => {\n", + " inputs.push(ex.inputs);\n", + " outputs.push(ex.outputs);\n", + " metadata.push(ex.metadata);\n", + "});\n", + "\n", + "// Define our input schema as this is required for indexing\n", + "const inputsSchema = 
zodToJsonSchema(z.object({\n", + " input: z.string(),\n", + " system: z.boolean().optional(),\n", + "}));\n", + "\n", + "const lsClient = new LangSmithClient();\n", + "\n", + "await lsClient.deleteDataset({ datasetName: \"multiverse-math-examples-for-few-shot-example\" })\n", + "\n", + "const dataset = await lsClient.createDataset(\"multiverse-math-examples-for-few-shot-example\", {\n", + " inputsSchema,\n", + "});\n", + "\n", + "const createdExamples = await lsClient.createExamples({\n", + " inputs,\n", + " outputs,\n", + " metadata,\n", + " datasetId: dataset.id,\n", + "})\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "01b5a8f3", + "metadata": {}, + "outputs": [], + "source": [ + "await lsClient.indexDataset({ datasetId: dataset.id });" + ] + }, + { + "cell_type": "markdown", + "id": "5767d171", + "metadata": {}, + "source": [ + "Once the dataset is indexed, we can search for similar examples like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5013a56f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3\n" + ] + } + ], + "source": [ + "const examples = await lsClient.similarExamples(\n", + " { input: \"whats the negation of the negation of the negation of 3\" },\n", + " dataset.id,\n", + " 3,\n", + ")\n", + "console.log(examples.length)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a142db06", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "evaluate the negation of -100\n" + ] + } + ], + "source": [ + "console.log(examples[0].inputs.input)" + ] + }, + { + "cell_type": "markdown", + "id": "d2627125", + "metadata": {}, + "source": [ + "For this dataset the outputs are an entire chat history:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "af5b9191", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " id: 'cbe7ed83-86e1-4e46-89de-6646f8b55cef',\n", + " type: 'system',\n", + " content: 'You are requested to solve math questions in an alternate mathematical universe. The operations have been altered to yield different results than expected. Do not guess the answer or rely on your innate knowledge of math. Use the provided tools to answer the question. While associativity and commutativity apply, distributivity does not. Answer the question using the fewest possible tools. 
Only include the numeric response without any clarifications.',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " {\n", + " id: '04946246-09a8-4465-be95-037efd7dae55',\n", + " type: 'human',\n", + " content: 'if one gazoink is 4 badoinks, each of which is 6 foos, each of wich is 3 bars - how many bars in 3 gazoinks?',\n", + " example: false,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " {\n", + " id: 'run-d6f0954e-b21b-4ea8-ad98-0ee64cfc824e-0',\n", + " type: 'ai',\n", + " content: [ [Object] ],\n", + " example: false,\n", + " tool_calls: [ [Object] ],\n", + " usage_metadata: { input_tokens: 916, total_tokens: 984, output_tokens: 68 },\n", + " additional_kwargs: {},\n", + " response_metadata: {\n", + " id: 'msg_01MBWxgouUBzomwTvXhomGVq',\n", + " model: 'claude-3-sonnet-20240229',\n", + " usage: [Object],\n", + " stop_reason: 'tool_use',\n", + " stop_sequence: null\n", + " },\n", + " invalid_tool_calls: []\n", + " },\n", + " {\n", + " id: '3d4c72c4-f009-48ce-b739-1d3f28ee4803',\n", + " name: 'multiply',\n", + " type: 'tool',\n", + " content: '13.2',\n", + " tool_call_id: 'toolu_016RjRHSEyDZRqKhGrb8uvjJ',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " {\n", + " id: 'run-26dd7e83-f5fb-4c70-8ba1-271300ffeb25-0',\n", + " type: 'ai',\n", + " content: [ [Object] ],\n", + " example: false,\n", + " tool_calls: [ [Object] ],\n", + " usage_metadata: { input_tokens: 999, total_tokens: 1070, output_tokens: 71 },\n", + " additional_kwargs: {},\n", + " response_metadata: {\n", + " id: 'msg_01VTFvtCxtR3rN58hCmjt2oH',\n", + " model: 'claude-3-sonnet-20240229',\n", + " usage: [Object],\n", + " stop_reason: 'tool_use',\n", + " stop_sequence: null\n", + " },\n", + " invalid_tool_calls: []\n", + " },\n", + " {\n", + " id: 'ca4e0317-7b3a-4638-933c-1efd98bc4fda',\n", + " name: 'multiply',\n", + " type: 'tool',\n", + " content: '87.12',\n", + " tool_call_id: 'toolu_01PqvszxiuXrVJ9bwgTWaH3q',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " {\n", + " id: 'run-007794ac-3590-4b9e-b678-008f02e40042-0',\n", + " type: 'ai',\n", + " content: [ [Object] ],\n", + " example: false,\n", + " tool_calls: [ [Object] ],\n", + " usage_metadata: { input_tokens: 1084, total_tokens: 1155, output_tokens: 71 },\n", + " additional_kwargs: {},\n", + " response_metadata: {\n", + " id: 'msg_017BEkSqmTsmtJaTxAzfRMEh',\n", + " model: 'claude-3-sonnet-20240229',\n", + " usage: [Object],\n", + " stop_reason: 'tool_use',\n", + " stop_sequence: null\n", + " },\n", + " invalid_tool_calls: []\n", + " },\n", + " {\n", + " id: '7f58c121-6f21-4c7b-ba38-aa820e274ff8',\n", + " name: 'multiply',\n", + " type: 'tool',\n", + " content: '287.496',\n", + " tool_call_id: 'toolu_01LU3RqRUXZRLRoJ2AZNmPed',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " {\n", + " id: 'run-51e35afb-7ec6-4738-93e2-92f80b5c9377-0',\n", + " type: 'ai',\n", + " content: '287.496',\n", + " example: false,\n", + " tool_calls: [],\n", + " usage_metadata: { input_tokens: 1169, total_tokens: 1176, output_tokens: 7 },\n", + " additional_kwargs: {},\n", + " response_metadata: {\n", + " id: 'msg_01Tx9kSNapSg8aUbWZXiS1NL',\n", + " model: 'claude-3-sonnet-20240229',\n", + " usage: [Object],\n", + " stop_reason: 'end_turn',\n", + " stop_sequence: null\n", + " },\n", + " invalid_tool_calls: []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "console.log(examples[1].outputs.output)" + ] + }, { - "name": "stdout", - "output_type": 
"stream", - "text": [ - "[\n", - " {\n", - " name: 'negate',\n", - " args: { a: 3 },\n", - " type: 'tool_call',\n", - " id: 'call_SX0dmb4AbFu39KkGQDqPXQwa'\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "e852c8ef", + "metadata": {}, + "source": [ + "The search returns the examples whose inputs are most similar to the query input. We can use this for few-shot prompting a model. The first step is to create a series of math tools we want to allow the model to call:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "53e03aa1", + "metadata": {}, + "outputs": [], + "source": [ + "import { tool } from '@langchain/core/tools';\n", + "import { z } from 'zod';\n", + "\n", + "const add = tool((input) => {\n", + " return (input.a + input.b).toString();\n", + "}, {\n", + " name: \"add\",\n", + " description: \"Add two numbers\",\n", + " schema: z.object({\n", + " a: z.number().describe(\"The first number to add\"),\n", + " b: z.number().describe(\"The second number to add\"),\n", + " }),\n", + "});\n", + "\n", + "const cos = tool((input) => {\n", + " return Math.cos(input.angle).toString();\n", + "}, {\n", + " name: \"cos\",\n", + " description: \"Calculate the cosine of an angle (in radians)\",\n", + " schema: z.object({\n", + " angle: z.number().describe(\"The angle in radians\"),\n", + " }),\n", + "});\n", + "\n", + "const divide = tool((input) => {\n", + " return (input.a / input.b).toString();\n", + "}, {\n", + " name: \"divide\",\n", + " description: \"Divide two numbers\",\n", + " schema: z.object({\n", + " a: z.number().describe(\"The dividend\"),\n", + " b: z.number().describe(\"The divisor\"),\n", + " }),\n", + "});\n", + "\n", + "const log = tool((input) => {\n", + " return Math.log(input.value).toString();\n", + "}, {\n", + " name: \"log\",\n", + " description: \"Calculate the natural logarithm of a number\",\n", + " schema: z.object({\n", + " value: z.number().describe(\"The number to calculate the logarithm of\"),\n", + " }),\n", + "});\n", + "\n", + "const multiply = tool((input) => {\n", + " return (input.a * input.b).toString();\n", + "}, {\n", + " name: \"multiply\",\n", + " description: \"Multiply two numbers\",\n", + " schema: z.object({\n", + " a: z.number().describe(\"The first number to multiply\"),\n", + " b: z.number().describe(\"The second number to multiply\"),\n", + " }),\n", + "});\n", + "\n", + "const negate = tool((input) => {\n", + " return (-input.a).toString();\n", + "}, {\n", + " name: \"negate\",\n", + " description: \"Negate a number\",\n", + " schema: z.object({\n", + " a: z.number().describe(\"The number to negate\"),\n", + " }),\n", + "});\n", + "\n", + "const pi = tool(() => {\n", + " return Math.PI.toString();\n", + "}, {\n", + " name: \"pi\",\n", + " description: \"Return the value of pi\",\n", + " schema: z.object({}),\n", + "});\n", + "\n", + "const power = tool((input) => {\n", + " return Math.pow(input.base, input.exponent).toString();\n", + "}, {\n", + " name: \"power\",\n", + " description: \"Raise a number to a power\",\n", + " schema: z.object({\n", + " base: z.number().describe(\"The base number\"),\n", + " exponent: z.number().describe(\"The exponent\"),\n", + " }),\n", + "});\n", + "\n", + "const sin = tool((input) => {\n", + " return Math.sin(input.angle).toString();\n", + "}, {\n", + " name: \"sin\",\n", + " description: \"Calculate the sine of an angle (in radians)\",\n", + " schema: z.object({\n", + " angle: z.number().describe(\"The angle in radians\"),\n", + " }),\n", + "});\n", + "\n", + "const subtract = 
tool((input) => {\n", + " return (input.a - input.b).toString();\n", + "}, {\n", + " name: \"subtract\",\n", + " description: \"Subtract two numbers\",\n", + " schema: z.object({\n", + " a: z.number().describe(\"The number to subtract from\"),\n", + " b: z.number().describe(\"The number to subtract\"),\n", + " }),\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "12cba1e1", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { HumanMessage, SystemMessage, BaseMessage, BaseMessageLike } from \"@langchain/core/messages\";\n", + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "import { Client as LangSmithClient, Example } from \"langsmith\";\n", + "import { coerceMessageLikeToMessage } from \"@langchain/core/messages\";\n", + "\n", + "const client = new LangSmithClient();\n", + "\n", + "async function similarExamples(input: Record): Promise> {\n", + " const examples = await client.similarExamples(input, dataset.id, 5);\n", + " return { ...input, examples };\n", + "}\n", + "\n", + "function constructPrompt(input: { examples: Example[], input: string }): BaseMessage[] {\n", + " const instructions = \"You are great at using mathematical tools.\";\n", + " let messages: BaseMessage[] = []\n", + " \n", + " for (const ex of input.examples) {\n", + " // Assuming ex.outputs.output is an array of message-like objects\n", + " messages = messages.concat(ex.outputs.output.flatMap((msg: BaseMessageLike) => coerceMessageLikeToMessage(msg)));\n", + " }\n", + " \n", + " const examples = messages.filter(msg => msg._getType() !== 'system');\n", + " examples.forEach((ex) => {\n", + " if (ex._getType() === 'human') {\n", + " ex.name = \"example_user\";\n", + " } else {\n", + " ex.name = \"example_assistant\";\n", + " }\n", + " });\n", + "\n", + " return [new SystemMessage(instructions), ...examples, new HumanMessage(input.input)];\n", + "}\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "});\n", + "const tools = [add, cos, divide, log, multiply, negate, pi, power, sin, subtract];\n", + "const llmWithTools = llm.bindTools(tools);\n", + "\n", + "const exampleSelector = new RunnableLambda(\n", + " { func: similarExamples }\n", + ").withConfig({ runName: \"similarExamples\" });\n", + "\n", + "const chain = exampleSelector.pipe(\n", + " new RunnableLambda({\n", + " func: constructPrompt\n", + " }).withConfig({\n", + " runName: \"constructPrompt\"\n", + " })\n", + ").pipe(llmWithTools);" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "c423b367", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'negate',\n", + " args: { a: 3 },\n", + " type: 'tool_call',\n", + " id: 'call_SX0dmb4AbFu39KkGQDqPXQwa'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const aiMsg = await chain.invoke({ input: \"whats the negation of the negation of 3\", system: false })\n", + "console.log(aiMsg.tool_calls)" + ] + }, + { + "cell_type": "markdown", + "id": "94489b4a", + "metadata": {}, + "source": [ + "Looking at the LangSmith trace, we can see that relevant examples were pulled in in the `similarExamples` step and passed as messages to ChatOpenAI: https://smith.langchain.com/public/20e09618-0746-4973-9382-5b36c3f27083/r." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const aiMsg = await chain.invoke({ input: \"whats the negation of the negation of 3\", system: false })\n", - "console.log(aiMsg.tool_calls)" - ] - }, - { - "cell_type": "markdown", - "id": "94489b4a", - "metadata": {}, - "source": [ - "Looking at the LangSmith trace, we can see that relevant examples were pulled in in the `similarExamples` step and passed as messages to ChatOpenAI: https://smith.langchain.com/public/20e09618-0746-4973-9382-5b36c3f27083/r." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/example_selectors_length_based.mdx b/docs/core_docs/docs/how_to/example_selectors_length_based.mdx index b1b7323f661a..652844eb41b0 100644 --- a/docs/core_docs/docs/how_to/example_selectors_length_based.mdx +++ b/docs/core_docs/docs/how_to/example_selectors_length_based.mdx @@ -4,7 +4,7 @@ This guide assumes familiarity with the following concepts: -- [Prompt templates](/docs/concepts/#prompt-templates) +- [Prompt templates](/docs/concepts/prompt_templates) - [Example selectors](/docs/how_to/example_selectors) ::: diff --git a/docs/core_docs/docs/how_to/example_selectors_similarity.mdx b/docs/core_docs/docs/how_to/example_selectors_similarity.mdx index f18eff41be78..09f3db698f93 100644 --- a/docs/core_docs/docs/how_to/example_selectors_similarity.mdx +++ b/docs/core_docs/docs/how_to/example_selectors_similarity.mdx @@ -4,9 +4,9 @@ This guide assumes familiarity with the following concepts: -- [Prompt templates](/docs/concepts/#prompt-templates) +- [Prompt templates](/docs/concepts/prompt_templates) - [Example selectors](/docs/how_to/example_selectors) -- [Vector stores](/docs/concepts#vectorstores) +- [Vector stores](/docs/concepts/vectorstores) ::: diff --git a/docs/core_docs/docs/how_to/fallbacks.mdx b/docs/core_docs/docs/how_to/fallbacks.mdx index c3a62cb93dfb..bb3e63f3dfd8 100644 --- a/docs/core_docs/docs/how_to/fallbacks.mdx +++ b/docs/core_docs/docs/how_to/fallbacks.mdx @@ -6,7 +6,7 @@ import CodeBlock from "@theme/CodeBlock"; This guide assumes familiarity with the following concepts: -- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language) +- [LangChain Expression Language (LCEL)](/docs/concepts/lcel) - [Chaining runnables](/docs/how_to/sequence/) ::: diff --git a/docs/core_docs/docs/how_to/few_shot_examples.ipynb b/docs/core_docs/docs/how_to/few_shot_examples.ipynb index 0934a66a42a4..d20daa4aa2ae 100644 --- a/docs/core_docs/docs/how_to/few_shot_examples.ipynb +++ b/docs/core_docs/docs/how_to/few_shot_examples.ipynb @@ -1,367 +1,367 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "94c3ad61", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 3\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": 
"b91e03f1", - "metadata": {}, - "source": [ - "# How to use few shot examples\n", - "\n", - "In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", - "\n", - "A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://api.js.langchain.com/classes/langchain_core.example_selectors.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n", - "\n", - "This guide will cover few-shotting with string prompt templates. For a guide on few-shotting with chat messages for chat models, see [here](/docs/how_to/few_shot_examples_chat/).\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Example selectors](/docs/concepts/#example-selectors)\n", - "- [LLMs](/docs/concepts/#llms)\n", - "- [Vectorstores](/docs/concepts/#vectorstores)\n", - "\n", - ":::\n", - "\n", - "## Create a formatter for the few-shot examples\n", - "\n", - "Configure a formatter that will format the few-shot examples into a string. This formatter should be a `PromptTemplate` object." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "4e70bce2", - "metadata": {}, - "outputs": [], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const examplePrompt = PromptTemplate.fromTemplate(\"Question: {question}\\n{answer}\")" - ] - }, - { - "cell_type": "markdown", - "id": "50846ad4", - "metadata": {}, - "source": [ - "## Creating the example set\n", - "\n", - "Next, we'll create a list of few-shot examples. Each example should be a dictionary representing an example input to the formatter prompt we defined above." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a44be840", - "metadata": {}, - "outputs": [], - "source": [ - "const examples = [\n", - " {\n", - " question: \"Who lived longer, Muhammad Ali or Alan Turing?\",\n", - " answer: `\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: How old was Muhammad Ali when he died?\n", - " Intermediate answer: Muhammad Ali was 74 years old when he died.\n", - " Follow up: How old was Alan Turing when he died?\n", - " Intermediate answer: Alan Turing was 41 years old when he died.\n", - " So the final answer is: Muhammad Ali\n", - " `\n", - " },\n", - " {\n", - " question: \"When was the founder of craigslist born?\",\n", - " answer: `\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who was the founder of craigslist?\n", - " Intermediate answer: Craigslist was founded by Craig Newmark.\n", - " Follow up: When was Craig Newmark born?\n", - " Intermediate answer: Craig Newmark was born on December 6, 1952.\n", - " So the final answer is: December 6, 1952\n", - " `\n", - " },\n", - " {\n", - " question: \"Who was the maternal grandfather of George Washington?\",\n", - " answer: `\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who was the mother of George Washington?\n", - " Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", - " Follow up: Who was the father of Mary Ball Washington?\n", - " Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", - " So the final answer is: Joseph Ball\n", - " `\n", - " },\n", - " {\n", - " question: \"Are both the directors of Jaws and Casino Royale from the same country?\",\n", - " answer: `\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who is the director of Jaws?\n", - " Intermediate Answer: The director of Jaws is Steven Spielberg.\n", - " Follow up: Where is Steven Spielberg from?\n", - " Intermediate Answer: The United States.\n", - " Follow up: Who is the director of Casino Royale?\n", - " Intermediate Answer: The director of Casino Royale is Martin Campbell.\n", - " Follow up: Where is Martin Campbell from?\n", - " Intermediate Answer: New Zealand.\n", - " So the final answer is: No\n", - " `\n", - " }\n", - " ];" - ] - }, - { - "cell_type": "markdown", - "id": "dad66af1", - "metadata": {}, - "source": [ - "### Pass the examples and formatter to `FewShotPromptTemplate`\n", - "\n", - "Finally, create a [`FewShotPromptTemplate`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotPromptTemplate.html) object. This object takes in the few-shot examples and the formatter for the few-shot examples. 
When this `FewShotPromptTemplate` is formatted, it formats the passed examples using the `examplePrompt`, then and adds them to the final prompt before `suffix`:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "e76fa1ba", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Question: Who lived longer, Muhammad Ali or Alan Turing?\n", - "\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: How old was Muhammad Ali when he died?\n", - " Intermediate answer: Muhammad Ali was 74 years old when he died.\n", - " Follow up: How old was Alan Turing when he died?\n", - " Intermediate answer: Alan Turing was 41 years old when he died.\n", - " So the final answer is: Muhammad Ali\n", - " \n", - "\n", - "Question: When was the founder of craigslist born?\n", - "\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who was the founder of craigslist?\n", - " Intermediate answer: Craigslist was founded by Craig Newmark.\n", - " Follow up: When was Craig Newmark born?\n", - " Intermediate answer: Craig Newmark was born on December 6, 1952.\n", - " So the final answer is: December 6, 1952\n", - " \n", - "\n", - "Question: Who was the maternal grandfather of George Washington?\n", - "\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who was the mother of George Washington?\n", - " Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", - " Follow up: Who was the father of Mary Ball Washington?\n", - " Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", - " So the final answer is: Joseph Ball\n", - " \n", - "\n", - "Question: Are both the directors of Jaws and Casino Royale from the same country?\n", - "\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who is the director of Jaws?\n", - " Intermediate Answer: The director of Jaws is Steven Spielberg.\n", - " Follow up: Where is Steven Spielberg from?\n", - " Intermediate Answer: The United States.\n", - " Follow up: Who is the director of Casino Royale?\n", - " Intermediate Answer: The director of Casino Royale is Martin Campbell.\n", - " Follow up: Where is Martin Campbell from?\n", - " Intermediate Answer: New Zealand.\n", - " So the final answer is: No\n", - " \n", - "\n", - "Question: Who was the father of Mary Ball Washington?\n" - ] - } - ], - "source": [ - "import { FewShotPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = new FewShotPromptTemplate({\n", - " examples,\n", - " examplePrompt,\n", - " suffix: \"Question: {input}\",\n", - " inputVariables: [\"input\"],\n", - "})\n", - "\n", - "const formatted = await prompt.format({ input: \"Who was the father of Mary Ball Washington?\" })\n", - "console.log(formatted.toString())" - ] - }, - { - "cell_type": "markdown", - "id": "59c6f332", - "metadata": {}, - "source": [ - "By providing the model with examples like this, we can guide the model to a better response." - ] - }, - { - "cell_type": "markdown", - "id": "bbe1f843", - "metadata": {}, - "source": [ - "## Using an example selector\n", - "\n", - "We will reuse the example set and the formatter from the previous section. 
However, instead of feeding the examples directly into the `FewShotPromptTemplate` object, we will feed them into an implementation of `ExampleSelector` called [`SemanticSimilarityExampleSelector`](https://api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html) instance. This class selects few-shot examples from the initial set based on their similarity to the input. It uses an embedding model to compute the similarity between the input and the few-shot examples, as well as a vector store to perform the nearest neighbor search.\n", - "\n", - "To show what it looks like, let's initialize an instance and call it in isolation:" - ] - }, - { - "cell_type": "markdown", - "id": "d1f350b4", - "metadata": {}, - "source": [ - "Set your OpenAI API key for the embeddings model\n", - "```bash\n", - "export OPENAI_API_KEY=\"...\"\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "80c5ac5c", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "94c3ad61", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 3\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Examples most similar to the input: Who was the father of Mary Ball Washington?\n", - "\n", - "\n", - "question: Who was the maternal grandfather of George Washington?\n", - "answer: \n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who was the mother of George Washington?\n", - " Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", - " Follow up: Who was the father of Mary Ball Washington?\n", - " Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", - " So the final answer is: Joseph Ball\n", - " \n" - ] - } - ], - "source": [ - "import { SemanticSimilarityExampleSelector } from \"@langchain/core/example_selectors\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples(\n", - " // This is the list of examples available to select from.\n", - " examples,\n", - " // This is the embedding class used to produce embeddings which are used to measure semantic similarity.\n", - " new OpenAIEmbeddings(),\n", - " // This is the VectorStore class that is used to store the embeddings and do a similarity search over.\n", - " MemoryVectorStore,\n", - " {\n", - " // This is the number of examples to produce.\n", - " k: 1,\n", - " }\n", - ")\n", - "\n", - "// Select the most similar example to the input.\n", - "const question = \"Who was the father of Mary Ball Washington?\"\n", - "const selectedExamples = await exampleSelector.selectExamples({ question })\n", - "console.log(`Examples most similar to the input: ${question}`)\n", - "for (const example of selectedExamples) {\n", - " console.log(\"\\n\");\n", - " console.log(Object.entries(example).map(([k, v]) => `${k}: ${v}`).join(\"\\n\"))\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "89ac47fe", - "metadata": {}, - "source": [ - "Now, let's create a `FewShotPromptTemplate` object. This object takes in the example selector and the formatter prompt for the few-shot examples." 
- ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "de69a214", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "b91e03f1", + "metadata": {}, + "source": [ + "# How to use few shot examples\n", + "\n", + "In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", + "\n", + "A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://api.js.langchain.com/classes/langchain_core.example_selectors.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n", + "\n", + "This guide will cover few-shotting with string prompt templates. For a guide on few-shotting with chat messages for chat models, see [here](/docs/how_to/few_shot_examples_chat/).\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Example selectors](/docs/concepts/example_selectors)\n", + "- [LLMs](/docs/concepts/text_llms)\n", + "- [Vectorstores](/docs/concepts/#vectorstores)\n", + "\n", + ":::\n", + "\n", + "## Create a formatter for the few-shot examples\n", + "\n", + "Configure a formatter that will format the few-shot examples into a string. This formatter should be a `PromptTemplate` object." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "Question: Who was the maternal grandfather of George Washington?\n", - "\n", - " Are follow up questions needed here: Yes.\n", - " Follow up: Who was the mother of George Washington?\n", - " Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", - " Follow up: Who was the father of Mary Ball Washington?\n", - " Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", - " So the final answer is: Joseph Ball\n", - " \n", - "\n", - "Question: Who was the father of Mary Ball Washington?\n" - ] + "cell_type": "code", + "execution_count": 1, + "id": "4e70bce2", + "metadata": {}, + "outputs": [], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const examplePrompt = PromptTemplate.fromTemplate(\"Question: {question}\\n{answer}\")" + ] + }, + { + "cell_type": "markdown", + "id": "50846ad4", + "metadata": {}, + "source": [ + "## Creating the example set\n", + "\n", + "Next, we'll create a list of few-shot examples. Each example should be a dictionary representing an example input to the formatter prompt we defined above." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a44be840", + "metadata": {}, + "outputs": [], + "source": [ + "const examples = [\n", + " {\n", + " question: \"Who lived longer, Muhammad Ali or Alan Turing?\",\n", + " answer: `\n", + " Are follow up questions needed here: Yes.\n", + " Follow up: How old was Muhammad Ali when he died?\n", + " Intermediate answer: Muhammad Ali was 74 years old when he died.\n", + " Follow up: How old was Alan Turing when he died?\n", + " Intermediate answer: Alan Turing was 41 years old when he died.\n", + " So the final answer is: Muhammad Ali\n", + " `\n", + " },\n", + " {\n", + " question: \"When was the founder of craigslist born?\",\n", + " answer: `\n", + " Are follow up questions needed here: Yes.\n", + " Follow up: Who was the founder of craigslist?\n", + " Intermediate answer: Craigslist was founded by Craig Newmark.\n", + " Follow up: When was Craig Newmark born?\n", + " Intermediate answer: Craig Newmark was born on December 6, 1952.\n", + " So the final answer is: December 6, 1952\n", + " `\n", + " },\n", + " {\n", + " question: \"Who was the maternal grandfather of George Washington?\",\n", + " answer: `\n", + " Are follow up questions needed here: Yes.\n", + " Follow up: Who was the mother of George Washington?\n", + " Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", + " Follow up: Who was the father of Mary Ball Washington?\n", + " Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", + " So the final answer is: Joseph Ball\n", + " `\n", + " },\n", + " {\n", + " question: \"Are both the directors of Jaws and Casino Royale from the same country?\",\n", + " answer: `\n", + " Are follow up questions needed here: Yes.\n", + " Follow up: Who is the director of Jaws?\n", + " Intermediate Answer: The director of Jaws is Steven Spielberg.\n", + " Follow up: Where is Steven Spielberg from?\n", + " Intermediate Answer: The United States.\n", + " Follow up: Who is the director of Casino Royale?\n", + " Intermediate Answer: The director of Casino Royale is Martin Campbell.\n", + " Follow up: Where is Martin Campbell from?\n", + " Intermediate Answer: New Zealand.\n", + " So the final answer is: No\n", + " `\n", + " }\n", + " ];" + ] + }, + { + "cell_type": "markdown", + "id": "dad66af1", + "metadata": {}, + "source": [ + "### Pass the examples and formatter to `FewShotPromptTemplate`\n", + "\n", + "Finally, create a [`FewShotPromptTemplate`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotPromptTemplate.html) object. This object takes in the few-shot examples and the formatter for the few-shot examples. 
When this `FewShotPromptTemplate` is formatted, it formats the passed examples using the `examplePrompt`, then adds them to the final prompt before `suffix`:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "e76fa1ba", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Question: Who lived longer, Muhammad Ali or Alan Turing?\n", + "\n", + "      Are follow up questions needed here: Yes.\n", + "      Follow up: How old was Muhammad Ali when he died?\n", + "      Intermediate answer: Muhammad Ali was 74 years old when he died.\n", + "      Follow up: How old was Alan Turing when he died?\n", + "      Intermediate answer: Alan Turing was 41 years old when he died.\n", + "      So the final answer is: Muhammad Ali\n", + "      \n", + "\n", + "Question: When was the founder of craigslist born?\n", + "\n", + "      Are follow up questions needed here: Yes.\n", + "      Follow up: Who was the founder of craigslist?\n", + "      Intermediate answer: Craigslist was founded by Craig Newmark.\n", + "      Follow up: When was Craig Newmark born?\n", + "      Intermediate answer: Craig Newmark was born on December 6, 1952.\n", + "      So the final answer is: December 6, 1952\n", + "      \n", + "\n", + "Question: Who was the maternal grandfather of George Washington?\n", + "\n", + "      Are follow up questions needed here: Yes.\n", + "      Follow up: Who was the mother of George Washington?\n", + "      Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", + "      Follow up: Who was the father of Mary Ball Washington?\n", + "      Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", + "      So the final answer is: Joseph Ball\n", + "      \n", + "\n", + "Question: Are both the directors of Jaws and Casino Royale from the same country?\n", + "\n", + "      Are follow up questions needed here: Yes.\n", + "      Follow up: Who is the director of Jaws?\n", + "      Intermediate Answer: The director of Jaws is Steven Spielberg.\n", + "      Follow up: Where is Steven Spielberg from?\n", + "      Intermediate Answer: The United States.\n", + "      Follow up: Who is the director of Casino Royale?\n", + "      Intermediate Answer: The director of Casino Royale is Martin Campbell.\n", + "      Follow up: Where is Martin Campbell from?\n", + "      Intermediate Answer: New Zealand.\n", + "      So the final answer is: No\n", + "      \n", + "\n", + "Question: Who was the father of Mary Ball Washington?\n" + ] + } + ], + "source": [ + "import { FewShotPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = new FewShotPromptTemplate({\n", + "  examples,\n", + "  examplePrompt,\n", + "  suffix: \"Question: {input}\",\n", + "  inputVariables: [\"input\"],\n", + "})\n", + "\n", + "const formatted = await prompt.format({ input: \"Who was the father of Mary Ball Washington?\" })\n", + "console.log(formatted.toString())" + ] + }, + { + "cell_type": "markdown", + "id": "59c6f332", + "metadata": {}, + "source": [ + "By providing the model with examples like this, we can guide the model to a better response." + ] + }, + { + "cell_type": "markdown", + "id": "bbe1f843", + "metadata": {}, + "source": [ + "## Using an example selector\n", + "\n", + "We will reuse the example set and the formatter from the previous section. 
However, instead of feeding the examples directly into the `FewShotPromptTemplate` object, we will feed them into an implementation of `ExampleSelector` called [`SemanticSimilarityExampleSelector`](https://api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html) instance. This class selects few-shot examples from the initial set based on their similarity to the input. It uses an embedding model to compute the similarity between the input and the few-shot examples, as well as a vector store to perform the nearest neighbor search.\n", + "\n", + "To show what it looks like, let's initialize an instance and call it in isolation:" + ] + }, + { + "cell_type": "markdown", + "id": "d1f350b4", + "metadata": {}, + "source": [ + "Set your OpenAI API key for the embeddings model\n", + "```bash\n", + "export OPENAI_API_KEY=\"...\"\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "80c5ac5c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Examples most similar to the input: Who was the father of Mary Ball Washington?\n", + "\n", + "\n", + "question: Who was the maternal grandfather of George Washington?\n", + "answer: \n", + " Are follow up questions needed here: Yes.\n", + " Follow up: Who was the mother of George Washington?\n", + " Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", + " Follow up: Who was the father of Mary Ball Washington?\n", + " Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", + " So the final answer is: Joseph Ball\n", + " \n" + ] + } + ], + "source": [ + "import { SemanticSimilarityExampleSelector } from \"@langchain/core/example_selectors\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples(\n", + " // This is the list of examples available to select from.\n", + " examples,\n", + " // This is the embedding class used to produce embeddings which are used to measure semantic similarity.\n", + " new OpenAIEmbeddings(),\n", + " // This is the VectorStore class that is used to store the embeddings and do a similarity search over.\n", + " MemoryVectorStore,\n", + " {\n", + " // This is the number of examples to produce.\n", + " k: 1,\n", + " }\n", + ")\n", + "\n", + "// Select the most similar example to the input.\n", + "const question = \"Who was the father of Mary Ball Washington?\"\n", + "const selectedExamples = await exampleSelector.selectExamples({ question })\n", + "console.log(`Examples most similar to the input: ${question}`)\n", + "for (const example of selectedExamples) {\n", + " console.log(\"\\n\");\n", + " console.log(Object.entries(example).map(([k, v]) => `${k}: ${v}`).join(\"\\n\"))\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "89ac47fe", + "metadata": {}, + "source": [ + "Now, let's create a `FewShotPromptTemplate` object. This object takes in the example selector and the formatter prompt for the few-shot examples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "de69a214", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "Question: Who was the maternal grandfather of George Washington?\n", + "\n", + " Are follow up questions needed here: Yes.\n", + " Follow up: Who was the mother of George Washington?\n", + " Intermediate answer: The mother of George Washington was Mary Ball Washington.\n", + " Follow up: Who was the father of Mary Ball Washington?\n", + " Intermediate answer: The father of Mary Ball Washington was Joseph Ball.\n", + " So the final answer is: Joseph Ball\n", + " \n", + "\n", + "Question: Who was the father of Mary Ball Washington?\n" + ] + } + ], + "source": [ + "const prompt = new FewShotPromptTemplate({\n", + " exampleSelector,\n", + " examplePrompt,\n", + " suffix: \"Question: {input}\",\n", + " inputVariables: [\"input\"],\n", + "})\n", + "\n", + "const formatted = await prompt.invoke({ input: \"Who was the father of Mary Ball Washington?\" });\n", + "console.log(formatted.toString())" + ] + }, + { + "cell_type": "markdown", + "id": "1b460794", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to add few-shot examples to your prompts.\n", + "\n", + "Next, check out the other how-to guides on prompt templates in this section, the related how-to guide on [few shotting with chat models](/docs/how_to/few_shot_examples_chat), or the other [example selector how-to guides](/docs/how_to/example_selectors/)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "const prompt = new FewShotPromptTemplate({\n", - " exampleSelector,\n", - " examplePrompt,\n", - " suffix: \"Question: {input}\",\n", - " inputVariables: [\"input\"],\n", - "})\n", - "\n", - "const formatted = await prompt.invoke({ input: \"Who was the father of Mary Ball Washington?\" });\n", - "console.log(formatted.toString())" - ] - }, - { - "cell_type": "markdown", - "id": "1b460794", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to add few-shot examples to your prompts.\n", - "\n", - "Next, check out the other how-to guides on prompt templates in this section, the related how-to guide on [few shotting with chat models](/docs/how_to/few_shot_examples_chat), or the other [example selector how-to guides](/docs/how_to/example_selectors/)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb b/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb index b61090b0a850..666c2c7c6333 100644 --- a/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb +++ b/docs/core_docs/docs/how_to/few_shot_examples_chat.ipynb @@ -1,704 +1,704 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "beba2e0e", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 2\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "bb0735c0", - "metadata": {}, - "source": [ - "# How to use few shot examples in chat models\n", - "\n", - "This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", - "\n", - "There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html) as a flexible starting point, and you can modify or replace them as you see fit.\n", - "\n", - "The goal of few-shot prompt templates are to dynamically select examples based on an input, and then format the examples in a final prompt to provide for the model.\n", - "\n", - "**Note:** The following code examples are for chat models only, since `FewShotChatMessagePromptTemplates` are designed to output formatted [chat messages](/docs/concepts/#message-types) rather than pure strings. For similar few-shot prompt examples for pure string templates compatible with completion models (LLMs), see the [few-shot prompt templates](/docs/how_to/few_shot_examples/) guide.\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Example selectors](/docs/concepts/#example-selectors)\n", - "- [Chat models](/docs/concepts/#chat-model)\n", - "- [Vectorstores](/docs/concepts/#vectorstores)\n", - "\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "id": "d716f2de-cc29-4823-9360-a808c7bfdb86", - "metadata": { - "tags": [] - }, - "source": [ - "## Fixed Examples\n", - "\n", - "The most basic (and common) few-shot prompting technique is to use fixed prompt examples. This way you can select a chain, evaluate it, and avoid worrying about additional moving parts in production.\n", - "\n", - "The basic components of the template are:\n", - "- `examples`: An array of object examples to include in the final prompt.\n", - "- `examplePrompt`: converts each example into 1 or more messages through its [`formatMessages`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. 
A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", - "\n", - "Below is a simple demonstration. First, define the examples you'd like to include:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "0fc5a02a-6249-4e92-95c3-30fff9671e8b", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import {\n", - " ChatPromptTemplate,\n", - " FewShotChatMessagePromptTemplate,\n", - "} from \"@langchain/core/prompts\"\n", - "\n", - "const examples = [\n", - " { input: \"2+2\", output: \"4\" },\n", - " { input: \"2+3\", output: \"5\" },\n", - "]" - ] - }, - { - "cell_type": "markdown", - "id": "e8710ecc-2aa0-4172-a74c-250f6bc3d9e2", - "metadata": {}, - "source": [ - "Next, assemble them into the few-shot prompt template." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "65e72ad1-9060-47d0-91a1-bc130c8b98ac", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: { content: \"2+2\", additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+2\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"4\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"4\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: { content: \"2+3\", additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+3\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"5\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"5\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "// This is a prompt template used to format each individual example.\n", - "const examplePrompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"human\", \"{input}\"],\n", - " [\"ai\", \"{output}\"],\n", - " ]\n", - ")\n", - "const fewShotPrompt = new FewShotChatMessagePromptTemplate({\n", - " examplePrompt,\n", - " examples,\n", - " inputVariables: [], // no input variables\n", - "})\n", - "\n", - "const result = await fewShotPrompt.invoke({});\n", - "console.log(result.toChatMessages())" - ] - }, - { - "cell_type": "markdown", - "id": "5490bd59-b28f-46a4-bbdf-0191802dd3c5", - "metadata": {}, - "source": [ - "Finally, we assemble the final prompt as shown below, passing `fewShotPrompt` directly into the `fromMessages` factory method, and use it with a model:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - 
"id": "9f86d6d9-50de-41b6-b6c7-0f9980cc0187", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "const finalPrompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", \"You are a wondrous wizard of math.\"],\n", - " fewShotPrompt,\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "c74c6026", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "97d443b1-6fae-4b36-bede-3ff7306288a3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cell_type": "raw", + "id": "beba2e0e", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 2\n", + "---" + ] + }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"A triangle does not have a square. The square of a number is the result of multiplying the number by\"\u001b[39m... 8 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"A triangle does not have a square. The square of a number is the result of multiplying the number by\"\u001b[39m... 8 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m23\u001b[39m, promptTokens: \u001b[33m52\u001b[39m, totalTokens: \u001b[33m75\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "markdown", + "id": "bb0735c0", + "metadata": {}, + "source": [ + "# How to use few shot examples in chat models\n", + "\n", + "This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", + "\n", + "There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html) as a flexible starting point, and you can modify or replace them as you see fit.\n", + "\n", + "The goal of few-shot prompt templates are to dynamically select examples based on an input, and then format the examples in a final prompt to provide for the model.\n", + "\n", + "**Note:** The following code examples are for chat models only, since `FewShotChatMessagePromptTemplates` are designed to output formatted [chat messages](/docs/concepts/messages) rather than pure strings. 
For similar few-shot prompt examples for pure string templates compatible with completion models (LLMs), see the [few-shot prompt templates](/docs/how_to/few_shot_examples/) guide.\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Example selectors](/docs/concepts/example_selectors)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Vectorstores](/docs/concepts/#vectorstores)\n", + "\n", + ":::" ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const chain = finalPrompt.pipe(model);\n", - "\n", - "await chain.invoke({ input: \"What's the square of a triangle?\" })" - ] - }, - { - "cell_type": "markdown", - "id": "70ab7114-f07f-46be-8874-3705a25aba5f", - "metadata": {}, - "source": [ - "## Dynamic few-shot prompting\n", - "\n", - "Sometimes you may want to select only a few examples from your overall set to show based on the input. For this, you can replace the `examples` passed into `FewShotChatMessagePromptTemplate` with an `exampleSelector`. The other components remain the same as above! Our dynamic few-shot prompt template would look like:\n", - "\n", - "- `exampleSelector`: responsible for selecting few-shot examples (and the order in which they are returned) for a given input. These implement the [BaseExampleSelector](https://api.js.langchain.com/classes/langchain_core.example_selectors.BaseExampleSelector.html) interface. A common example is the vectorstore-backed [SemanticSimilarityExampleSelector](https://api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html)\n", - "- `examplePrompt`: convert each example into 1 or more messages through its [`formatMessages`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", - "\n", - "These once again can be composed with other messages and chat templates to assemble your final prompt.\n", - "\n", - "Let's walk through an example with the `SemanticSimilarityExampleSelector`. Since this implementation uses a vectorstore to select examples based on semantic similarity, we will want to first populate the store. 
Since the basic idea here is that we want to search for and return examples most similar to the text input, we embed the `values` of our prompt examples rather than considering the keys:" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "ad66f06a-66fd-4fcc-8166-5d0e3c801e57", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { SemanticSimilarityExampleSelector } from \"@langchain/core/example_selectors\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings } from '@langchain/openai';\n", - "\n", - "const examples = [\n", - " { input: '2+2', output: '4' },\n", - " { input: '2+3', output: '5' },\n", - " { input: '2+4', output: '6' },\n", - " { input: 'What did the cow say to the moon?', output: 'nothing at all' },\n", - " {\n", - " input: 'Write me a poem about the moon',\n", - " output: 'One for the moon, and one for me, who are we to talk about the moon?',\n", - " },\n", - "];\n", - "\n", - "const toVectorize = examples.map((example) => `${example.input} ${example.output}`);\n", - "const embeddings = new OpenAIEmbeddings();\n", - "const vectorStore = await MemoryVectorStore.fromTexts(toVectorize, examples, embeddings);" - ] - }, - { - "cell_type": "markdown", - "id": "2f7e384a-2031-432b-951c-7ea8cf9262f1", - "metadata": {}, - "source": [ - "### Create the `exampleSelector`\n", - "\n", - "With a vectorstore created, we can create the `exampleSelector`. Here we will call it in isolation, and set `k` on it to only fetch the two example closest to the input." - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "7790303a-f722-452e-8921-b14bdf20bdff", - "metadata": { - "tags": [] - }, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " {\n", - " input: \u001b[32m\"What did the cow say to the moon?\"\u001b[39m,\n", - " output: \u001b[32m\"nothing at all\"\u001b[39m\n", - " },\n", - " { input: \u001b[32m\"2+4\"\u001b[39m, output: \u001b[32m\"6\"\u001b[39m }\n", - "]" + "cell_type": "markdown", + "id": "d716f2de-cc29-4823-9360-a808c7bfdb86", + "metadata": { + "tags": [] + }, + "source": [ + "## Fixed Examples\n", + "\n", + "The most basic (and common) few-shot prompting technique is to use fixed prompt examples. This way you can select a chain, evaluate it, and avoid worrying about additional moving parts in production.\n", + "\n", + "The basic components of the template are:\n", + "- `examples`: An array of object examples to include in the final prompt.\n", + "- `examplePrompt`: converts each example into 1 or more messages through its [`formatMessages`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", + "\n", + "Below is a simple demonstration. 
First, define the examples you'd like to include:" ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const exampleSelector = new SemanticSimilarityExampleSelector(\n", - " {\n", - " vectorStore,\n", - " k: 2\n", - " }\n", - ")\n", - "\n", - "// The prompt template will load examples by passing the input do the `select_examples` method\n", - "await exampleSelector.selectExamples({ input: \"horse\"})" - ] - }, - { - "cell_type": "markdown", - "id": "cc77c40f-3f58-40a2-b757-a2a2ea43f24a", - "metadata": {}, - "source": [ - "### Create prompt template\n", - "\n", - "We now assemble the prompt template, using the `exampleSelector` created above." - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "253c255e-41d7-45f6-9d88-c7a0ced4b1bd", - "metadata": { - "tags": [] - }, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: { content: \"2+3\", additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+3\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"5\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"5\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: { content: \"2+2\", additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+2\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"4\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"4\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import {\n", - " ChatPromptTemplate,\n", - " FewShotChatMessagePromptTemplate,\n", - "} from \"@langchain/core/prompts\"\n", - "\n", - "// Define the few-shot prompt.\n", - "const fewShotPrompt = new FewShotChatMessagePromptTemplate({\n", - " // The input variables select the values to pass to the example_selector\n", - " inputVariables: [\"input\"],\n", - " exampleSelector,\n", - " // Define how ech example will be formatted.\n", - " // In this case, each example will become 2 messages:\n", - " // 1 human, and 1 AI\n", - " examplePrompt: ChatPromptTemplate.fromMessages(\n", - " [[\"human\", \"{input}\"], [\"ai\", \"{output}\"]]\n", - " ),\n", - "})\n", - "\n", - "const results = await fewShotPrompt.invoke({ input: \"What's 3+3?\" });\n", - "console.log(results.toChatMessages())" - ] - }, - { - "cell_type": "markdown", - "id": "339cae7d-0eb0-44a6-852f-0267c5ff72b3", - "metadata": {}, - "source": [ - "And we can pass this few-shot chat message prompt template into another chat prompt 
template:" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "e731cb45-f0ea-422c-be37-42af2a6cb2c4", - "metadata": { - "tags": [] - }, - "outputs": [ + "cell_type": "code", + "execution_count": 4, + "id": "0fc5a02a-6249-4e92-95c3-30fff9671e8b", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import {\n", + " ChatPromptTemplate,\n", + " FewShotChatMessagePromptTemplate,\n", + "} from \"@langchain/core/prompts\"\n", + "\n", + "const examples = [\n", + " { input: \"2+2\", output: \"4\" },\n", + " { input: \"2+3\", output: \"5\" },\n", + "]" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "ChatPromptValue {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"2+3\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+3\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"5\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"5\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"2+2\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+2\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"4\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"4\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"prompt_values\" ],\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: { content: \"2+3\", additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+3\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"5\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"5\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: { content: \"2+2\", additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"2+2\",\n", - " name: undefined,\n", - " additional_kwargs: 
{},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"4\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"4\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "const finalPrompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", \"You are a wondrous wizard of math.\"],\n", - " fewShotPrompt,\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const result = await fewShotPrompt.invoke({ input: \"What's 3+3?\" });\n", - "console.log(result)" - ] - }, - { - "cell_type": "markdown", - "id": "2408ea69-1880-4ef5-a0fa-ffa8d2026aa9", - "metadata": {}, - "source": [ - "### Use with an chat model\n", - "\n", - "Finally, you can connect your model to the few-shot prompt." - ] - }, - { - "cell_type": "markdown", - "id": "ea48da1a", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "0568cbc6-5354-47f1-ab4d-dfcc616cf583", - "metadata": { - "tags": [] - }, - "outputs": [ + "cell_type": "markdown", + "id": "e8710ecc-2aa0-4172-a74c-250f6bc3d9e2", + "metadata": {}, + "source": [ + "Next, assemble them into the few-shot prompt template." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "65e72ad1-9060-47d0-91a1-bc130c8b98ac", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: { content: \"2+2\", additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+2\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"4\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"4\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: { content: \"2+3\", additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+3\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"5\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"5\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "// This is a prompt template used to format each individual example.\n", + "const examplePrompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"human\", 
\"{input}\"],\n", + " [\"ai\", \"{output}\"],\n", + " ]\n", + ")\n", + "const fewShotPrompt = new FewShotChatMessagePromptTemplate({\n", + " examplePrompt,\n", + " examples,\n", + " inputVariables: [], // no input variables\n", + "})\n", + "\n", + "const result = await fewShotPrompt.invoke({});\n", + "console.log(result.toChatMessages())" + ] + }, + { + "cell_type": "markdown", + "id": "5490bd59-b28f-46a4-bbdf-0191802dd3c5", + "metadata": {}, + "source": [ + "Finally, we assemble the final prompt as shown below, passing `fewShotPrompt` directly into the `fromMessages` factory method, and use it with a model:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "9f86d6d9-50de-41b6-b6c7-0f9980cc0187", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "const finalPrompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"You are a wondrous wizard of math.\"],\n", + " fewShotPrompt,\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "c74c6026", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "97d443b1-6fae-4b36-bede-3ff7306288a3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"A triangle does not have a square. The square of a number is the result of multiplying the number by\"\u001b[39m... 8 more characters,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"A triangle does not have a square. The square of a number is the result of multiplying the number by\"\u001b[39m... 8 more characters,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: \u001b[33m23\u001b[39m, promptTokens: \u001b[33m52\u001b[39m, totalTokens: \u001b[33m75\u001b[39m },\n", + " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", + " },\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const chain = finalPrompt.pipe(model);\n", + "\n", + "await chain.invoke({ input: \"What's the square of a triangle?\" })" + ] + }, + { + "cell_type": "markdown", + "id": "70ab7114-f07f-46be-8874-3705a25aba5f", + "metadata": {}, + "source": [ + "## Dynamic few-shot prompting\n", + "\n", + "Sometimes you may want to select only a few examples from your overall set to show based on the input. For this, you can replace the `examples` passed into `FewShotChatMessagePromptTemplate` with an `exampleSelector`. The other components remain the same as above! Our dynamic few-shot prompt template would look like:\n", + "\n", + "- `exampleSelector`: responsible for selecting few-shot examples (and the order in which they are returned) for a given input. 
These implement the [BaseExampleSelector](https://api.js.langchain.com/classes/langchain_core.example_selectors.BaseExampleSelector.html) interface. A common example is the vectorstore-backed [SemanticSimilarityExampleSelector](https://api.js.langchain.com/classes/langchain_core.example_selectors.SemanticSimilarityExampleSelector.html)\n", + "- `examplePrompt`: convert each example into 1 or more messages through its [`formatMessages`](https://api.js.langchain.com/classes/langchain_core.prompts.FewShotChatMessagePromptTemplate.html#formatMessages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n", + "\n", + "These once again can be composed with other messages and chat templates to assemble your final prompt.\n", + "\n", + "Let's walk through an example with the `SemanticSimilarityExampleSelector`. Since this implementation uses a vectorstore to select examples based on semantic similarity, we will want to first populate the store. Since the basic idea here is that we want to search for and return examples most similar to the text input, we embed the `values` of our prompt examples rather than considering the keys:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "ad66f06a-66fd-4fcc-8166-5d0e3c801e57", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { SemanticSimilarityExampleSelector } from \"@langchain/core/example_selectors\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from '@langchain/openai';\n", + "\n", + "const examples = [\n", + " { input: '2+2', output: '4' },\n", + " { input: '2+3', output: '5' },\n", + " { input: '2+4', output: '6' },\n", + " { input: 'What did the cow say to the moon?', output: 'nothing at all' },\n", + " {\n", + " input: 'Write me a poem about the moon',\n", + " output: 'One for the moon, and one for me, who are we to talk about the moon?',\n", + " },\n", + "];\n", + "\n", + "const toVectorize = examples.map((example) => `${example.input} ${example.output}`);\n", + "const embeddings = new OpenAIEmbeddings();\n", + "const vectorStore = await MemoryVectorStore.fromTexts(toVectorize, examples, embeddings);" + ] + }, + { + "cell_type": "markdown", + "id": "2f7e384a-2031-432b-951c-7ea8cf9262f1", + "metadata": {}, + "source": [ + "### Create the `exampleSelector`\n", + "\n", + "With a vectorstore created, we can create the `exampleSelector`. Here we will call it in isolation, and set `k` on it to only fetch the two example closest to the input." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "7790303a-f722-452e-8921-b14bdf20bdff", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " {\n", + " input: \u001b[32m\"What did the cow say to the moon?\"\u001b[39m,\n", + " output: \u001b[32m\"nothing at all\"\u001b[39m\n", + " },\n", + " { input: \u001b[32m\"2+4\"\u001b[39m, output: \u001b[32m\"6\"\u001b[39m }\n", + "]" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const exampleSelector = new SemanticSimilarityExampleSelector(\n", + " {\n", + " vectorStore,\n", + " k: 2\n", + " }\n", + ")\n", + "\n", + "// The prompt template will load examples by passing the input do the `select_examples` method\n", + "await exampleSelector.selectExamples({ input: \"horse\"})" + ] + }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"6\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"6\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m1\u001b[39m, promptTokens: \u001b[33m51\u001b[39m, totalTokens: \u001b[33m52\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "markdown", + "id": "cc77c40f-3f58-40a2-b757-a2a2ea43f24a", + "metadata": {}, + "source": [ + "### Create prompt template\n", + "\n", + "We now assemble the prompt template, using the `exampleSelector` created above." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "253c255e-41d7-45f6-9d88-c7a0ced4b1bd", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: { content: \"2+3\", additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+3\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"5\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"5\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: { content: \"2+2\", additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+2\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"4\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"4\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import {\n", + " ChatPromptTemplate,\n", + " FewShotChatMessagePromptTemplate,\n", + "} from \"@langchain/core/prompts\"\n", + "\n", + "// Define the few-shot prompt.\n", + "const fewShotPrompt = new FewShotChatMessagePromptTemplate({\n", + " // The input variables select the values to pass to the example_selector\n", + " inputVariables: [\"input\"],\n", + " exampleSelector,\n", + " // Define how ech example will be formatted.\n", + " // In this case, each example will become 2 messages:\n", + " // 1 human, and 1 AI\n", + " examplePrompt: ChatPromptTemplate.fromMessages(\n", + " [[\"human\", \"{input}\"], [\"ai\", \"{output}\"]]\n", + " ),\n", + "})\n", + "\n", + "const results = await fewShotPrompt.invoke({ input: \"What's 3+3?\" });\n", + "console.log(results.toChatMessages())" + ] + }, + { + "cell_type": "markdown", + "id": "339cae7d-0eb0-44a6-852f-0267c5ff72b3", + "metadata": {}, + "source": [ + "And we can pass this few-shot chat message prompt template into another chat prompt template:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "e731cb45-f0ea-422c-be37-42af2a6cb2c4", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ChatPromptValue {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " messages: [\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"2+3\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+3\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " 
lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"5\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"5\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"2+2\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+2\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"4\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"4\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " }\n", + " ]\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"prompt_values\" ],\n", + " messages: [\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: { content: \"2+3\", additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+3\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"5\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"5\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: { content: \"2+2\", additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"2+2\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"4\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"4\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "const finalPrompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"You are a wondrous wizard of math.\"],\n", + " fewShotPrompt,\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const result = await fewShotPrompt.invoke({ input: \"What's 3+3?\" });\n", + "console.log(result)" + ] + }, + { + "cell_type": "markdown", + "id": "2408ea69-1880-4ef5-a0fa-ffa8d2026aa9", + "metadata": {}, + "source": [ + "### Use with an chat model\n", + "\n", + "Finally, you can connect your model to the few-shot prompt." 
+ ] + }, + { + "cell_type": "markdown", + "id": "ea48da1a", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "0568cbc6-5354-47f1-ab4d-dfcc616cf583", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"6\"\u001b[39m,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"6\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: \u001b[33m1\u001b[39m, promptTokens: \u001b[33m51\u001b[39m, totalTokens: \u001b[33m52\u001b[39m },\n", + " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", + " },\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const chain = finalPrompt.pipe(model);\n", + "\n", + "await chain.invoke({ input: \"What's 3+3?\" })" + ] + }, + { + "cell_type": "markdown", + "id": "c87fad3c", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to add few-shot examples to your chat prompts.\n", + "\n", + "Next, check out the other how-to guides on prompt templates in this section, the related how-to guide on [few shotting with text completion models](/docs/how_to/few_shot_examples), or the other [example selector how-to guides](/docs/how_to/example_selectors/)." ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "const chain = finalPrompt.pipe(model);\n", - "\n", - "await chain.invoke({ input: \"What's 3+3?\" })" - ] - }, - { - "cell_type": "markdown", - "id": "c87fad3c", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to add few-shot examples to your chat prompts.\n", - "\n", - "Next, check out the other how-to guides on prompt templates in this section, the related how-to guide on [few shotting with text completion models](/docs/how_to/few_shot_examples), or the other [example selector how-to guides](/docs/how_to/example_selectors/)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/functions.ipynb b/docs/core_docs/docs/how_to/functions.ipynb index fac849d56f2f..ed4680fc175c 100644 --- a/docs/core_docs/docs/how_to/functions.ipynb +++ b/docs/core_docs/docs/how_to/functions.ipynb @@ -1,386 +1,386 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "ce0e08fd", - "metadata": {}, - "source": [ - "---\n", - "keywords: [RunnableLambda, LCEL]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "fbc4bf6e", - "metadata": {}, - "source": [ - "# How to run custom functions\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", - "- [Chaining runnables](/docs/how_to/sequence/)\n", - "\n", - ":::\n", - "\n", - "You can use arbitrary functions as [Runnables](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html). This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableLambda.html).\n", - "\n", - "Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single dict input and unpacks it into multiple argument.\n", - "\n", - "This guide will cover:\n", - "\n", - "- How to explicitly create a runnable from a custom function using the `RunnableLambda` constructor\n", - "- Coercion of custom functions into runnables when used in chains\n", - "- How to accept and use run metadata in your custom function\n", - "- How to stream with custom functions by having them return generators\n", - "\n", - "## Using the constructor\n", - "\n", - "Below, we explicitly wrap our custom logic using a `RunnableLambda` method:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "6bb221b3", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "\u001b[32m\"3 squared is \\\\(3^2\\\\), which means multiplying 3 by itself. \\n\"\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m\"\\\\[3^2 = 3 \\\\times 3 = 9\\\\]\\n\"\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m\"So, 3 squared\"\u001b[39m... 
6 more characters" + "cell_type": "raw", + "id": "ce0e08fd", + "metadata": {}, + "source": [ + "---\n", + "keywords: [RunnableLambda, LCEL]\n", + "---" ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const lengthFunction = (input: { foo: string }): { length: string } => {\n", - " return {\n", - " length: input.foo.length.toString(),\n", - " };\n", - "};\n", - "\n", - "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(\"What is {length} squared?\");\n", - "\n", - "const chain = RunnableLambda.from(lengthFunction)\n", - " .pipe(prompt)\n", - " .pipe(model)\n", - " .pipe(new StringOutputParser());\n", - "\n", - "await chain.invoke({ \"foo\": \"bar\" });" - ] - }, - { - "cell_type": "markdown", - "id": "4728ddd9-914d-42ce-ae9b-72c9ce8ec940", - "metadata": {}, - "source": [ - "## Automatic coercion in chains\n", - "\n", - "When using custom functions in chains with [`RunnableSequence.from`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html#from) static method, you can omit the explicit `RunnableLambda` creation and rely on coercion.\n", - "\n", - "Here's a simple example with a function that takes the output from the model and returns the first five letters of it:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "5ab39a87", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "\u001b[32m\"Once \"\u001b[39m" + "cell_type": "markdown", + "id": "fbc4bf6e", + "metadata": {}, + "source": [ + "# How to run custom functions\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n", + "- [Chaining runnables](/docs/how_to/sequence/)\n", + "\n", + ":::\n", + "\n", + "You can use arbitrary functions as [Runnables](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html). This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableLambda.html).\n", + "\n", + "Note that all inputs to these functions need to be a SINGLE argument. 
If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single dict input and unpacks it into multiple argument.\n", + "\n", + "This guide will cover:\n", + "\n", + "- How to explicitly create a runnable from a custom function using the `RunnableLambda` constructor\n", + "- Coercion of custom functions into runnables when used in chains\n", + "- How to accept and use run metadata in your custom function\n", + "- How to stream with custom functions by having them return generators\n", + "\n", + "## Using the constructor\n", + "\n", + "Below, we explicitly wrap our custom logic using a `RunnableLambda` method:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "```" ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { RunnableSequence } from \"@langchain/core/runnables\";\n", - "\n", - "const storyPrompt = ChatPromptTemplate.fromTemplate(\"Tell me a short story about {topic}\");\n", - "\n", - "const storyModel = new ChatOpenAI({ model: \"gpt-4o\" });\n", - "\n", - "const chainWithCoercedFunction = RunnableSequence.from([\n", - " storyPrompt,\n", - " storyModel,\n", - " (input) => input.content.slice(0, 5),\n", - "]);\n", - "\n", - "await chainWithCoercedFunction.invoke({ \"topic\": \"bears\" });" - ] - }, - { - "cell_type": "markdown", - "id": "c9a481d1", - "metadata": {}, - "source": [ - "Note that we didn't need to wrap the custom function `(input) => input.content.slice(0, 5)` in a `RunnableLambda` method. The custom function is **coerced** into a runnable. See [this section](/docs/how_to/sequence/#coercion) for more information.\n", - "\n", - "## Passing run metadata\n", - "\n", - "Runnable lambdas can optionally accept a [RunnableConfig](https://api.js.langchain.com/interfaces/langchain_core.runnables.RunnableConfig.html) parameter, which they can use to pass callbacks, tags, and other configuration information to nested runs." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ff0daf0c-49dd-4d21-9772-e5fa133c5f36", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " generations: [\n", - " [\n", - " {\n", - " text: \"oof\",\n", - " message: AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: \"oof\",\n", - " name: undefined,\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object],\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " generationInfo: { finish_reason: \"stop\" }\n", - " }\n", - " ]\n", - " ],\n", - " llmOutput: {\n", - " tokenUsage: { completionTokens: 2, promptTokens: 13, totalTokens: 15 }\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { type RunnableConfig } from \"@langchain/core/runnables\";\n", - "\n", - "const echo = (text: string, config: RunnableConfig) => {\n", - " const prompt = ChatPromptTemplate.fromTemplate(\"Reverse the following text: {text}\");\n", - " const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", - " const chain = prompt.pipe(model).pipe(new StringOutputParser());\n", - " return chain.invoke({ text }, config);\n", - "};\n", - "\n", - "const output = await RunnableLambda.from(echo).invoke(\"foo\", {\n", - " tags: [\"my-tag\"],\n", - " callbacks: [{\n", - " handleLLMEnd: (output) => console.log(output),\n", - " }],\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "922b48bd", - "metadata": {}, - "source": [ - "# Streaming\n", - "\n", - "You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a chain.\n", - "\n", - "The signature of these generators should be `AsyncGenerator -> AsyncGenerator`.\n", - "\n", - "These are useful for:\n", - "- implementing a custom output parser\n", - "- modifying the output of a previous step, while preserving streaming capabilities\n", - "\n", - "Here's an example of a custom output parser for comma-separated lists. First, we create a chain that generates such a list as text:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "29f55c38", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "6bb221b3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"3 squared is \\\\(3^2\\\\), which means multiplying 3 by itself. \\n\"\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"\\\\[3^2 = 3 \\\\times 3 = 9\\\\]\\n\"\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"So, 3 squared\"\u001b[39m... 
6 more characters" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const lengthFunction = (input: { foo: string }): { length: string } => {\n", + " return {\n", + " length: input.foo.length.toString(),\n", + " };\n", + "};\n", + "\n", + "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(\"What is {length} squared?\");\n", + "\n", + "const chain = RunnableLambda.from(lengthFunction)\n", + " .pipe(prompt)\n", + " .pipe(model)\n", + " .pipe(new StringOutputParser());\n", + "\n", + "await chain.invoke({ \"foo\": \"bar\" });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Lion\n", - ",\n", - " wolf\n", - ",\n", - " tiger\n", - ",\n", - " cougar\n", - ",\n", - " leopard\n", - "\n" - ] - } - ], - "source": [ - "const streamingPrompt = ChatPromptTemplate.fromTemplate(\n", - " \"Write a comma-separated list of 5 animals similar to: {animal}. Do not include numbers\"\n", - ");\n", - "\n", - "const strChain = streamingPrompt.pipe(model).pipe(new StringOutputParser());\n", - "\n", - "const stream = await strChain.stream({ animal: \"bear\" });\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "46345323", - "metadata": {}, - "source": [ - "Next, we define a custom function that will aggregate the currently streamed output and yield it when the model generates the next comma in the list:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "f08b8a5b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "4728ddd9-914d-42ce-ae9b-72c9ce8ec940", + "metadata": {}, + "source": [ + "## Automatic coercion in chains\n", + "\n", + "When using custom functions in chains with [`RunnableSequence.from`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html#from) static method, you can omit the explicit `RunnableLambda` creation and rely on coercion.\n", + "\n", + "Here's a simple example with a function that takes the output from the model and returns the first five letters of it:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ \"wolf\" ]\n", - "[ \"lion\" ]\n", - "[ \"tiger\" ]\n", - "[ \"cougar\" ]\n", - "[ \"cheetah\" ]\n" - ] - } - ], - "source": [ - "// This is a custom parser that splits an iterator of llm tokens\n", - "// into a list of strings separated by commas\n", - "async function* splitIntoList(input) {\n", - " // hold partial input until we get a comma\n", - " let buffer = \"\";\n", - " for await (const chunk of input) {\n", - " // add current chunk to buffer\n", - " buffer += chunk;\n", - " // while there are commas in the buffer\n", - " while (buffer.includes(\",\")) {\n", - " // split buffer on comma\n", - " const commaIndex = buffer.indexOf(\",\");\n", - " // yield everything before the comma\n", - " yield [buffer.slice(0, commaIndex).trim()];\n", - " // save the rest for the next iteration\n", - " buffer = buffer.slice(commaIndex + 1);\n", - " }\n", - " }\n", - " // yield the last chunk\n", - " yield [buffer.trim()];\n", - "}\n", - "\n", - "const listChain = 
strChain.pipe(splitIntoList);\n", - "\n", - "const listChainStream = await listChain.stream({\"animal\": \"bear\"});\n", - "\n", - "for await (const chunk of listChainStream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0a5adb69", - "metadata": {}, - "source": [ - "Invoking it gives a full array of values:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "9ea4ddc6", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "id": "5ab39a87", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Once \"\u001b[39m" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnableSequence } from \"@langchain/core/runnables\";\n", + "\n", + "const storyPrompt = ChatPromptTemplate.fromTemplate(\"Tell me a short story about {topic}\");\n", + "\n", + "const storyModel = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "const chainWithCoercedFunction = RunnableSequence.from([\n", + " storyPrompt,\n", + " storyModel,\n", + " (input) => input.content.slice(0, 5),\n", + "]);\n", + "\n", + "await chainWithCoercedFunction.invoke({ \"topic\": \"bears\" });" + ] + }, + { + "cell_type": "markdown", + "id": "c9a481d1", + "metadata": {}, + "source": [ + "Note that we didn't need to wrap the custom function `(input) => input.content.slice(0, 5)` in a `RunnableLambda` method. The custom function is **coerced** into a runnable. See [this section](/docs/how_to/sequence/#coercion) for more information.\n", + "\n", + "## Passing run metadata\n", + "\n", + "Runnable lambdas can optionally accept a [RunnableConfig](https://api.js.langchain.com/interfaces/langchain_core.runnables.RunnableConfig.html) parameter, which they can use to pass callbacks, tags, and other configuration information to nested runs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ff0daf0c-49dd-4d21-9772-e5fa133c5f36", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " generations: [\n", + " [\n", + " {\n", + " text: \"oof\",\n", + " message: AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: \"oof\",\n", + " name: undefined,\n", + " additional_kwargs: [Object],\n", + " response_metadata: [Object],\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " },\n", + " generationInfo: { finish_reason: \"stop\" }\n", + " }\n", + " ]\n", + " ],\n", + " llmOutput: {\n", + " tokenUsage: { completionTokens: 2, promptTokens: 13, totalTokens: 15 }\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { type RunnableConfig } from \"@langchain/core/runnables\";\n", + "\n", + "const echo = (text: string, config: RunnableConfig) => {\n", + " const prompt = ChatPromptTemplate.fromTemplate(\"Reverse the following text: {text}\");\n", + " const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", + " const chain = prompt.pipe(model).pipe(new StringOutputParser());\n", + " return chain.invoke({ text }, config);\n", + "};\n", + "\n", + "const output = await RunnableLambda.from(echo).invoke(\"foo\", {\n", + " tags: [\"my-tag\"],\n", + " callbacks: [{\n", + " handleLLMEnd: (output) => console.log(output),\n", + " }],\n", + "});" + ] + }, { - "data": { - "text/plain": [ - "[ \u001b[32m\"lion\"\u001b[39m, \u001b[32m\"tiger\"\u001b[39m, \u001b[32m\"wolf\"\u001b[39m, \u001b[32m\"cougar\"\u001b[39m, \u001b[32m\"jaguar\"\u001b[39m ]" + "cell_type": "markdown", + "id": "922b48bd", + "metadata": {}, + "source": [ + "# Streaming\n", + "\n", + "You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a chain.\n", + "\n", + "The signature of these generators should be `AsyncGenerator -> AsyncGenerator`.\n", + "\n", + "These are useful for:\n", + "- implementing a custom output parser\n", + "- modifying the output of a previous step, while preserving streaming capabilities\n", + "\n", + "Here's an example of a custom output parser for comma-separated lists. First, we create a chain that generates such a list as text:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "29f55c38", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Lion\n", + ",\n", + " wolf\n", + ",\n", + " tiger\n", + ",\n", + " cougar\n", + ",\n", + " leopard\n", + "\n" + ] + } + ], + "source": [ + "const streamingPrompt = ChatPromptTemplate.fromTemplate(\n", + " \"Write a comma-separated list of 5 animals similar to: {animal}. 
Do not include numbers\"\n", + ");\n", + "\n", + "const strChain = streamingPrompt.pipe(model).pipe(new StringOutputParser());\n", + "\n", + "const stream = await strChain.stream({ animal: \"bear\" });\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(chunk);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "46345323", + "metadata": {}, + "source": [ + "Next, we define a custom function that will aggregate the currently streamed output and yield it when the model generates the next comma in the list:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f08b8a5b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ \"wolf\" ]\n", + "[ \"lion\" ]\n", + "[ \"tiger\" ]\n", + "[ \"cougar\" ]\n", + "[ \"cheetah\" ]\n" + ] + } + ], + "source": [ + "// This is a custom parser that splits an iterator of llm tokens\n", + "// into a list of strings separated by commas\n", + "async function* splitIntoList(input) {\n", + " // hold partial input until we get a comma\n", + " let buffer = \"\";\n", + " for await (const chunk of input) {\n", + " // add current chunk to buffer\n", + " buffer += chunk;\n", + " // while there are commas in the buffer\n", + " while (buffer.includes(\",\")) {\n", + " // split buffer on comma\n", + " const commaIndex = buffer.indexOf(\",\");\n", + " // yield everything before the comma\n", + " yield [buffer.slice(0, commaIndex).trim()];\n", + " // save the rest for the next iteration\n", + " buffer = buffer.slice(commaIndex + 1);\n", + " }\n", + " }\n", + " // yield the last chunk\n", + " yield [buffer.trim()];\n", + "}\n", + "\n", + "const listChain = strChain.pipe(splitIntoList);\n", + "\n", + "const listChainStream = await listChain.stream({\"animal\": \"bear\"});\n", + "\n", + "for await (const chunk of listChainStream) {\n", + " console.log(chunk);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0a5adb69", + "metadata": {}, + "source": [ + "Invoking it gives a full array of values:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "9ea4ddc6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[ \u001b[32m\"lion\"\u001b[39m, \u001b[32m\"tiger\"\u001b[39m, \u001b[32m\"wolf\"\u001b[39m, \u001b[32m\"cougar\"\u001b[39m, \u001b[32m\"jaguar\"\u001b[39m ]" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await listChain.invoke({\"animal\": \"bear\"})" + ] + }, + { + "cell_type": "markdown", + "id": "3306ac3b", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "Now you've learned a few different ways to use custom logic within your chains, and how to implement streaming.\n", + "\n", + "To learn more, see the other how-to guides on runnables in this section." ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "await listChain.invoke({\"animal\": \"bear\"})" - ] - }, - { - "cell_type": "markdown", - "id": "3306ac3b", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "Now you've learned a few different ways to use custom logic within your chains, and how to implement streaming.\n", - "\n", - "To learn more, see the other how-to guides on runnables in this section." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/index.mdx b/docs/core_docs/docs/how_to/index.mdx index dc689959b131..bab55e9230f8 100644 --- a/docs/core_docs/docs/how_to/index.mdx +++ b/docs/core_docs/docs/how_to/index.mdx @@ -48,7 +48,7 @@ These are the core building blocks you can use when building applications. ### Prompt templates -[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model. +[Prompt Templates](/docs/concepts/prompt_templates) are responsible for formatting user input into a format that can be passed to a language model. - [How to: use few shot examples](/docs/how_to/few_shot_examples) - [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/) @@ -57,7 +57,7 @@ These are the core building blocks you can use when building applications. ### Example selectors -[Example Selectors](/docs/concepts/#example-selectors) are responsible for selecting the correct few shot examples to pass to the prompt. +[Example Selectors](/docs/concepts/example_selectors) are responsible for selecting the correct few shot examples to pass to the prompt. - [How to: use example selectors](/docs/how_to/example_selectors) - [How to: select examples by length](/docs/how_to/example_selectors_length_based) @@ -66,7 +66,7 @@ These are the core building blocks you can use when building applications. ### Chat models -[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message. +[Chat Models](/docs/concepts/chat_models) are newer forms of language models that take messages in and output a message. - [How to: do function/tool calling](/docs/how_to/tool_calling) - [How to: get models to return structured output](/docs/how_to/structured_output) @@ -92,7 +92,7 @@ These are the core building blocks you can use when building applications. ### LLMs -What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string. +What LangChain calls [LLMs](/docs/concepts/text_llms) are older forms of language models that take a string in and output a string. - [How to: cache model responses](/docs/how_to/llm_caching) - [How to: create a custom LLM class](/docs/how_to/custom_llm) @@ -101,7 +101,7 @@ What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language mo ### Output parsers -[Output Parsers](/docs/concepts/#output-parsers) are responsible for taking the output of an LLM and parsing into more structured format. +[Output Parsers](/docs/concepts/output_parsers) are responsible for taking the output of an LLM and parsing into more structured format. 
- [How to: use output parsers to parse an LLM response into structured format](/docs/how_to/output_parser_structured) - [How to: parse JSON output](/docs/how_to/output_parser_json) @@ -110,7 +110,7 @@ What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language mo ### Document loaders -[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources. +[Document Loaders](/docs/concepts/document_loaders) are responsible for loading documents from a variety of sources. - [How to: load CSV data](/docs/how_to/document_loader_csv) - [How to: load data from a directory](/docs/how_to/document_loader_directory) @@ -121,7 +121,7 @@ What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language mo ### Text splitters -[Text Splitters](/docs/concepts/#text-splitters) take a document and split into chunks that can be used for retrieval. +[Text Splitters](/docs/concepts/text_splitters) take a document and split into chunks that can be used for retrieval. - [How to: recursively split text](/docs/how_to/recursive_text_splitter) - [How to: split by character](/docs/how_to/character_text_splitter) @@ -130,7 +130,7 @@ What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language mo ### Embedding models -[Embedding Models](/docs/concepts/#embedding-models) take a piece of text and create a numerical representation of it. +[Embedding Models](/docs/concepts/embedding_models) take a piece of text and create a numerical representation of it. - [How to: embed text data](/docs/how_to/embed_text) - [How to: cache embedding results](/docs/how_to/caching_embeddings) @@ -143,7 +143,7 @@ What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language mo ### Retrievers -[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents. +[Retrievers](/docs/concepts/retrievers) are responsible for taking a query and returning relevant documents. - [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever) - [How to: generate multiple queries to retrieve data for](/docs/how_to/multiple_queries) @@ -164,7 +164,7 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying ### Tools -LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. +LangChain [Tools](/docs/concepts/tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. - [How to: create tools](/docs/how_to/custom_tools) - [How to: use built-in tools and toolkits](/docs/how_to/tools_builtin) @@ -194,7 +194,7 @@ For in depth how-to guides for agents, please check out [LangGraph](https://lang ### Callbacks -[Callbacks](/docs/concepts/#callbacks) allow you to hook into the various stages of your LLM application's execution. +[Callbacks](/docs/concepts/callbacks) allow you to hook into the various stages of your LLM application's execution. 
- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime) - [How to: attach callbacks to a module](/docs/how_to/callbacks_attach) diff --git a/docs/core_docs/docs/how_to/llm_token_usage_tracking.mdx b/docs/core_docs/docs/how_to/llm_token_usage_tracking.mdx index 360dcef46c4d..e32eca71e073 100644 --- a/docs/core_docs/docs/how_to/llm_token_usage_tracking.mdx +++ b/docs/core_docs/docs/how_to/llm_token_usage_tracking.mdx @@ -8,7 +8,7 @@ sidebar_position: 5 This guide assumes familiarity with the following concepts: -- [LLMs](/docs/concepts/#llms) +- [LLMs](/docs/concepts/text_llms) ::: diff --git a/docs/core_docs/docs/how_to/logprobs.ipynb b/docs/core_docs/docs/how_to/logprobs.ipynb index 3598e783d4a3..2e21c3e78cca 100644 --- a/docs/core_docs/docs/how_to/logprobs.ipynb +++ b/docs/core_docs/docs/how_to/logprobs.ipynb @@ -1,452 +1,452 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "78b45321-7740-4399-b2ad-459811131de3", - "metadata": {}, - "source": [ - "# How to get log probabilities\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "\n", - ":::\n", - "\n", - "Certain chat models can be configured to return token-level log probabilities representing the likelihood of a given token. This guide walks through how to get this information in LangChain." - ] - }, - { - "cell_type": "markdown", - "id": "7f5016bf-2a7b-4140-9b80-8c35c7e5c0d5", - "metadata": {}, - "source": [ - "## OpenAI\n", - "\n", - "Install the `@langchain/openai` package and set your API key:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "f88ffa0d-f4a7-482c-88de-cbec501a79b1", - "metadata": {}, - "source": [ - "For the OpenAI API to return log probabilities, we need to set the `logprobs` param to `true`. 
Then, the logprobs are included on each output [`AIMessage`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) as part of the `response_metadata`:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "d1bf0a9a-e402-4931-ab53-32899f8e0326", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "[\n", - " {\n", - " token: \u001b[32m\"Thank\"\u001b[39m,\n", - " logprob: \u001b[33m-0.70174205\u001b[39m,\n", - " bytes: [ \u001b[33m84\u001b[39m, \u001b[33m104\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m107\u001b[39m ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \u001b[32m\" you\"\u001b[39m,\n", - " logprob: \u001b[33m0\u001b[39m,\n", - " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m121\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m117\u001b[39m ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \u001b[32m\" for\"\u001b[39m,\n", - " logprob: \u001b[33m-0.000004723352\u001b[39m,\n", - " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m102\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m114\u001b[39m ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \u001b[32m\" asking\"\u001b[39m,\n", - " logprob: \u001b[33m-0.0000013856493\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m115\u001b[39m,\n", - " \u001b[33m107\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m110\u001b[39m,\n", - " \u001b[33m103\u001b[39m\n", - " ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \u001b[32m\"!\"\u001b[39m,\n", - " logprob: \u001b[33m-0.00030102333\u001b[39m,\n", - " bytes: [ \u001b[33m33\u001b[39m ],\n", - " top_logprobs: []\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "78b45321-7740-4399-b2ad-459811131de3", + "metadata": {}, + "source": [ + "# How to get log probabilities\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "\n", + ":::\n", + "\n", + "Certain chat models can be configured to return token-level log probabilities representing the likelihood of a given token. This guide walks through how to get this information in LangChain." 
] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " logprobs: true,\n", - "});\n", - "\n", - "const responseMessage = await model.invoke(\"how are you today?\");\n", - "\n", - "responseMessage.response_metadata.logprobs.content.slice(0, 5);" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee1c29-d27e-4353-8c3c-2ed7e7f95ff5", - "metadata": {}, - "source": [ - "And are part of streamed Message chunks as well:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "4bfaf309-3b23-43b7-b333-01fc4848992d", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[]\n", - "[\n", - " {\n", - " token: \"Thank\",\n", - " logprob: -0.23375113,\n", - " bytes: [ 84, 104, 97, 110, 107 ],\n", - " top_logprobs: []\n", - " }\n", - "]\n", - "[\n", - " {\n", - " token: \"Thank\",\n", - " logprob: -0.23375113,\n", - " bytes: [ 84, 104, 97, 110, 107 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" you\",\n", - " logprob: 0,\n", - " bytes: [ 32, 121, 111, 117 ],\n", - " top_logprobs: []\n", - " }\n", - "]\n", - "[\n", - " {\n", - " token: \"Thank\",\n", - " logprob: -0.23375113,\n", - " bytes: [ 84, 104, 97, 110, 107 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" you\",\n", - " logprob: 0,\n", - " bytes: [ 32, 121, 111, 117 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" for\",\n", - " logprob: -0.000004723352,\n", - " bytes: [ 32, 102, 111, 114 ],\n", - " top_logprobs: []\n", - " }\n", - "]\n", - "[\n", - " {\n", - " token: \"Thank\",\n", - " logprob: -0.23375113,\n", - " bytes: [ 84, 104, 97, 110, 107 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" you\",\n", - " logprob: 0,\n", - " bytes: [ 32, 121, 111, 117 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" for\",\n", - " logprob: -0.000004723352,\n", - " bytes: [ 32, 102, 111, 114 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" asking\",\n", - " logprob: -0.0000029352968,\n", - " bytes: [\n", - " 32, 97, 115,\n", - " 107, 105, 110,\n", - " 103\n", - " ],\n", - " top_logprobs: []\n", - " }\n", - "]\n", - "[\n", - " {\n", - " token: \"Thank\",\n", - " logprob: -0.23375113,\n", - " bytes: [ 84, 104, 97, 110, 107 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" you\",\n", - " logprob: 0,\n", - " bytes: [ 32, 121, 111, 117 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" for\",\n", - " logprob: -0.000004723352,\n", - " bytes: [ 32, 102, 111, 114 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \" asking\",\n", - " logprob: -0.0000029352968,\n", - " bytes: [\n", - " 32, 97, 115,\n", - " 107, 105, 110,\n", - " 103\n", - " ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: \"!\",\n", - " logprob: -0.00039694557,\n", - " bytes: [ 33 ],\n", - " top_logprobs: []\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "let count = 0;\n", - "const stream = await model.stream(\"How are you today?\");\n", - "let aggregateResponse;\n", - "\n", - "for await (const chunk of stream) {\n", - " if (count > 5) {\n", - " break;\n", - " }\n", - " if (aggregateResponse === undefined) {\n", - " aggregateResponse = chunk;\n", - " } else {\n", - " aggregateResponse = aggregateResponse.concat(chunk);\n", - " }\n", - " 
console.log(aggregateResponse.response_metadata.logprobs?.content);\n", - " count++;\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3c5222d2", - "metadata": {}, - "source": [ - "## `topLogprobs`\n", - "\n", - "To see alternate potential generations at each step, you can use the `topLogprobs` parameter:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "fa4d38b1", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "7f5016bf-2a7b-4140-9b80-8c35c7e5c0d5", + "metadata": {}, + "source": [ + "## OpenAI\n", + "\n", + "Install the `@langchain/openai` package and set your API key:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "f88ffa0d-f4a7-482c-88de-cbec501a79b1", + "metadata": {}, + "source": [ + "For the OpenAI API to return log probabilities, we need to set the `logprobs` param to `true`. Then, the logprobs are included on each output [`AIMessage`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html) as part of the `response_metadata`:" + ] + }, { - "data": { - "text/plain": [ - "[\n", - " {\n", - " token: \u001b[32m\"I'm\"\u001b[39m,\n", - " logprob: \u001b[33m-2.2864406\u001b[39m,\n", - " bytes: [ \u001b[33m73\u001b[39m, \u001b[33m39\u001b[39m, \u001b[33m109\u001b[39m ],\n", - " top_logprobs: [\n", - " {\n", - " token: \u001b[32m\"Thank\"\u001b[39m,\n", - " logprob: \u001b[33m-0.28644064\u001b[39m,\n", - " bytes: [ \u001b[33m84\u001b[39m, \u001b[33m104\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m107\u001b[39m ]\n", - " },\n", - " {\n", - " token: \u001b[32m\"Hello\"\u001b[39m,\n", - " logprob: \u001b[33m-2.0364406\u001b[39m,\n", - " bytes: [ \u001b[33m72\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m111\u001b[39m ]\n", - " },\n", - " { token: \u001b[32m\"I'm\"\u001b[39m, logprob: \u001b[33m-2.2864406\u001b[39m, bytes: [ \u001b[33m73\u001b[39m, \u001b[33m39\u001b[39m, \u001b[33m109\u001b[39m ] }\n", - " ]\n", - " },\n", - " {\n", - " token: \u001b[32m\" just\"\u001b[39m,\n", - " logprob: \u001b[33m-0.14442946\u001b[39m,\n", - " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m106\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m115\u001b[39m, \u001b[33m116\u001b[39m ],\n", - " top_logprobs: [\n", - " {\n", - " token: \u001b[32m\" just\"\u001b[39m,\n", - " logprob: \u001b[33m-0.14442946\u001b[39m,\n", - " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m106\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m115\u001b[39m, \u001b[33m116\u001b[39m ]\n", - " },\n", - " { token: \u001b[32m\" an\"\u001b[39m, logprob: \u001b[33m-2.2694294\u001b[39m, bytes: [ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m110\u001b[39m ] },\n", - " {\n", - " token: \u001b[32m\" here\"\u001b[39m,\n", - " logprob: \u001b[33m-4.0194297\u001b[39m,\n", - " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m104\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m101\u001b[39m ]\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " token: \u001b[32m\" a\"\u001b[39m,\n", - " logprob: \u001b[33m-0.00066632946\u001b[39m,\n", - " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m ],\n", - " top_logprobs: [\n", - " { token: \u001b[32m\" a\"\u001b[39m, logprob: \u001b[33m-0.00066632946\u001b[39m, bytes: 
[ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m ] },\n", - " {\n", - " token: \u001b[32m\" lines\"\u001b[39m,\n", - " logprob: \u001b[33m-7.750666\u001b[39m,\n", - " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m115\u001b[39m ]\n", - " },\n", - " { token: \u001b[32m\" an\"\u001b[39m, logprob: \u001b[33m-9.250667\u001b[39m, bytes: [ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m110\u001b[39m ] }\n", - " ]\n", - " },\n", - " {\n", - " token: \u001b[32m\" computer\"\u001b[39m,\n", - " logprob: \u001b[33m-0.015423919\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m99\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m109\u001b[39m,\n", - " \u001b[33m112\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m116\u001b[39m, \u001b[33m101\u001b[39m,\n", - " \u001b[33m114\u001b[39m\n", - " ],\n", - " top_logprobs: [\n", - " {\n", - " token: \u001b[32m\" computer\"\u001b[39m,\n", - " logprob: \u001b[33m-0.015423919\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m99\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m109\u001b[39m,\n", - " \u001b[33m112\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m116\u001b[39m, \u001b[33m101\u001b[39m,\n", - " \u001b[33m114\u001b[39m\n", - " ]\n", - " },\n", - " {\n", - " token: \u001b[32m\" program\"\u001b[39m,\n", - " logprob: \u001b[33m-5.265424\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m112\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m111\u001b[39m,\n", - " \u001b[33m103\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m109\u001b[39m\n", - " ]\n", - " },\n", - " {\n", - " token: \u001b[32m\" machine\"\u001b[39m,\n", - " logprob: \u001b[33m-5.390424\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m109\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m99\u001b[39m,\n", - " \u001b[33m104\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m101\u001b[39m\n", - " ]\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " token: \u001b[32m\" program\"\u001b[39m,\n", - " logprob: \u001b[33m-0.0010724656\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m112\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m111\u001b[39m,\n", - " \u001b[33m103\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m109\u001b[39m\n", - " ],\n", - " top_logprobs: [\n", - " {\n", - " token: \u001b[32m\" program\"\u001b[39m,\n", - " logprob: \u001b[33m-0.0010724656\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m112\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m111\u001b[39m,\n", - " \u001b[33m103\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m109\u001b[39m\n", - " ]\n", - " },\n", - " {\n", - " token: \u001b[32m\"-based\"\u001b[39m,\n", - " logprob: \u001b[33m-6.8760724\u001b[39m,\n", - " bytes: [ \u001b[33m45\u001b[39m, \u001b[33m98\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m115\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m100\u001b[39m ]\n", - " },\n", - " {\n", - " token: \u001b[32m\" algorithm\"\u001b[39m,\n", - " logprob: \u001b[33m-10.626073\u001b[39m,\n", - " bytes: [\n", - " \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m103\u001b[39m,\n", - " \u001b[33m111\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m116\u001b[39m,\n", - " \u001b[33m104\u001b[39m, \u001b[33m109\u001b[39m\n", - " ]\n", - " }\n", - " ]\n", - " }\n", - "]" + "cell_type": 
"code", + "execution_count": 1, + "id": "d1bf0a9a-e402-4931-ab53-32899f8e0326", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " {\n", + " token: \u001b[32m\"Thank\"\u001b[39m,\n", + " logprob: \u001b[33m-0.70174205\u001b[39m,\n", + " bytes: [ \u001b[33m84\u001b[39m, \u001b[33m104\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m107\u001b[39m ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \u001b[32m\" you\"\u001b[39m,\n", + " logprob: \u001b[33m0\u001b[39m,\n", + " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m121\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m117\u001b[39m ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \u001b[32m\" for\"\u001b[39m,\n", + " logprob: \u001b[33m-0.000004723352\u001b[39m,\n", + " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m102\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m114\u001b[39m ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \u001b[32m\" asking\"\u001b[39m,\n", + " logprob: \u001b[33m-0.0000013856493\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m115\u001b[39m,\n", + " \u001b[33m107\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m110\u001b[39m,\n", + " \u001b[33m103\u001b[39m\n", + " ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \u001b[32m\"!\"\u001b[39m,\n", + " logprob: \u001b[33m-0.00030102333\u001b[39m,\n", + " bytes: [ \u001b[33m33\u001b[39m ],\n", + " top_logprobs: []\n", + " }\n", + "]" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " logprobs: true,\n", + "});\n", + "\n", + "const responseMessage = await model.invoke(\"how are you today?\");\n", + "\n", + "responseMessage.response_metadata.logprobs.content.slice(0, 5);" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee1c29-d27e-4353-8c3c-2ed7e7f95ff5", + "metadata": {}, + "source": [ + "And are part of streamed Message chunks as well:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4bfaf309-3b23-43b7-b333-01fc4848992d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[]\n", + "[\n", + " {\n", + " token: \"Thank\",\n", + " logprob: -0.23375113,\n", + " bytes: [ 84, 104, 97, 110, 107 ],\n", + " top_logprobs: []\n", + " }\n", + "]\n", + "[\n", + " {\n", + " token: \"Thank\",\n", + " logprob: -0.23375113,\n", + " bytes: [ 84, 104, 97, 110, 107 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" you\",\n", + " logprob: 0,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " }\n", + "]\n", + "[\n", + " {\n", + " token: \"Thank\",\n", + " logprob: -0.23375113,\n", + " bytes: [ 84, 104, 97, 110, 107 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" you\",\n", + " logprob: 0,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" for\",\n", + " logprob: -0.000004723352,\n", + " bytes: [ 32, 102, 111, 114 ],\n", + " top_logprobs: []\n", + " }\n", + "]\n", + "[\n", + " {\n", + " token: \"Thank\",\n", + " logprob: -0.23375113,\n", + " bytes: [ 84, 104, 97, 110, 107 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" you\",\n", + " logprob: 0,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" for\",\n", + " logprob: 
-0.000004723352,\n", + " bytes: [ 32, 102, 111, 114 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" asking\",\n", + " logprob: -0.0000029352968,\n", + " bytes: [\n", + " 32, 97, 115,\n", + " 107, 105, 110,\n", + " 103\n", + " ],\n", + " top_logprobs: []\n", + " }\n", + "]\n", + "[\n", + " {\n", + " token: \"Thank\",\n", + " logprob: -0.23375113,\n", + " bytes: [ 84, 104, 97, 110, 107 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" you\",\n", + " logprob: 0,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" for\",\n", + " logprob: -0.000004723352,\n", + " bytes: [ 32, 102, 111, 114 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \" asking\",\n", + " logprob: -0.0000029352968,\n", + " bytes: [\n", + " 32, 97, 115,\n", + " 107, 105, 110,\n", + " 103\n", + " ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: \"!\",\n", + " logprob: -0.00039694557,\n", + " bytes: [ 33 ],\n", + " top_logprobs: []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "let count = 0;\n", + "const stream = await model.stream(\"How are you today?\");\n", + "let aggregateResponse;\n", + "\n", + "for await (const chunk of stream) {\n", + " if (count > 5) {\n", + " break;\n", + " }\n", + " if (aggregateResponse === undefined) {\n", + " aggregateResponse = chunk;\n", + " } else {\n", + " aggregateResponse = aggregateResponse.concat(chunk);\n", + " }\n", + " console.log(aggregateResponse.response_metadata.logprobs?.content);\n", + " count++;\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3c5222d2", + "metadata": {}, + "source": [ + "## `topLogprobs`\n", + "\n", + "To see alternate potential generations at each step, you can use the `topLogprobs` parameter:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "fa4d38b1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " {\n", + " token: \u001b[32m\"I'm\"\u001b[39m,\n", + " logprob: \u001b[33m-2.2864406\u001b[39m,\n", + " bytes: [ \u001b[33m73\u001b[39m, \u001b[33m39\u001b[39m, \u001b[33m109\u001b[39m ],\n", + " top_logprobs: [\n", + " {\n", + " token: \u001b[32m\"Thank\"\u001b[39m,\n", + " logprob: \u001b[33m-0.28644064\u001b[39m,\n", + " bytes: [ \u001b[33m84\u001b[39m, \u001b[33m104\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m107\u001b[39m ]\n", + " },\n", + " {\n", + " token: \u001b[32m\"Hello\"\u001b[39m,\n", + " logprob: \u001b[33m-2.0364406\u001b[39m,\n", + " bytes: [ \u001b[33m72\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m111\u001b[39m ]\n", + " },\n", + " { token: \u001b[32m\"I'm\"\u001b[39m, logprob: \u001b[33m-2.2864406\u001b[39m, bytes: [ \u001b[33m73\u001b[39m, \u001b[33m39\u001b[39m, \u001b[33m109\u001b[39m ] }\n", + " ]\n", + " },\n", + " {\n", + " token: \u001b[32m\" just\"\u001b[39m,\n", + " logprob: \u001b[33m-0.14442946\u001b[39m,\n", + " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m106\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m115\u001b[39m, \u001b[33m116\u001b[39m ],\n", + " top_logprobs: [\n", + " {\n", + " token: \u001b[32m\" just\"\u001b[39m,\n", + " logprob: \u001b[33m-0.14442946\u001b[39m,\n", + " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m106\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m115\u001b[39m, \u001b[33m116\u001b[39m ]\n", + " },\n", + " { token: \u001b[32m\" an\"\u001b[39m, logprob: \u001b[33m-2.2694294\u001b[39m, bytes: [ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, 
\u001b[33m110\u001b[39m ] },\n", + " {\n", + " token: \u001b[32m\" here\"\u001b[39m,\n", + " logprob: \u001b[33m-4.0194297\u001b[39m,\n", + " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m104\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m101\u001b[39m ]\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " token: \u001b[32m\" a\"\u001b[39m,\n", + " logprob: \u001b[33m-0.00066632946\u001b[39m,\n", + " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m ],\n", + " top_logprobs: [\n", + " { token: \u001b[32m\" a\"\u001b[39m, logprob: \u001b[33m-0.00066632946\u001b[39m, bytes: [ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m ] },\n", + " {\n", + " token: \u001b[32m\" lines\"\u001b[39m,\n", + " logprob: \u001b[33m-7.750666\u001b[39m,\n", + " bytes: [ \u001b[33m32\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m115\u001b[39m ]\n", + " },\n", + " { token: \u001b[32m\" an\"\u001b[39m, logprob: \u001b[33m-9.250667\u001b[39m, bytes: [ \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m110\u001b[39m ] }\n", + " ]\n", + " },\n", + " {\n", + " token: \u001b[32m\" computer\"\u001b[39m,\n", + " logprob: \u001b[33m-0.015423919\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m99\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m109\u001b[39m,\n", + " \u001b[33m112\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m116\u001b[39m, \u001b[33m101\u001b[39m,\n", + " \u001b[33m114\u001b[39m\n", + " ],\n", + " top_logprobs: [\n", + " {\n", + " token: \u001b[32m\" computer\"\u001b[39m,\n", + " logprob: \u001b[33m-0.015423919\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m99\u001b[39m, \u001b[33m111\u001b[39m, \u001b[33m109\u001b[39m,\n", + " \u001b[33m112\u001b[39m, \u001b[33m117\u001b[39m, \u001b[33m116\u001b[39m, \u001b[33m101\u001b[39m,\n", + " \u001b[33m114\u001b[39m\n", + " ]\n", + " },\n", + " {\n", + " token: \u001b[32m\" program\"\u001b[39m,\n", + " logprob: \u001b[33m-5.265424\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m112\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m111\u001b[39m,\n", + " \u001b[33m103\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m109\u001b[39m\n", + " ]\n", + " },\n", + " {\n", + " token: \u001b[32m\" machine\"\u001b[39m,\n", + " logprob: \u001b[33m-5.390424\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m109\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m99\u001b[39m,\n", + " \u001b[33m104\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m110\u001b[39m, \u001b[33m101\u001b[39m\n", + " ]\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " token: \u001b[32m\" program\"\u001b[39m,\n", + " logprob: \u001b[33m-0.0010724656\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m112\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m111\u001b[39m,\n", + " \u001b[33m103\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m109\u001b[39m\n", + " ],\n", + " top_logprobs: [\n", + " {\n", + " token: \u001b[32m\" program\"\u001b[39m,\n", + " logprob: \u001b[33m-0.0010724656\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m112\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m111\u001b[39m,\n", + " \u001b[33m103\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m109\u001b[39m\n", + " ]\n", + " },\n", + " {\n", + " token: \u001b[32m\"-based\"\u001b[39m,\n", + " logprob: \u001b[33m-6.8760724\u001b[39m,\n", + " bytes: [ \u001b[33m45\u001b[39m, 
\u001b[33m98\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m115\u001b[39m, \u001b[33m101\u001b[39m, \u001b[33m100\u001b[39m ]\n", + " },\n", + " {\n", + " token: \u001b[32m\" algorithm\"\u001b[39m,\n", + " logprob: \u001b[33m-10.626073\u001b[39m,\n", + " bytes: [\n", + " \u001b[33m32\u001b[39m, \u001b[33m97\u001b[39m, \u001b[33m108\u001b[39m, \u001b[33m103\u001b[39m,\n", + " \u001b[33m111\u001b[39m, \u001b[33m114\u001b[39m, \u001b[33m105\u001b[39m, \u001b[33m116\u001b[39m,\n", + " \u001b[33m104\u001b[39m, \u001b[33m109\u001b[39m\n", + " ]\n", + " }\n", + " ]\n", + " }\n", + "]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const modelWithTopLogprobs = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " logprobs: true,\n", + " topLogprobs: 3,\n", + "});\n", + "\n", + "const res = await modelWithTopLogprobs.invoke(\"how are you today?\");\n", + "\n", + "res.response_metadata.logprobs.content.slice(0, 5);" + ] + }, + { + "cell_type": "markdown", + "id": "19766435", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to get logprobs from OpenAI models in LangChain.\n", + "\n", + "Next, check out the other how-to guides chat models in this section, like [how to get a model to return structured output](/docs/how_to/structured_output) or [how to track token usage](/docs/how_to/chat_token_usage_tracking)." ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "const modelWithTopLogprobs = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " logprobs: true,\n", - " topLogprobs: 3,\n", - "});\n", - "\n", - "const res = await modelWithTopLogprobs.invoke(\"how are you today?\");\n", - "\n", - "res.response_metadata.logprobs.content.slice(0, 5);" - ] - }, - { - "cell_type": "markdown", - "id": "19766435", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to get logprobs from OpenAI models in LangChain.\n", - "\n", - "Next, check out the other how-to guides chat models in this section, like [how to get a model to return structured output](/docs/how_to/structured_output) or [how to track token usage](/docs/how_to/chat_token_usage_tracking)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/message_history.ipynb b/docs/core_docs/docs/how_to/message_history.ipynb index dbca922041ff..67640b10c11f 100644 --- a/docs/core_docs/docs/how_to/message_history.ipynb +++ b/docs/core_docs/docs/how_to/message_history.ipynb @@ -1,586 +1,586 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "8165bd4c", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "keywords: [memory]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "f47033eb", - "metadata": {}, - "source": [ - "# How to add message history\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chaining runnables](/docs/how_to/sequence/)\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Chat Messages](/docs/concepts/#message-types)\n", - "\n", - ":::\n", - "\n", - "```{=mdx}\n", - ":::note\n", - "\n", - "This guide previously covered the [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html) abstraction. You can access this version of the guide in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/message_history/).\n", - "\n", - "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", - "\n", - ":::\n", - "```\n", - "\n", - "\n", - "Passing conversation state into and out a chain is vital when building a chatbot. LangGraph implements a built-in persistence layer, allowing chain states to be automatically persisted in memory, or external backends such as SQLite, Postgres or Redis. Details can be found in the LangGraph persistence documentation.\n", - "\n", - "In this guide we demonstrate how to add persistence to arbitrary LangChain runnables by wrapping them in a minimal LangGraph application. This lets us persist the message history and other elements of the chain's state, simplifying the development of multi-turn applications. 
It also supports multiple threads, enabling a single application to interact separately with multiple users.\n", - "\n", - "## Setup\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - " @langchain/core @langchain/langgraph\n", - "\n", - "```\n", - "\n", - "Let’s also set up a chat model that we’ll use for the below examples.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```\n" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "8a4e4708", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "1f6121bc-2080-4ccc-acf0-f77de4bc951d", - "metadata": {}, - "source": [ - "## Example: message inputs\n", - "\n", - "Adding memory to a [chat model](/docs/concepts/#chat-models) provides a simple example. Chat models accept a list of messages as input and output a message. LangGraph includes a built-in `MessagesState` that we can use for this purpose.\n", - "\n", - "Below, we:\n", - "1. Define the graph state to be a list of messages;\n", - "2. Add a single node to the graph that calls a chat model;\n", - "3. Compile the graph with an in-memory checkpointer to store messages between runs.\n", - "\n", - ":::info\n", - "\n", - "The output of a LangGraph application is its [state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/).\n", - "\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "f691a73a-a866-4354-9fff-8315605e2b8f", - "metadata": {}, - "outputs": [], - "source": [ - "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", - "\n", - "// Define the function that calls the model\n", - "const callModel = async (state: typeof MessagesAnnotation.State) => {\n", - " const response = await llm.invoke(state.messages);\n", - " // Update message history with response:\n", - " return { messages: response };\n", - "};\n", - "\n", - "// Define a new graph\n", - "const workflow = new StateGraph(MessagesAnnotation)\n", - " // Define the (single) node in the graph\n", - " .addNode(\"model\", callModel)\n", - " .addEdge(START, \"model\")\n", - " .addEdge(\"model\", END);\n", - "\n", - "// Add memory\n", - "const memory = new MemorySaver();\n", - "const app = workflow.compile({ checkpointer: memory });" - ] - }, - { - "cell_type": "markdown", - "id": "c0b396a8-f81e-4139-b4b2-75adf61d8179", - "metadata": {}, - "source": [ - "When we run the application, we pass in a configuration object that specifies a `thread_id`. This ID is used to distinguish conversational threads (e.g., between different users)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 32, - "id": "e4309511-2140-4d91-8f5f-ea3661e6d179", - "metadata": {}, - "outputs": [], - "source": [ - "import { v4 as uuidv4 } from \"uuid\";\n", - "\n", - "const config = { configurable: { thread_id: uuidv4() } }" - ] - }, - { - "cell_type": "markdown", - "id": "108c45a2-4971-4120-ba64-9a4305a414bb", - "metadata": {}, - "source": [ - "We can then invoke the application:" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "72a5ff6c-501f-4151-8dd9-f600f70554be", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABTqCeKnMQmG9IH8dNF5vPjsgXtcM\",\n", - " \"content\": \"Hi Bob! How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 10,\n", - " \"promptTokens\": 12,\n", - " \"totalTokens\": 22\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 12,\n", - " \"output_tokens\": 10,\n", - " \"total_tokens\": 22\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const input = [\n", - " {\n", - " role: \"user\",\n", - " content: \"Hi! I'm Bob.\",\n", - " }\n", - "]\n", - "const output = await app.invoke({ messages: input }, config)\n", - "// The output contains all messages in the state.\n", - "// This will long the last message in the conversation.\n", - "console.log(output.messages[output.messages.length - 1]);" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "id": "5931fb35-0fac-40e7-8ac6-b14cb4e926cd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "8165bd4c", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [memory]\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABTqD5jrJXeKCpvoIDp47fvgw2OPn\",\n", - " \"content\": \"Your name is Bob. How can I help you today, Bob?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 34,\n", - " \"totalTokens\": 48\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 34,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 48\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const input2 = [\n", - " {\n", - " role: \"user\",\n", - " content: \"What's my name?\",\n", - " }\n", - "]\n", - "const output2 = await app.invoke({ messages: input2 }, config)\n", - "console.log(output2.messages[output2.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "id": "91de6d12-881d-4d23-a421-f2e3bf829b79", - "metadata": {}, - "source": [ - "Note that states are separated for different threads. 
If we issue the same query to a thread with a new `thread_id`, the model indicates that it does not know the answer:" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "id": "6f12c26f-8913-4484-b2c5-b49eda2e6d7d", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "f47033eb", + "metadata": {}, + "source": [ + "# How to add message history\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chaining runnables](/docs/how_to/sequence/)\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Chat Messages](/docs/concepts/messages)\n", + "\n", + ":::\n", + "\n", + "```{=mdx}\n", + ":::note\n", + "\n", + "This guide previously covered the [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html) abstraction. You can access this version of the guide in the [v0.2 docs](https://js.langchain.com/v0.2/docs/how_to/message_history/).\n", + "\n", + "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", + "\n", + ":::\n", + "```\n", + "\n", + "\n", + "Passing conversation state into and out a chain is vital when building a chatbot. LangGraph implements a built-in persistence layer, allowing chain states to be automatically persisted in memory, or external backends such as SQLite, Postgres or Redis. Details can be found in the LangGraph persistence documentation.\n", + "\n", + "In this guide we demonstrate how to add persistence to arbitrary LangChain runnables by wrapping them in a minimal LangGraph application. This lets us persist the message history and other elements of the chain's state, simplifying the development of multi-turn applications. It also supports multiple threads, enabling a single application to interact separately with multiple users.\n", + "\n", + "## Setup\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + " @langchain/core @langchain/langgraph\n", + "\n", + "```\n", + "\n", + "Let’s also set up a chat model that we’ll use for the below examples.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABTqDkctxwmXjeGOZpK6Km8jdCqdl\",\n", - " \"content\": \"I'm sorry, but I don't have access to personal information about users. 
How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 21,\n", - " \"promptTokens\": 11,\n", - " \"totalTokens\": 32\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 11,\n", - " \"output_tokens\": 21,\n", - " \"total_tokens\": 32\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const config2 = { configurable: { thread_id: uuidv4() } }\n", - "const input3 = [\n", - " {\n", - " role: \"user\",\n", - " content: \"What's my name?\",\n", - " }\n", - "]\n", - "const output3 = await app.invoke({ messages: input3 }, config2)\n", - "console.log(output3.messages[output3.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "id": "6749ea95-3382-4843-bb96-cfececb9e4e5", - "metadata": {}, - "source": [ - "## Example: object inputs\n", - "\n", - "LangChain runnables often accept multiple inputs via separate keys in a single object argument. A common example is a prompt template with multiple parameters.\n", - "\n", - "Whereas before our runnable was a chat model, here we chain together a prompt template and chat model." - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "id": "6e7a402a-0994-4fc5-a607-fb990a248aa4", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"Answer in {language}.\"],\n", - " new MessagesPlaceholder(\"messages\"),\n", - "])\n", - "\n", - "const runnable = prompt.pipe(llm);" - ] - }, - { - "cell_type": "markdown", - "id": "f83107bd-ae61-45e1-a57e-94ab043aad4b", - "metadata": {}, - "source": [ - "For this scenario, we define the graph state to include these parameters (in addition to the message history). We then define a single-node graph in the same way as before.\n", - "\n", - "Note that in the below state:\n", - "- Updates to the `messages` list will append messages;\n", - "- Updates to the `language` string will overwrite the string." 
- ] - }, - { - "cell_type": "code", - "execution_count": 37, - "id": "267429ea-be0f-4f80-8daf-c63d881a1436", - "metadata": {}, - "outputs": [], - "source": [ - "import { START, END, StateGraph, MemorySaver, MessagesAnnotation, Annotation } from \"@langchain/langgraph\";\n", - "\n", - "// Define the State\n", - "// highlight-next-line\n", - "const GraphAnnotation = Annotation.Root({\n", - " // highlight-next-line\n", - " language: Annotation(),\n", - " // Spread `MessagesAnnotation` into the state to add the `messages` field.\n", - " // highlight-next-line\n", - " ...MessagesAnnotation.spec,\n", - "})\n", - "\n", - "\n", - "// Define the function that calls the model\n", - "const callModel2 = async (state: typeof GraphAnnotation.State) => {\n", - " const response = await runnable.invoke(state);\n", - " // Update message history with response:\n", - " return { messages: [response] };\n", - "};\n", - "\n", - "const workflow2 = new StateGraph(GraphAnnotation)\n", - " .addNode(\"model\", callModel2)\n", - " .addEdge(START, \"model\")\n", - " .addEdge(\"model\", END);\n", - "\n", - "const app2 = workflow2.compile({ checkpointer: new MemorySaver() });" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "id": "f3844fb4-58d7-43c8-b427-6d9f64d7411b", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 30, + "id": "8a4e4708", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", - " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. ¿Cómo te llamas?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 19,\n", - " \"promptTokens\": 19,\n", - " \"totalTokens\": 38\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 19,\n", - " \"output_tokens\": 19,\n", - " \"total_tokens\": 38\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const config3 = { configurable: { thread_id: uuidv4() } }\n", - "const input4 = {\n", - " messages: [\n", - " {\n", - " role: \"user\",\n", - " content: \"What's my name?\",\n", - " }\n", - " ],\n", - " language: \"Spanish\",\n", - "} \n", - "const output4 = await app2.invoke(input4, config3)\n", - "console.log(output4.messages[output4.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "id": "7df47824-ef18-4a6e-a416-345ec9203f88", - "metadata": {}, - "source": [ - "## Managing message history\n", - "\n", - "The message history (and other elements of the application state) can be accessed via `.getState`:" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "id": "1cbd6d82-43c1-4d11-98af-5c3ad9cd9b3b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "1f6121bc-2080-4ccc-acf0-f77de4bc951d", + "metadata": {}, + "source": [ + "## Example: message inputs\n", + "\n", + "Adding memory to a [chat model](/docs/concepts/chat_models) provides a simple example. Chat models accept a list of messages as input and output a message. 
LangGraph includes a built-in `MessagesAnnotation` that we can use for this purpose.\n",
+        "\n",
+        "Below, we:\n",
+        "1. Define the graph state to be a list of messages;\n",
+        "2. Add a single node to the graph that calls a chat model;\n",
+        "3. Compile the graph with an in-memory checkpointer to store messages between runs.\n",
+        "\n",
+        ":::info\n",
+        "\n",
+        "The output of a LangGraph application is its [state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/).\n",
+        "\n",
+        ":::"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 31,
+      "id": "f691a73a-a866-4354-9fff-8315605e2b8f",
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n",
+        "\n",
+        "// Define the function that calls the model\n",
+        "const callModel = async (state: typeof MessagesAnnotation.State) => {\n",
+        "  const response = await llm.invoke(state.messages);\n",
+        "  // Update message history with response:\n",
+        "  return { messages: response };\n",
+        "};\n",
+        "\n",
+        "// Define a new graph\n",
+        "const workflow = new StateGraph(MessagesAnnotation)\n",
+        "  // Define the (single) node in the graph\n",
+        "  .addNode(\"model\", callModel)\n",
+        "  .addEdge(START, \"model\")\n",
+        "  .addEdge(\"model\", END);\n",
+        "\n",
+        "// Add memory\n",
+        "const memory = new MemorySaver();\n",
+        "const app = workflow.compile({ checkpointer: memory });"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "c0b396a8-f81e-4139-b4b2-75adf61d8179",
+      "metadata": {},
+      "source": [
+        "When we run the application, we pass in a configuration object that specifies a `thread_id`. This ID is used to distinguish conversational threads (e.g., between different users)."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 32,
+      "id": "e4309511-2140-4d91-8f5f-ea3661e6d179",
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import { v4 as uuidv4 } from \"uuid\";\n",
+        "\n",
+        "const config = { configurable: { thread_id: uuidv4() } }"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "108c45a2-4971-4120-ba64-9a4305a414bb",
+      "metadata": {},
+      "source": [
+        "We can then invoke the application:"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 33,
+      "id": "72a5ff6c-501f-4151-8dd9-f600f70554be",
+      "metadata": {},
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "AIMessage {\n",
+            "  \"id\": \"chatcmpl-ABTqCeKnMQmG9IH8dNF5vPjsgXtcM\",\n",
+            "  \"content\": \"Hi Bob! How can I assist you today?\",\n",
+            "  \"additional_kwargs\": {},\n",
+            "  \"response_metadata\": {\n",
+            "    \"tokenUsage\": {\n",
+            "      \"completionTokens\": 10,\n",
+            "      \"promptTokens\": 12,\n",
+            "      \"totalTokens\": 22\n",
+            "    },\n",
+            "    \"finish_reason\": \"stop\",\n",
+            "    \"system_fingerprint\": \"fp_e375328146\"\n",
+            "  },\n",
+            "  \"tool_calls\": [],\n",
+            "  \"invalid_tool_calls\": [],\n",
+            "  \"usage_metadata\": {\n",
+            "    \"input_tokens\": 12,\n",
+            "    \"output_tokens\": 10,\n",
+            "    \"total_tokens\": 22\n",
+            "  }\n",
+            "}\n"
+          ]
+        }
+      ],
+      "source": [
+        "const input = [\n",
+        "  {\n",
+        "    role: \"user\",\n",
+        "    content: \"Hi! I'm Bob.\",\n",
+        "  }\n",
+        "]\n",
+        "const output = await app.invoke({ messages: input }, config)\n",
+        "// The output contains all messages in the state.\n",
+        "// This will log the last message in the conversation.\n",
+        "console.log(output.messages[output.messages.length - 1]);"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 34,
+      "id": "5931fb35-0fac-40e7-8ac6-b14cb4e926cd",
+      "metadata": {},
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "AIMessage {\n",
+            "  \"id\": \"chatcmpl-ABTqD5jrJXeKCpvoIDp47fvgw2OPn\",\n",
+            "  \"content\": \"Your name is Bob. How can I help you today, Bob?\",\n",
+            "  \"additional_kwargs\": {},\n",
+            "  \"response_metadata\": {\n",
+            "    \"tokenUsage\": {\n",
+            "      \"completionTokens\": 14,\n",
+            "      \"promptTokens\": 34,\n",
+            "      \"totalTokens\": 48\n",
+            "    },\n",
+            "    \"finish_reason\": \"stop\",\n",
+            "    \"system_fingerprint\": \"fp_e375328146\"\n",
+            "  },\n",
+            "  \"tool_calls\": [],\n",
+            "  \"invalid_tool_calls\": [],\n",
+            "  \"usage_metadata\": {\n",
+            "    \"input_tokens\": 34,\n",
+            "    \"output_tokens\": 14,\n",
+            "    \"total_tokens\": 48\n",
+            "  }\n",
+            "}\n"
+          ]
+        }
+      ],
+      "source": [
+        "const input2 = [\n",
+        "  {\n",
+        "    role: \"user\",\n",
+        "    content: \"What's my name?\",\n",
+        "  }\n",
+        "]\n",
+        "const output2 = await app.invoke({ messages: input2 }, config)\n",
+        "console.log(output2.messages[output2.messages.length - 1]);"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "91de6d12-881d-4d23-a421-f2e3bf829b79",
+      "metadata": {},
+      "source": [
+        "Note that states are separated for different threads. If we issue the same query to a thread with a new `thread_id`, the model indicates that it does not know the answer:"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 35,
+      "id": "6f12c26f-8913-4484-b2c5-b49eda2e6d7d",
+      "metadata": {},
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "AIMessage {\n",
+            "  \"id\": \"chatcmpl-ABTqDkctxwmXjeGOZpK6Km8jdCqdl\",\n",
+            "  \"content\": \"I'm sorry, but I don't have access to personal information about users.
How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 21,\n", + " \"promptTokens\": 11,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_52a7f40b0b\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 11,\n", + " \"output_tokens\": 21,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config2 = { configurable: { thread_id: uuidv4() } }\n", + "const input3 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"What's my name?\",\n", + " }\n", + "]\n", + "const output3 = await app.invoke({ messages: input3 }, config2)\n", + "console.log(output3.messages[output3.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "id": "6749ea95-3382-4843-bb96-cfececb9e4e5", + "metadata": {}, + "source": [ + "## Example: object inputs\n", + "\n", + "LangChain runnables often accept multiple inputs via separate keys in a single object argument. A common example is a prompt template with multiple parameters.\n", + "\n", + "Whereas before our runnable was a chat model, here we chain together a prompt template and chat model." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Language: Spanish\n", - "[\n", - " HumanMessage {\n", - " \"content\": \"What's my name?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", - " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. ¿Cómo te llamas?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 19,\n", - " \"promptTokens\": 19,\n", - " \"totalTokens\": 38\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " },\n", - " HumanMessage {\n", - " \"content\": \"test\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 36, + "id": "6e7a402a-0994-4fc5-a607-fb990a248aa4", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"Answer in {language}.\"],\n", + " new MessagesPlaceholder(\"messages\"),\n", + "])\n", + "\n", + "const runnable = prompt.pipe(llm);" + ] + }, + { + "cell_type": "markdown", + "id": "f83107bd-ae61-45e1-a57e-94ab043aad4b", + "metadata": {}, + "source": [ + "For this scenario, we define the graph state to include these parameters (in addition to the message history). We then define a single-node graph in the same way as before.\n", + "\n", + "Note that in the below state:\n", + "- Updates to the `messages` list will append messages;\n", + "- Updates to the `language` string will overwrite the string." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 37, + "id": "267429ea-be0f-4f80-8daf-c63d881a1436", + "metadata": {}, + "outputs": [], + "source": [ + "import { START, END, StateGraph, MemorySaver, MessagesAnnotation, Annotation } from \"@langchain/langgraph\";\n", + "\n", + "// Define the State\n", + "// highlight-next-line\n", + "const GraphAnnotation = Annotation.Root({\n", + " // highlight-next-line\n", + " language: Annotation(),\n", + " // Spread `MessagesAnnotation` into the state to add the `messages` field.\n", + " // highlight-next-line\n", + " ...MessagesAnnotation.spec,\n", + "})\n", + "\n", + "\n", + "// Define the function that calls the model\n", + "const callModel2 = async (state: typeof GraphAnnotation.State) => {\n", + " const response = await runnable.invoke(state);\n", + " // Update message history with response:\n", + " return { messages: [response] };\n", + "};\n", + "\n", + "const workflow2 = new StateGraph(GraphAnnotation)\n", + " .addNode(\"model\", callModel2)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "const app2 = workflow2.compile({ checkpointer: new MemorySaver() });" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "f3844fb4-58d7-43c8-b427-6d9f64d7411b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", + " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. ¿Cómo te llamas?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 19,\n", + " \"totalTokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 19,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 38\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config3 = { configurable: { thread_id: uuidv4() } }\n", + "const input4 = {\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"What's my name?\",\n", + " }\n", + " ],\n", + " language: \"Spanish\",\n", + "} \n", + "const output4 = await app2.invoke(input4, config3)\n", + "console.log(output4.messages[output4.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "id": "7df47824-ef18-4a6e-a416-345ec9203f88", + "metadata": {}, + "source": [ + "## Managing message history\n", + "\n", + "The message history (and other elements of the application state) can be accessed via `.getState`:" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "1cbd6d82-43c1-4d11-98af-5c3ad9cd9b3b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Language: Spanish\n", + "[\n", + " HumanMessage {\n", + " \"content\": \"What's my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", + " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. 
¿Cómo te llamas?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 19,\n", + " \"totalTokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const state = (await app2.getState(config3)).values\n", + "\n", + "console.log(`Language: ${state.language}`);\n", + "console.log(state.messages)" + ] + }, + { + "cell_type": "markdown", + "id": "acfbccda-0bd6-4c4d-ae6e-8118520314e1", + "metadata": {}, + "source": [ + "We can also update the state via `.updateState`. For example, we can manually append a new message:" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "e98310d7-8ab1-461d-94a7-dd419494ab8d", + "metadata": {}, + "outputs": [], + "source": [ + "const _ = await app2.updateState(config3, { messages: [{ role: \"user\", content: \"test\" }]})" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "74ab3691-6f3b-49c5-aad0-2a90fc2a1e6a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Language: Spanish\n", + "[\n", + " HumanMessage {\n", + " \"content\": \"What's my name?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABTqFnCASRB5UhZ7XAbbf5T0Bva4U\",\n", + " \"content\": \"Lo siento, pero no tengo suficiente información para saber tu nombre. ¿Cómo te llamas?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 19,\n", + " \"totalTokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"test\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const state2 = (await app2.getState(config3)).values\n", + "\n", + "console.log(`Language: ${state2.language}`);\n", + "console.log(state2.messages)" + ] + }, + { + "cell_type": "markdown", + "id": "e4a1ea00-d7ff-4f18-b9ec-9aec5909d027", + "metadata": {}, + "source": [ + "For details on managing state, including deleting messages, see the LangGraph documentation:\n", + "\n", + "- [How to delete messages](https://langchain-ai.github.io/langgraphjs/how-tos/delete-messages/)\n", + "- [How to view and update past graph state](https://langchain-ai.github.io/langgraphjs/how-tos/time-travel/)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const state2 = (await app2.getState(config3)).values\n", - "\n", - "console.log(`Language: ${state2.language}`);\n", - "console.log(state2.messages)" - ] - }, - { - "cell_type": "markdown", - "id": "e4a1ea00-d7ff-4f18-b9ec-9aec5909d027", - "metadata": {}, - "source": [ - "For details on managing state, including deleting messages, see the LangGraph documentation:\n", - "\n", - "- [How to delete 
messages](https://langchain-ai.github.io/langgraphjs/how-tos/delete-messages/)\n", - "- [How to view and update past graph state](https://langchain-ai.github.io/langgraphjs/how-tos/time-travel/)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/migrate_agent.ipynb b/docs/core_docs/docs/how_to/migrate_agent.ipynb index 33fbf3332002..816f45e40fac 100644 --- a/docs/core_docs/docs/how_to/migrate_agent.ipynb +++ b/docs/core_docs/docs/how_to/migrate_agent.ipynb @@ -1,1340 +1,1340 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "8f21bf6b", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "keywords: [create_react_agent, create_react_agent()]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "579c24a2", - "metadata": {}, - "source": [ - "# How to migrate from legacy LangChain agents to LangGraph\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "- [Agents](/docs/concepts/#agents)\n", - "- [LangGraph.js](https://langchain-ai.github.io/langgraphjs/)\n", - "- [Tool calling](/docs/how_to/tool_calling/)\n", - "\n", - ":::\n", - "\n", - "Here we focus on how to move from legacy LangChain agents to more flexible [LangGraph](https://langchain-ai.github.io/langgraphjs/) agents.\n", - "LangChain agents (the\n", - "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain.agents.AgentExecutor.html)\n", - "in particular) have multiple configuration parameters. In this notebook we will\n", - "show how those parameters map to the LangGraph\n", - "react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html) prebuilt helper method.\n", - "\n", - "For more information on how to build agentic workflows in LangGraph, check out\n", - "the [docs here](https://langchain-ai.github.io/langgraphjs/how-tos/).\n", - "\n", - "#### Prerequisites\n", - "\n", - "This how-to guide uses OpenAI's `\"gpt-4o-mini\"` as the LLM. If you are running this guide as a notebook, set your OpenAI API key as shown below:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "24ef582f", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "// process.env.OPENAI_API_KEY = \"...\";\n", - "\n", - "// Optional, add tracing in LangSmith\n", - "// process.env.LANGCHAIN_API_KEY = \"ls...\";\n", - "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", - "// process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", - "// process.env.LANGCHAIN_PROJECT = \"How to migrate: LangGraphJS\";\n", - "\n", - "// Reduce tracing latency if you are not in a serverless environment\n", - "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";" - ] - }, - { - "cell_type": "markdown", - "id": "c1ff5c79", - "metadata": {}, - "source": [ - "## Basic Usage\n", - "\n", - "For basic creation and usage of a tool-calling ReAct-style agent, the\n", - "functionality is the same. 
First, let's define a model and tool(s), then we'll\n", - "use those to create an agent.\n", - "\n", - ":::note\n", - "The `tool` function is available in `@langchain/core` version 0.2.7 and above.\n", - "\n", - "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) instead.\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "1222c5e2", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "import { tool } from \"@langchain/core/tools\";\n", - "import { z } from \"zod\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - "});\n", - "\n", - "const magicTool = tool(async ({ input }: { input: number }) => {\n", - " return `${input + 2}`;\n", - "}, {\n", - " name: \"magic_function\",\n", - " description: \"Applies a magic function to an input.\",\n", - " schema: z.object({\n", - " input: z.number(),\n", - " }),\n", - "});\n", - "\n", - "const tools = [magicTool];\n", - "\n", - "const query = \"what is the value of magic_function(3)?\";" - ] - }, - { - "cell_type": "markdown", - "id": "768d9e8c", - "metadata": {}, - "source": [ - "For the LangChain\n", - "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html),\n", - "we define a prompt with a placeholder for the agent's scratchpad. The agent can\n", - "be invoked as follows:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "e52bf891", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "{\n", - " input: \u001b[32m\"what is the value of magic_function(3)?\"\u001b[39m,\n", - " output: \u001b[32m\"The value of `magic_function(3)` is 5.\"\u001b[39m\n", - "}" + "cell_type": "raw", + "id": "8f21bf6b", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [create_react_agent, create_react_agent()]\n", + "---" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import {\n", - " ChatPromptTemplate,\n", - "} from \"@langchain/core/prompts\";\n", - "import { createToolCallingAgent } from \"langchain/agents\";\n", - "import { AgentExecutor } from \"langchain/agents\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " [\"placeholder\", \"{chat_history}\"],\n", - " [\"human\", \"{input}\"],\n", - " [\"placeholder\", \"{agent_scratchpad}\"],\n", - "]);\n", - "\n", - "const agent = createToolCallingAgent({\n", - " llm,\n", - " tools,\n", - " prompt\n", - "});\n", - "const agentExecutor = new AgentExecutor({\n", - " agent,\n", - " tools,\n", - "});\n", - "\n", - "await agentExecutor.invoke({ input: query });" - ] - }, - { - "cell_type": "markdown", - "id": "ba3e5db9", - "metadata": {}, - "source": [ - "LangGraph's off-the-shelf\n", - "[react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html)\n", - "manages a state that is defined by a list of messages. In a similar way to the `AgentExecutor`, it will continue to\n", - "process the list until there are no tool calls in the agent's output. To kick it\n", - "off, we input a list of messages. 
The output will contain the entire state of\n", - "the graph - in this case, the conversation history and messages representing intermediate tool calls:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "dcda7082", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " \"id\": \"eeef343c-80d1-4ccb-86af-c109343689cd\",\n", - " \"content\": \"what is the value of magic_function(3)?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7exs2uRqEipaZ7MtRbXnqu0vT0Da\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 55,\n", - " \"totalTokens\": 69\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": {\n", - " \"input\": 3\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 55,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 69\n", - " }\n", - " },\n", - " ToolMessage {\n", - " \"id\": \"1001bf20-7cde-4f8b-81f1-1faa654a8bb4\",\n", - " \"content\": \"5\",\n", - " \"name\": \"magic_function\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7exsTk3ilzGzC8DuY8GpnKOaGdvx\",\n", - " \"content\": \"The value of `magic_function(3)` is 5.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 78,\n", - " \"totalTokens\": 92\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_54e2f484be\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 78,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 92\n", - " }\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", - "\n", - "const app = createReactAgent({\n", - " llm,\n", - " tools,\n", - "});\n", - "\n", - "let agentOutput = await app.invoke({\n", - " messages: [\n", - " {\n", - " role: \"user\",\n", - " content: query\n", - " },\n", - " ],\n", - "});\n", - "\n", - "console.log(agentOutput);" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "b0a390a2", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "579c24a2", + "metadata": {}, + "source": [ + "# How to migrate from legacy LangChain agents to LangGraph\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "- [Agents](/docs/concepts/agents)\n", + "- [LangGraph.js](https://langchain-ai.github.io/langgraphjs/)\n", + "- [Tool calling](/docs/how_to/tool_calling/)\n", + "\n", + ":::\n", + "\n", + "Here we focus on how to move from legacy LangChain agents to more 
flexible [LangGraph](https://langchain-ai.github.io/langgraphjs/) agents.\n", + "LangChain agents (the\n", + "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain.agents.AgentExecutor.html)\n", + "in particular) have multiple configuration parameters. In this notebook we will\n", + "show how those parameters map to the LangGraph\n", + "react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html) prebuilt helper method.\n", + "\n", + "For more information on how to build agentic workflows in LangGraph, check out\n", + "the [docs here](https://langchain-ai.github.io/langgraphjs/how-tos/).\n", + "\n", + "#### Prerequisites\n", + "\n", + "This how-to guide uses OpenAI's `\"gpt-4o-mini\"` as the LLM. If you are running this guide as a notebook, set your OpenAI API key as shown below:" + ] + }, { - "data": { - "text/plain": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " \"id\": \"eeef343c-80d1-4ccb-86af-c109343689cd\",\n", - " \"content\": \"what is the value of magic_function(3)?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7exs2uRqEipaZ7MtRbXnqu0vT0Da\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 55,\n", - " \"totalTokens\": 69\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": {\n", - " \"input\": 3\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 55,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 69\n", - " }\n", - " },\n", - " ToolMessage {\n", - " \"id\": \"1001bf20-7cde-4f8b-81f1-1faa654a8bb4\",\n", - " \"content\": \"5\",\n", - " \"name\": \"magic_function\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7exsTk3ilzGzC8DuY8GpnKOaGdvx\",\n", - " \"content\": \"The value of `magic_function(3)` is 5.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 78,\n", - " \"totalTokens\": 92\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_54e2f484be\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 78,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 92\n", - " }\n", - " },\n", - " HumanMessage {\n", - " \"id\": \"1f2a9f41-c8ff-48fe-9d93-e663ee9279ff\",\n", - " \"content\": \"Pardon?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7exyTe9Ofs63Ex3sKwRx3wWksNup\",\n", - " \"content\": \"The result of calling the `magic_function` with an input of 3 is 5.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - 
" \"tokenUsage\": {\n", - " \"completionTokens\": 20,\n", - " \"promptTokens\": 102,\n", - " \"totalTokens\": 122\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 102,\n", - " \"output_tokens\": 20,\n", - " \"total_tokens\": 122\n", - " }\n", - " }\n", - " ]\n", - "}" + "cell_type": "code", + "execution_count": 1, + "id": "24ef582f", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "// process.env.OPENAI_API_KEY = \"...\";\n", + "\n", + "// Optional, add tracing in LangSmith\n", + "// process.env.LANGCHAIN_API_KEY = \"ls...\";\n", + "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "// process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "// process.env.LANGCHAIN_PROJECT = \"How to migrate: LangGraphJS\";\n", + "\n", + "// Reduce tracing latency if you are not in a serverless environment\n", + "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";" ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const messageHistory = agentOutput.messages;\n", - "const newQuery = \"Pardon?\";\n", - "\n", - "agentOutput = await app.invoke({\n", - " messages: [\n", - " ...messageHistory,\n", - " { role: \"user\", content: newQuery }\n", - " ],\n", - "});\n" - ] - }, - { - "cell_type": "markdown", - "id": "41a12f7a", - "metadata": {}, - "source": [ - "## Prompt Templates\n", - "\n", - "With legacy LangChain agents you have to pass in a prompt template. You can use\n", - "this to control the agent.\n", - "\n", - "With LangGraph\n", - "[react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html),\n", - "by default there is no prompt. You can achieve similar control over the agent in\n", - "a few ways:\n", - "\n", - "1. Pass in a system message as input\n", - "2. Initialize the agent with a system message\n", - "3. Initialize the agent with a function to transform messages before passing to\n", - " the model.\n", - "\n", - "Let's take a look at all of these below. We will pass in custom instructions to\n", - "get the agent to respond in Spanish.\n", - "\n", - "First up, using LangChain's `AgentExecutor`:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "4c5266cc", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [ + }, { - "data": { - "text/plain": [ - "{\n", - " input: \u001b[32m\"what is the value of magic_function(3)?\"\u001b[39m,\n", - " output: \u001b[32m\"El valor de `magic_function(3)` es 5.\"\u001b[39m\n", - "}" + "cell_type": "markdown", + "id": "c1ff5c79", + "metadata": {}, + "source": [ + "## Basic Usage\n", + "\n", + "For basic creation and usage of a tool-calling ReAct-style agent, the\n", + "functionality is the same. 
First, let's define a model and tool(s), then we'll\n", + "use those to create an agent.\n", + "\n", + ":::note\n", + "The `tool` function is available in `@langchain/core` version 0.2.7 and above.\n", + "\n", + "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) instead.\n", + ":::" ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const spanishPrompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant. Respond only in Spanish.\"],\n", - " [\"placeholder\", \"{chat_history}\"],\n", - " [\"human\", \"{input}\"],\n", - " [\"placeholder\", \"{agent_scratchpad}\"],\n", - "]);\n", - "\n", - "const spanishAgent = createToolCallingAgent({\n", - " llm,\n", - " tools,\n", - " prompt: spanishPrompt,\n", - "});\n", - "const spanishAgentExecutor = new AgentExecutor({\n", - " agent: spanishAgent,\n", - " tools,\n", - "});\n", - "\n", - "await spanishAgentExecutor.invoke({ input: query });\n" - ] - }, - { - "cell_type": "markdown", - "id": "c54b374d", - "metadata": {}, - "source": [ - "Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html).\n", - "\n", - "LangGraph's prebuilt `create_react_agent` does not take a prompt template directly as a parameter, but instead takes a `messages_modifier` parameter. This modifies messages before they are passed into the model, and can be one of four values:\n", - "\n", - "- A `SystemMessage`, which is added to the beginning of the list of messages.\n", - "- A `string`, which is converted to a `SystemMessage` and added to the beginning of the list of messages.\n", - "- A `Callable`, which should take in a list of messages. The output is then passed to the language model.\n", - "- Or a [`Runnable`](/docs/concepts/#langchain-expression-language), which should should take in a list of messages. 
The output is then passed to the language model.\n", - "\n", - "Here's how it looks in action:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "38a751ba", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [ + }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-A7ey8LGWAs8ldrRRcO5wlHM85w9T8\",\n", - " \"content\": \"El valor de `magic_function(3)` es 5.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 89,\n", - " \"totalTokens\": 103\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 89,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 103\n", - " }\n", - "}" + "cell_type": "code", + "execution_count": 2, + "id": "1222c5e2", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + "});\n", + "\n", + "const magicTool = tool(async ({ input }: { input: number }) => {\n", + " return `${input + 2}`;\n", + "}, {\n", + " name: \"magic_function\",\n", + " description: \"Applies a magic function to an input.\",\n", + " schema: z.object({\n", + " input: z.number(),\n", + " }),\n", + "});\n", + "\n", + "const tools = [magicTool];\n", + "\n", + "const query = \"what is the value of magic_function(3)?\";" ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const systemMessage = \"You are a helpful assistant. Respond only in Spanish.\";\n", - "\n", - "// This could also be a SystemMessage object\n", - "// const systemMessage = new SystemMessage(\"You are a helpful assistant. Respond only in Spanish.\");\n", - "\n", - "const appWithSystemMessage = createReactAgent({\n", - " llm,\n", - " tools,\n", - " messageModifier: systemMessage,\n", - "});\n", - "\n", - "agentOutput = await appWithSystemMessage.invoke({\n", - " messages: [\n", - " { role: \"user\", content: query }\n", - " ],\n", - "});\n", - "agentOutput.messages[agentOutput.messages.length - 1];" - ] - }, - { - "cell_type": "markdown", - "id": "7622d8f7", - "metadata": {}, - "source": [ - "We can also pass in an arbitrary function. This function should take in a list\n", - "of messages and output a list of messages. We can do all types of arbitrary\n", - "formatting of messages here. In this cases, let's just add a `SystemMessage` to\n", - "the start of the list of messages.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "c7120cdd", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " input: \"what is the value of magic_function(3)?\",\n", - " output: \"El valor de magic_function(3) es 5. ¡Pandemonium!\"\n", - "}\n" - ] - } - ], - "source": [ - "import { BaseMessage, SystemMessage, HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const modifyMessages = (messages: BaseMessage[]) => {\n", - " return [\n", - " new SystemMessage(\"You are a helpful assistant. Respond only in Spanish.\"),\n", - " ...messages,\n", - " new HumanMessage(\"Also say 'Pandemonium!' 
after the answer.\"),\n", - " ];\n", - "};\n", - "\n", - "const appWithMessagesModifier = createReactAgent({\n", - " llm,\n", - " tools,\n", - " messageModifier: modifyMessages,\n", - "});\n", - "\n", - "agentOutput = await appWithMessagesModifier.invoke({\n", - " messages: [{ role: \"user\", content: query }],\n", - "});\n", - "\n", - "console.log({\n", - " input: query,\n", - " output: agentOutput.messages[agentOutput.messages.length - 1].content,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "44337a14", - "metadata": {}, - "source": [ - "## Memory\n", - "\n", - "With LangChain's\n", - "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html), you could add chat memory classes so it can engage in a multi-turn conversation.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "4d67ba36", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "768d9e8c", + "metadata": {}, + "source": [ + "For the LangChain\n", + "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html),\n", + "we define a prompt with a placeholder for the agent's scratchpad. The agent can\n", + "be invoked as follows:\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "The output of the magic function for the input 3 is 5.\n", - "---\n", - "Yes, your name is Polly! How can I assist you today?\n", - "---\n", - "The output of the magic function for the input 3 is 5.\n" - ] - } - ], - "source": [ - "import { ChatMessageHistory } from \"@langchain/community/stores/message/in_memory\";\n", - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", - "\n", - "const memory = new ChatMessageHistory();\n", - "const agentExecutorWithMemory = new RunnableWithMessageHistory({\n", - " runnable: agentExecutor,\n", - " getMessageHistory: () => memory,\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - "});\n", - "\n", - "const config = { configurable: { sessionId: \"test-session\" } };\n", - "\n", - "agentOutput = await agentExecutorWithMemory.invoke(\n", - " { input: \"Hi, I'm polly! What's the output of magic_function of 3?\" },\n", - " config,\n", - ");\n", - "\n", - "console.log(agentOutput.output);\n", - "\n", - "agentOutput = await agentExecutorWithMemory.invoke(\n", - " { input: \"Remember my name?\" },\n", - " config,\n", - ");\n", - "\n", - "console.log(\"---\");\n", - "console.log(agentOutput.output);\n", - "console.log(\"---\");\n", - "\n", - "agentOutput = await agentExecutorWithMemory.invoke(\n", - " { input: \"what was that output again?\" },\n", - " config,\n", - ");\n", - "\n", - "console.log(agentOutput.output);" - ] - }, - { - "cell_type": "markdown", - "id": "a7fe4e21", - "metadata": {}, - "source": [ - "#### In LangGraph\n", - "\n", - "The equivalent to this type of memory in LangGraph is [persistence](https://langchain-ai.github.io/langgraphjs/how-tos/persistence/), and [checkpointing](https://langchain-ai.github.io/langgraphjs/reference/interfaces/index.Checkpoint.html).\n", - "\n", - "Add a `checkpointer` to the agent and you get chat memory for free. You'll need to also pass a `thread_id` within the `configurable` field in the `config` parameter. 
Notice that we only pass one message into each request, but the model still has context from previous runs:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "bbc64438", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "id": "e52bf891", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " input: \u001b[32m\"what is the value of magic_function(3)?\"\u001b[39m,\n", + " output: \u001b[32m\"The value of `magic_function(3)` is 5.\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import {\n", + " ChatPromptTemplate,\n", + "} from \"@langchain/core/prompts\";\n", + "import { createToolCallingAgent } from \"langchain/agents\";\n", + "import { AgentExecutor } from \"langchain/agents\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"placeholder\", \"{chat_history}\"],\n", + " [\"human\", \"{input}\"],\n", + " [\"placeholder\", \"{agent_scratchpad}\"],\n", + "]);\n", + "\n", + "const agent = createToolCallingAgent({\n", + " llm,\n", + " tools,\n", + " prompt\n", + "});\n", + "const agentExecutor = new AgentExecutor({\n", + " agent,\n", + " tools,\n", + "});\n", + "\n", + "await agentExecutor.invoke({ input: query });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hi Polly! The output of the magic function for the input 3 is 5.\n", - "---\n", - "Yes, your name is Polly!\n", - "---\n", - "The output of the magic function for the input 3 was 5.\n" - ] - } - ], - "source": [ - "import { MemorySaver } from \"@langchain/langgraph\";\n", - "\n", - "const checkpointer = new MemorySaver();\n", - "const appWithMemory = createReactAgent({\n", - " llm: llm,\n", - " tools: tools,\n", - " checkpointSaver: checkpointer\n", - "});\n", - "\n", - "const langGraphConfig = {\n", - " configurable: {\n", - " thread_id: \"test-thread\",\n", - " },\n", - "};\n", - "\n", - "agentOutput = await appWithMemory.invoke(\n", - " {\n", - " messages: [\n", - " {\n", - " role: \"user\",\n", - " content: \"Hi, I'm polly! 
What's the output of magic_function of 3?\",\n", - " }\n", - " ],\n", - " },\n", - " langGraphConfig,\n", - ");\n", - "\n", - "console.log(agentOutput.messages[agentOutput.messages.length - 1].content);\n", - "console.log(\"---\");\n", - "\n", - "agentOutput = await appWithMemory.invoke(\n", - " {\n", - " messages: [\n", - " { role: \"user\", content: \"Remember my name?\" }\n", - " ]\n", - " },\n", - " langGraphConfig,\n", - ");\n", - "\n", - "console.log(agentOutput.messages[agentOutput.messages.length - 1].content);\n", - "console.log(\"---\");\n", - "\n", - "agentOutput = await appWithMemory.invoke(\n", - " {\n", - " messages: [\n", - " { role: \"user\", content: \"what was that output again?\" }\n", - " ]\n", - " },\n", - " langGraphConfig,\n", - ");\n", - "\n", - "console.log(agentOutput.messages[agentOutput.messages.length - 1].content);" - ] - }, - { - "cell_type": "markdown", - "id": "2997b4da", - "metadata": {}, - "source": [ - "## Iterating through steps\n", - "\n", - "With LangChain's\n", - "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html),\n", - "you could iterate over the steps using the\n", - "[`stream`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#stream) method:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "5c928049", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [ + "cell_type": "markdown", + "id": "ba3e5db9", + "metadata": {}, + "source": [ + "LangGraph's off-the-shelf\n", + "[react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html)\n", + "manages a state that is defined by a list of messages. In a similar way to the `AgentExecutor`, it will continue to\n", + "process the list until there are no tool calls in the agent's output. To kick it\n", + "off, we input a list of messages. 
The output will contain the entire state of\n", + "the graph - in this case, the conversation history and messages representing intermediate tool calls:\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " intermediateSteps: [\n", - " {\n", - " action: {\n", - " tool: \"magic_function\",\n", - " toolInput: { input: 3 },\n", - " toolCallId: \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", - " log: 'Invoking \"magic_function\" with {\"input\":3}\\n',\n", - " messageLog: [\n", - " AIMessageChunk {\n", - " \"id\": \"chatcmpl-A7eziUrDmLSSMoiOskhrfbsHqx4Sd\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"index\": 0,\n", - " \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"prompt\": 0,\n", - " \"completion\": 0,\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": {\n", - " \"input\": 3\n", - " },\n", - " \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", - " \"type\": \"tool_call\"\n", - " }\n", - " ],\n", - " \"tool_call_chunks\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": \"{\\\"input\\\":3}\",\n", - " \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", - " \"index\": 0,\n", - " \"type\": \"tool_call_chunk\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 61,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 75\n", - " }\n", - " }\n", - " ]\n", - " },\n", - " observation: \"5\"\n", - " }\n", - " ]\n", - "}\n", - "{ output: \"The value of `magic_function(3)` is 5.\" }\n" - ] - } - ], - "source": [ - "const langChainStream = await agentExecutor.stream({ input: query });\n", - "\n", - "for await (const step of langChainStream) {\n", - " console.log(step);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "cd371818", - "metadata": {}, - "source": [ - "#### In LangGraph\n", - "\n", - "In LangGraph, things are handled natively using the stream method.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "2be89a30", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 4, + "id": "dcda7082", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"eeef343c-80d1-4ccb-86af-c109343689cd\",\n", + " \"content\": \"what is the value of magic_function(3)?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7exs2uRqEipaZ7MtRbXnqu0vT0Da\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 55,\n", + " \"totalTokens\": 69\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": {\n", + " \"input\": 3\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": 
\"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 55,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 69\n", + " }\n", + " },\n", + " ToolMessage {\n", + " \"id\": \"1001bf20-7cde-4f8b-81f1-1faa654a8bb4\",\n", + " \"content\": \"5\",\n", + " \"name\": \"magic_function\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7exsTk3ilzGzC8DuY8GpnKOaGdvx\",\n", + " \"content\": \"The value of `magic_function(3)` is 5.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 78,\n", + " \"totalTokens\": 92\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_54e2f484be\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 78,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 92\n", + " }\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "\n", + "const app = createReactAgent({\n", + " llm,\n", + " tools,\n", + "});\n", + "\n", + "let agentOutput = await app.invoke({\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: query\n", + " },\n", + " ],\n", + "});\n", + "\n", + "console.log(agentOutput);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7ezu8hirCENjdjR2GpLjkzXFTEmp\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 55,\n", - " \"totalTokens\": 69\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": {\n", - " \"input\": 3\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 55,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 69\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "{\n", - " tools: {\n", - " messages: [\n", - " ToolMessage {\n", - " \"content\": \"5\",\n", - " \"name\": \"magic_function\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\"\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7ezuTrh8GC550eKa1ZqRZGjpY5zh\",\n", - " \"content\": \"The value of `magic_function(3)` is 5.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 78,\n", - " \"totalTokens\": 92\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [],\n", - " 
\"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 78,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 92\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const langGraphStream = await app.stream(\n", - " { messages: [{ role: \"user\", content: query }] },\n", - " { streamMode: \"updates\" },\n", - ");\n", - "\n", - "for await (const step of langGraphStream) {\n", - " console.log(step);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "ce023792", - "metadata": {}, - "source": [ - "## `returnIntermediateSteps`\n", - "\n", - "Setting this parameter on AgentExecutor allows users to access\n", - "intermediate_steps, which pairs agent actions (e.g., tool invocations) with\n", - "their outcomes." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "77ce2771", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [ + "cell_type": "code", + "execution_count": 5, + "id": "b0a390a2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " messages: [\n", + " HumanMessage {\n", + " \"id\": \"eeef343c-80d1-4ccb-86af-c109343689cd\",\n", + " \"content\": \"what is the value of magic_function(3)?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7exs2uRqEipaZ7MtRbXnqu0vT0Da\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 55,\n", + " \"totalTokens\": 69\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": {\n", + " \"input\": 3\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 55,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 69\n", + " }\n", + " },\n", + " ToolMessage {\n", + " \"id\": \"1001bf20-7cde-4f8b-81f1-1faa654a8bb4\",\n", + " \"content\": \"5\",\n", + " \"name\": \"magic_function\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_MtwWLn000BQHeSYQKsbxYNR0\"\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7exsTk3ilzGzC8DuY8GpnKOaGdvx\",\n", + " \"content\": \"The value of `magic_function(3)` is 5.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 78,\n", + " \"totalTokens\": 92\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_54e2f484be\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 78,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 92\n", + " }\n", + " },\n", + " HumanMessage {\n", + " \"id\": \"1f2a9f41-c8ff-48fe-9d93-e663ee9279ff\",\n", + " \"content\": \"Pardon?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7exyTe9Ofs63Ex3sKwRx3wWksNup\",\n", + " \"content\": \"The result of 
calling the `magic_function` with an input of 3 is 5.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 20,\n", + " \"promptTokens\": 102,\n", + " \"totalTokens\": 122\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 102,\n", + " \"output_tokens\": 20,\n", + " \"total_tokens\": 122\n", + " }\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const messageHistory = agentOutput.messages;\n", + "const newQuery = \"Pardon?\";\n", + "\n", + "agentOutput = await app.invoke({\n", + " messages: [\n", + " ...messageHistory,\n", + " { role: \"user\", content: newQuery }\n", + " ],\n", + "});\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " action: {\n", - " tool: \"magic_function\",\n", - " toolInput: { input: 3 },\n", - " toolCallId: \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", - " log: 'Invoking \"magic_function\" with {\"input\":3}\\n',\n", - " messageLog: [\n", - " AIMessageChunk {\n", - " \"id\": \"chatcmpl-A7f0NdSRSUJsBP6ENTpiQD4LzpBAH\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"index\": 0,\n", - " \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"prompt\": 0,\n", - " \"completion\": 0,\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_54e2f484be\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": {\n", - " \"input\": 3\n", - " },\n", - " \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", - " \"type\": \"tool_call\"\n", - " }\n", - " ],\n", - " \"tool_call_chunks\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": \"{\\\"input\\\":3}\",\n", - " \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", - " \"index\": 0,\n", - " \"type\": \"tool_call_chunk\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 61,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 75\n", - " }\n", - " }\n", - " ]\n", - " },\n", - " observation: \"5\"\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const agentExecutorWithIntermediateSteps = new AgentExecutor({\n", - " agent,\n", - " tools,\n", - " returnIntermediateSteps: true,\n", - "});\n", - "\n", - "const result = await agentExecutorWithIntermediateSteps.invoke({\n", - " input: query,\n", - "});\n", - "\n", - "console.log(result.intermediateSteps);\n" - ] - }, - { - "cell_type": "markdown", - "id": "050845ae", - "metadata": {}, - "source": [ - "By default the\n", - "[react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html)\n", - "in LangGraph appends all messages to the central state. 
Therefore, it is easy to\n", - "see any intermediate steps by just looking at the full state.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "2f9cdfa8", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [ + "cell_type": "markdown", + "id": "41a12f7a", + "metadata": {}, + "source": [ + "## Prompt Templates\n", + "\n", + "With legacy LangChain agents you have to pass in a prompt template. You can use\n", + "this to control the agent.\n", + "\n", + "With LangGraph\n", + "[react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html),\n", + "by default there is no prompt. You can achieve similar control over the agent in\n", + "a few ways:\n", + "\n", + "1. Pass in a system message as input\n", + "2. Initialize the agent with a system message\n", + "3. Initialize the agent with a function to transform messages before passing to\n", + " the model.\n", + "\n", + "Let's take a look at all of these below. We will pass in custom instructions to\n", + "get the agent to respond in Spanish.\n", + "\n", + "First up, using LangChain's `AgentExecutor`:\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " HumanMessage {\n", - " \"id\": \"46a825b2-13a3-4f19-b1aa-7716c53eb247\",\n", - " \"content\": \"what is the value of magic_function(3)?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7f0iUuWktC8gXztWZCjofqyCozY2\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 55,\n", - " \"totalTokens\": 69\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_483d39d857\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"magic_function\",\n", - " \"args\": {\n", - " \"input\": 3\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 55,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 69\n", - " }\n", - " },\n", - " ToolMessage {\n", - " \"id\": \"ac6aa309-bbfb-46cd-ba27-cbdbfd848705\",\n", - " \"content\": \"5\",\n", - " \"name\": \"magic_function\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\"\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-A7f0i7iHyDUV6is6sgwtcXivmFZ1x\",\n", - " \"content\": \"The value of `magic_function(3)` is 5.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 78,\n", - " \"totalTokens\": 92\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_54e2f484be\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 78,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 92\n", - " }\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "agentOutput = await app.invoke({\n", - " messages: [\n", - " { role: \"user\", content: query },\n", - " ]\n", - "});\n", - "\n", - 
"console.log(agentOutput.messages);" - ] - }, - { - "cell_type": "markdown", - "id": "f6e671e6", - "metadata": {}, - "source": [ - "## `maxIterations`\n", - "\n", - "`AgentExecutor` implements a `maxIterations` parameter, whereas this is\n", - "controlled via `recursionLimit` in LangGraph.\n", - "\n", - "Note that in the LangChain `AgentExecutor`, an \"iteration\" includes a full turn of tool\n", - "invocation and execution. In LangGraph, each step contributes to the recursion\n", - "limit, so we will need to multiply by two (and add one) to get equivalent\n", - "results.\n", - "\n", - "Here's an example of how you'd set this parameter with the legacy `AgentExecutor`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1cca9d11", - "metadata": { - "lines_to_next_cell": 2 - }, - "outputs": [], - "source": [ - "const badMagicTool = tool(async ({ input: _input }) => {\n", - " return \"Sorry, there was a temporary error. Please try again with the same input.\";\n", - "}, {\n", - " name: \"magic_function\",\n", - " description: \"Applies a magic function to an input.\",\n", - " schema: z.object({\n", - " input: z.string(),\n", - " }),\n", - "});\n", - "\n", - "const badTools = [badMagicTool];\n", - "\n", - "const spanishAgentExecutorWithMaxIterations = new AgentExecutor({\n", - " agent: createToolCallingAgent({\n", - " llm,\n", - " tools: badTools,\n", - " prompt: spanishPrompt,\n", - " }),\n", - " tools: badTools,\n", - " verbose: true,\n", - " maxIterations: 2,\n", - "});\n", - "\n", - "await spanishAgentExecutorWithMaxIterations.invoke({ input: query });" - ] - }, - { - "cell_type": "markdown", - "id": "245e064c", - "metadata": {}, - "source": [ - "If the recursion limit is reached in LangGraph.js, the framework will raise a specific exception type that we can catch and manage similarly to AgentExecutor." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "2f5e7d58", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 6, + "id": "4c5266cc", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " input: \u001b[32m\"what is the value of magic_function(3)?\"\u001b[39m,\n", + " output: \u001b[32m\"El valor de `magic_function(3)` es 5.\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const spanishPrompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant. Respond only in Spanish.\"],\n", + " [\"placeholder\", \"{chat_history}\"],\n", + " [\"human\", \"{input}\"],\n", + " [\"placeholder\", \"{agent_scratchpad}\"],\n", + "]);\n", + "\n", + "const spanishAgent = createToolCallingAgent({\n", + " llm,\n", + " tools,\n", + " prompt: spanishPrompt,\n", + "});\n", + "const spanishAgentExecutor = new AgentExecutor({\n", + " agent: spanishAgent,\n", + " tools,\n", + "});\n", + "\n", + "await spanishAgentExecutor.invoke({ input: query });\n" + ] + }, + { + "cell_type": "markdown", + "id": "c54b374d", + "metadata": {}, + "source": [ + "Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html).\n", + "\n", + "LangGraph's prebuilt `create_react_agent` does not take a prompt template directly as a parameter, but instead takes a `messages_modifier` parameter. 
This modifies messages before they are passed into the model, and can be one of four values:\n", + "\n", + "- A `SystemMessage`, which is added to the beginning of the list of messages.\n", + "- A `string`, which is converted to a `SystemMessage` and added to the beginning of the list of messages.\n", + "- A `Callable`, which should take in a list of messages. The output is then passed to the language model.\n", + "- Or a [`Runnable`](/docs/concepts/lcel), which should should take in a list of messages. The output is then passed to the language model.\n", + "\n", + "Here's how it looks in action:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "38a751ba", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-A7ey8LGWAs8ldrRRcO5wlHM85w9T8\",\n", + " \"content\": \"El valor de `magic_function(3)` es 5.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 89,\n", + " \"totalTokens\": 103\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 89,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 103\n", + " }\n", + "}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const systemMessage = \"You are a helpful assistant. Respond only in Spanish.\";\n", + "\n", + "// This could also be a SystemMessage object\n", + "// const systemMessage = new SystemMessage(\"You are a helpful assistant. Respond only in Spanish.\");\n", + "\n", + "const appWithSystemMessage = createReactAgent({\n", + " llm,\n", + " tools,\n", + " messageModifier: systemMessage,\n", + "});\n", + "\n", + "agentOutput = await appWithSystemMessage.invoke({\n", + " messages: [\n", + " { role: \"user\", content: query }\n", + " ],\n", + "});\n", + "agentOutput.messages[agentOutput.messages.length - 1];" + ] + }, + { + "cell_type": "markdown", + "id": "7622d8f7", + "metadata": {}, + "source": [ + "We can also pass in an arbitrary function. This function should take in a list\n", + "of messages and output a list of messages. We can do all types of arbitrary\n", + "formatting of messages here. In this cases, let's just add a `SystemMessage` to\n", + "the start of the list of messages.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c7120cdd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " input: \"what is the value of magic_function(3)?\",\n", + " output: \"El valor de magic_function(3) es 5. ¡Pandemonium!\"\n", + "}\n" + ] + } + ], + "source": [ + "import { BaseMessage, SystemMessage, HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const modifyMessages = (messages: BaseMessage[]) => {\n", + " return [\n", + " new SystemMessage(\"You are a helpful assistant. Respond only in Spanish.\"),\n", + " ...messages,\n", + " new HumanMessage(\"Also say 'Pandemonium!' 
after the answer.\"),\n", + " ];\n", + "};\n", + "\n", + "const appWithMessagesModifier = createReactAgent({\n", + " llm,\n", + " tools,\n", + " messageModifier: modifyMessages,\n", + "});\n", + "\n", + "agentOutput = await appWithMessagesModifier.invoke({\n", + " messages: [{ role: \"user\", content: query }],\n", + "});\n", + "\n", + "console.log({\n", + " input: query,\n", + " output: agentOutput.messages[agentOutput.messages.length - 1].content,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "44337a14", + "metadata": {}, + "source": [ + "## Memory\n", + "\n", + "With LangChain's\n", + "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html), you could add chat memory classes so it can engage in a multi-turn conversation.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "4d67ba36", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The output of the magic function for the input 3 is 5.\n", + "---\n", + "Yes, your name is Polly! How can I assist you today?\n", + "---\n", + "The output of the magic function for the input 3 is 5.\n" + ] + } + ], + "source": [ + "import { ChatMessageHistory } from \"@langchain/community/stores/message/in_memory\";\n", + "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "\n", + "const memory = new ChatMessageHistory();\n", + "const agentExecutorWithMemory = new RunnableWithMessageHistory({\n", + " runnable: agentExecutor,\n", + " getMessageHistory: () => memory,\n", + " inputMessagesKey: \"input\",\n", + " historyMessagesKey: \"chat_history\",\n", + "});\n", + "\n", + "const config = { configurable: { sessionId: \"test-session\" } };\n", + "\n", + "agentOutput = await agentExecutorWithMemory.invoke(\n", + " { input: \"Hi, I'm polly! What's the output of magic_function of 3?\" },\n", + " config,\n", + ");\n", + "\n", + "console.log(agentOutput.output);\n", + "\n", + "agentOutput = await agentExecutorWithMemory.invoke(\n", + " { input: \"Remember my name?\" },\n", + " config,\n", + ");\n", + "\n", + "console.log(\"---\");\n", + "console.log(agentOutput.output);\n", + "console.log(\"---\");\n", + "\n", + "agentOutput = await agentExecutorWithMemory.invoke(\n", + " { input: \"what was that output again?\" },\n", + " config,\n", + ");\n", + "\n", + "console.log(agentOutput.output);" + ] + }, + { + "cell_type": "markdown", + "id": "a7fe4e21", + "metadata": {}, + "source": [ + "#### In LangGraph\n", + "\n", + "The equivalent to this type of memory in LangGraph is [persistence](https://langchain-ai.github.io/langgraphjs/how-tos/persistence/), and [checkpointing](https://langchain-ai.github.io/langgraphjs/reference/interfaces/index.Checkpoint.html).\n", + "\n", + "Add a `checkpointer` to the agent and you get chat memory for free. You'll need to also pass a `thread_id` within the `configurable` field in the `config` parameter. Notice that we only pass one message into each request, but the model still has context from previous runs:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "bbc64438", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hi Polly! 
The output of the magic function for the input 3 is 5.\n", + "---\n", + "Yes, your name is Polly!\n", + "---\n", + "The output of the magic function for the input 3 was 5.\n" + ] + } + ], + "source": [ + "import { MemorySaver } from \"@langchain/langgraph\";\n", + "\n", + "const checkpointer = new MemorySaver();\n", + "const appWithMemory = createReactAgent({\n", + " llm: llm,\n", + " tools: tools,\n", + " checkpointSaver: checkpointer\n", + "});\n", + "\n", + "const langGraphConfig = {\n", + " configurable: {\n", + " thread_id: \"test-thread\",\n", + " },\n", + "};\n", + "\n", + "agentOutput = await appWithMemory.invoke(\n", + " {\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi, I'm polly! What's the output of magic_function of 3?\",\n", + " }\n", + " ],\n", + " },\n", + " langGraphConfig,\n", + ");\n", + "\n", + "console.log(agentOutput.messages[agentOutput.messages.length - 1].content);\n", + "console.log(\"---\");\n", + "\n", + "agentOutput = await appWithMemory.invoke(\n", + " {\n", + " messages: [\n", + " { role: \"user\", content: \"Remember my name?\" }\n", + " ]\n", + " },\n", + " langGraphConfig,\n", + ");\n", + "\n", + "console.log(agentOutput.messages[agentOutput.messages.length - 1].content);\n", + "console.log(\"---\");\n", + "\n", + "agentOutput = await appWithMemory.invoke(\n", + " {\n", + " messages: [\n", + " { role: \"user\", content: \"what was that output again?\" }\n", + " ]\n", + " },\n", + " langGraphConfig,\n", + ");\n", + "\n", + "console.log(agentOutput.messages[agentOutput.messages.length - 1].content);" + ] + }, + { + "cell_type": "markdown", + "id": "2997b4da", + "metadata": {}, + "source": [ + "## Iterating through steps\n", + "\n", + "With LangChain's\n", + "[`AgentExecutor`](https://api.js.langchain.com/classes/langchain_agents.AgentExecutor.html),\n", + "you could iterate over the steps using the\n", + "[`stream`](https://api.js.langchain.com/classes/langchain_core.runnables.Runnable.html#stream) method:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "5c928049", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " intermediateSteps: [\n", + " {\n", + " action: {\n", + " tool: \"magic_function\",\n", + " toolInput: { input: 3 },\n", + " toolCallId: \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", + " log: 'Invoking \"magic_function\" with {\"input\":3}\\n',\n", + " messageLog: [\n", + " AIMessageChunk {\n", + " \"id\": \"chatcmpl-A7eziUrDmLSSMoiOskhrfbsHqx4Sd\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"index\": 0,\n", + " \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"prompt\": 0,\n", + " \"completion\": 0,\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": {\n", + " \"input\": 3\n", + " },\n", + " \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"tool_call_chunks\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": \"{\\\"input\\\":3}\",\n", + " \"id\": \"call_IQZr1yy2Ug6904VkQg6pWGgR\",\n", + " \"index\": 0,\n", + " \"type\": \"tool_call_chunk\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " 
\"usage_metadata\": {\n", + " \"input_tokens\": 61,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 75\n", + " }\n", + " }\n", + " ]\n", + " },\n", + " observation: \"5\"\n", + " }\n", + " ]\n", + "}\n", + "{ output: \"The value of `magic_function(3)` is 5.\" }\n" + ] + } + ], + "source": [ + "const langChainStream = await agentExecutor.stream({ input: query });\n", + "\n", + "for await (const step of langChainStream) {\n", + " console.log(step);\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Recursion limit reached.\n" - ] + "cell_type": "markdown", + "id": "cd371818", + "metadata": {}, + "source": [ + "#### In LangGraph\n", + "\n", + "In LangGraph, things are handled natively using the stream method.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "2be89a30", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7ezu8hirCENjdjR2GpLjkzXFTEmp\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 55,\n", + " \"totalTokens\": 69\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": {\n", + " \"input\": 3\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 55,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 69\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"5\",\n", + " \"name\": \"magic_function\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_KhhNL0m3mlPoJiboFMoX8hzk\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7ezuTrh8GC550eKa1ZqRZGjpY5zh\",\n", + " \"content\": \"The value of `magic_function(3)` is 5.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 78,\n", + " \"totalTokens\": 92\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 78,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 92\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const langGraphStream = await app.stream(\n", + " { messages: [{ role: \"user\", content: query }] },\n", + " { streamMode: \"updates\" },\n", + ");\n", + "\n", + "for await (const step of langGraphStream) {\n", + " console.log(step);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "ce023792", + "metadata": {}, + "source": [ + "## `returnIntermediateSteps`\n", + "\n", + "Setting this parameter on AgentExecutor allows users to access\n", + "intermediate_steps, which pairs agent 
actions (e.g., tool invocations) with\n", + "their outcomes." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "77ce2771", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " action: {\n", + " tool: \"magic_function\",\n", + " toolInput: { input: 3 },\n", + " toolCallId: \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", + " log: 'Invoking \"magic_function\" with {\"input\":3}\\n',\n", + " messageLog: [\n", + " AIMessageChunk {\n", + " \"id\": \"chatcmpl-A7f0NdSRSUJsBP6ENTpiQD4LzpBAH\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"index\": 0,\n", + " \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"prompt\": 0,\n", + " \"completion\": 0,\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_54e2f484be\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": {\n", + " \"input\": 3\n", + " },\n", + " \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"tool_call_chunks\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": \"{\\\"input\\\":3}\",\n", + " \"id\": \"call_mbg1xgLEYEEWClbEaDe7p5tK\",\n", + " \"index\": 0,\n", + " \"type\": \"tool_call_chunk\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 61,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 75\n", + " }\n", + " }\n", + " ]\n", + " },\n", + " observation: \"5\"\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const agentExecutorWithIntermediateSteps = new AgentExecutor({\n", + " agent,\n", + " tools,\n", + " returnIntermediateSteps: true,\n", + "});\n", + "\n", + "const result = await agentExecutorWithIntermediateSteps.invoke({\n", + " input: query,\n", + "});\n", + "\n", + "console.log(result.intermediateSteps);\n" + ] + }, + { + "cell_type": "markdown", + "id": "050845ae", + "metadata": {}, + "source": [ + "By default the\n", + "[react agent executor](https://langchain-ai.github.io/langgraphjs/reference/functions/prebuilt.createReactAgent.html)\n", + "in LangGraph appends all messages to the central state. 
Therefore, it is easy to\n", + "see any intermediate steps by just looking at the full state.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "2f9cdfa8", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " HumanMessage {\n", + " \"id\": \"46a825b2-13a3-4f19-b1aa-7716c53eb247\",\n", + " \"content\": \"what is the value of magic_function(3)?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7f0iUuWktC8gXztWZCjofqyCozY2\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 55,\n", + " \"totalTokens\": 69\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_483d39d857\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": {\n", + " \"input\": 3\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 55,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 69\n", + " }\n", + " },\n", + " ToolMessage {\n", + " \"id\": \"ac6aa309-bbfb-46cd-ba27-cbdbfd848705\",\n", + " \"content\": \"5\",\n", + " \"name\": \"magic_function\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_ndsPDU58wsMeGaqr41cSlLlF\"\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-A7f0i7iHyDUV6is6sgwtcXivmFZ1x\",\n", + " \"content\": \"The value of `magic_function(3)` is 5.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 78,\n", + " \"totalTokens\": 92\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_54e2f484be\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 78,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 92\n", + " }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "agentOutput = await app.invoke({\n", + " messages: [\n", + " { role: \"user\", content: query },\n", + " ]\n", + "});\n", + "\n", + "console.log(agentOutput.messages);" + ] + }, + { + "cell_type": "markdown", + "id": "f6e671e6", + "metadata": {}, + "source": [ + "## `maxIterations`\n", + "\n", + "`AgentExecutor` implements a `maxIterations` parameter, whereas this is\n", + "controlled via `recursionLimit` in LangGraph.\n", + "\n", + "Note that in the LangChain `AgentExecutor`, an \"iteration\" includes a full turn of tool\n", + "invocation and execution. 
In LangGraph, each step contributes to the recursion\n", + "limit, so we will need to multiply by two (and add one) to get equivalent\n", + "results.\n", + "\n", + "Here's an example of how you'd set this parameter with the legacy `AgentExecutor`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1cca9d11", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "const badMagicTool = tool(async ({ input: _input }) => {\n", + " return \"Sorry, there was a temporary error. Please try again with the same input.\";\n", + "}, {\n", + " name: \"magic_function\",\n", + " description: \"Applies a magic function to an input.\",\n", + " schema: z.object({\n", + " input: z.string(),\n", + " }),\n", + "});\n", + "\n", + "const badTools = [badMagicTool];\n", + "\n", + "const spanishAgentExecutorWithMaxIterations = new AgentExecutor({\n", + " agent: createToolCallingAgent({\n", + " llm,\n", + " tools: badTools,\n", + " prompt: spanishPrompt,\n", + " }),\n", + " tools: badTools,\n", + " verbose: true,\n", + " maxIterations: 2,\n", + "});\n", + "\n", + "await spanishAgentExecutorWithMaxIterations.invoke({ input: query });" + ] + }, + { + "cell_type": "markdown", + "id": "245e064c", + "metadata": {}, + "source": [ + "If the recursion limit is reached in LangGraph.js, the framework will raise a specific exception type that we can catch and manage similarly to AgentExecutor." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "2f5e7d58", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Recursion limit reached.\n" + ] + } + ], + "source": [ + "import { GraphRecursionError } from \"@langchain/langgraph\";\n", + "\n", + "const RECURSION_LIMIT = 2 * 2 + 1;\n", + "\n", + "const appWithBadTools = createReactAgent({ llm, tools: badTools });\n", + "\n", + "try {\n", + " await appWithBadTools.invoke({\n", + " messages: [\n", + " { role: \"user\", content: query }\n", + " ]\n", + " }, {\n", + " recursionLimit: RECURSION_LIMIT,\n", + " });\n", + "} catch (e) {\n", + " if (e instanceof GraphRecursionError) {\n", + " console.log(\"Recursion limit reached.\");\n", + " } else {\n", + " throw e;\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "e56203e7", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to migrate your LangChain agent executors to LangGraph.\n", + "\n", + "Next, check out other [LangGraph how-to guides](https://langchain-ai.github.io/langgraphjs/how-tos/)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "import { GraphRecursionError } from \"@langchain/langgraph\";\n", - "\n", - "const RECURSION_LIMIT = 2 * 2 + 1;\n", - "\n", - "const appWithBadTools = createReactAgent({ llm, tools: badTools });\n", - "\n", - "try {\n", - " await appWithBadTools.invoke({\n", - " messages: [\n", - " { role: \"user\", content: query }\n", - " ]\n", - " }, {\n", - " recursionLimit: RECURSION_LIMIT,\n", - " });\n", - "} catch (e) {\n", - " if (e instanceof GraphRecursionError) {\n", - " console.log(\"Recursion limit reached.\");\n", - " } else {\n", - " throw e;\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "e56203e7", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to migrate your LangChain agent executors to LangGraph.\n", - "\n", - "Next, check out other [LangGraph how-to guides](https://langchain-ai.github.io/langgraphjs/how-tos/)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/multi_vector.mdx b/docs/core_docs/docs/how_to/multi_vector.mdx index 50904987fa79..5c7de671abd8 100644 --- a/docs/core_docs/docs/how_to/multi_vector.mdx +++ b/docs/core_docs/docs/how_to/multi_vector.mdx @@ -4,8 +4,8 @@ This guide assumes familiarity with the following concepts: -- [Retrievers](/docs/concepts/#retrievers) -- [Text splitters](/docs/concepts/#text-splitters) +- [Retrievers](/docs/concepts/retrievers) +- [Text splitters](/docs/concepts/text_splitters) - [Retrieval-augmented generation (RAG)](/docs/tutorials/rag) ::: diff --git a/docs/core_docs/docs/how_to/multimodal_inputs.ipynb b/docs/core_docs/docs/how_to/multimodal_inputs.ipynb index 9fa741dd3565..e6efa1da709c 100644 --- a/docs/core_docs/docs/how_to/multimodal_inputs.ipynb +++ b/docs/core_docs/docs/how_to/multimodal_inputs.ipynb @@ -1,211 +1,211 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", - "metadata": {}, - "source": [ - "# How to pass multimodal data directly to models\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "\n", - ":::\n", - "\n", - "Here we demonstrate how to pass multimodal input directly to models. \n", - "We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision).\n", - "For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n", - "\n", - "In this example we will ask a model to describe an image." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", - "metadata": {}, - "outputs": [], - "source": [ - "import * as fs from \"node:fs/promises\";\n", - "\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - "});\n", - "\n", - "const imageData = await fs.readFile(\"../../../../examples/hotdog.jpg\");" - ] - }, - { - "cell_type": "markdown", - "id": "4fca4da7", - "metadata": {}, - "source": [ - "The most commonly supported way to pass in images is to pass it in as a byte string within a message with a complex content type for models that support multimodal input. Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "ec680b6b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "This image contains a hot dog. It shows a frankfurter or sausage encased in a soft, elongated bread bun. The sausage itself appears to be reddish in color, likely a smoked or cured variety. The bun is a golden-brown color, suggesting it has been lightly toasted or grilled. The hot dog is presented against a plain white background, allowing the details of the iconic American fast food item to be clearly visible.\n" - ] - } - ], - "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const message = new HumanMessage({\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: \"what does this image contain?\"},\n", - " {\n", - " type: \"image_url\",\n", - " image_url: {\n", - " url: `data:image/jpeg;base64,${imageData.toString(\"base64\")}`},\n", - " },\n", - " ],\n", - "})\n", - "const response = await model.invoke([message]);\n", - "console.log(response.content);" - ] - }, - { - "cell_type": "markdown", - "id": "8656018e-c56d-47d2-b2be-71e87827f90a", - "metadata": {}, - "source": [ - "Some model providers support taking an HTTP URL to the image directly in a content block of type `\"image_url\"`:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", + "metadata": {}, + "source": [ + "# How to pass multimodal data directly to models\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "\n", + ":::\n", + "\n", + "Here we demonstrate how to pass multimodal input directly to models. \n", + "We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision).\n", + "For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n", + "\n", + "In this example we will ask a model to describe an image." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "The weather in the image appears to be pleasant and clear. The sky is mostly blue with a few scattered clouds, indicating good visibility and no immediate signs of rain. The lighting suggests it’s either morning or late afternoon, with sunlight creating a warm and bright atmosphere. There is no indication of strong winds, as the grass and foliage appear calm and undisturbed. 
Overall, it looks like a beautiful day, possibly spring or summer, ideal for outdoor activities.\n" - ] - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const openAIModel = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - "});\n", - "\n", - "const imageUrl = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\";\n", - "\n", - "const message = new HumanMessage({\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: \"describe the weather in this image\"},\n", - " {\n", - " type: \"image_url\",\n", - " image_url: { url: imageUrl }\n", - " },\n", - " ],\n", - "});\n", - "const response = await openAIModel.invoke([message]);\n", - "console.log(response.content);" - ] - }, - { - "cell_type": "markdown", - "id": "1c470309", - "metadata": {}, - "source": [ - "We can also pass in multiple images." - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "325fb4ca", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 6, + "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", + "metadata": {}, + "outputs": [], + "source": [ + "import * as fs from \"node:fs/promises\";\n", + "\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + "});\n", + "\n", + "const imageData = await fs.readFile(\"../../../../examples/hotdog.jpg\");" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Yes, the two images are the same.\n" - ] + "cell_type": "markdown", + "id": "4fca4da7", + "metadata": {}, + "source": [ + "The most commonly supported way to pass in images is to pass it in as a byte string within a message with a complex content type for models that support multimodal input. Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "ec680b6b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "This image contains a hot dog. It shows a frankfurter or sausage encased in a soft, elongated bread bun. The sausage itself appears to be reddish in color, likely a smoked or cured variety. The bun is a golden-brown color, suggesting it has been lightly toasted or grilled. The hot dog is presented against a plain white background, allowing the details of the iconic American fast food item to be clearly visible.\n" + ] + } + ], + "source": [ + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const message = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"what does this image contain?\"},\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: `data:image/jpeg;base64,${imageData.toString(\"base64\")}`},\n", + " },\n", + " ],\n", + "})\n", + "const response = await model.invoke([message]);\n", + "console.log(response.content);" + ] + }, + { + "cell_type": "markdown", + "id": "8656018e-c56d-47d2-b2be-71e87827f90a", + "metadata": {}, + "source": [ + "Some model providers support taking an HTTP URL to the image directly in a content block of type `\"image_url\"`:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The weather in the image appears to be pleasant and clear. 
The sky is mostly blue with a few scattered clouds, indicating good visibility and no immediate signs of rain. The lighting suggests it’s either morning or late afternoon, with sunlight creating a warm and bright atmosphere. There is no indication of strong winds, as the grass and foliage appear calm and undisturbed. Overall, it looks like a beautiful day, possibly spring or summer, ideal for outdoor activities.\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const openAIModel = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + "});\n", + "\n", + "const imageUrl = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\";\n", + "\n", + "const message = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"describe the weather in this image\"},\n", + " {\n", + " type: \"image_url\",\n", + " image_url: { url: imageUrl }\n", + " },\n", + " ],\n", + "});\n", + "const response = await openAIModel.invoke([message]);\n", + "console.log(response.content);" + ] + }, + { + "cell_type": "markdown", + "id": "1c470309", + "metadata": {}, + "source": [ + "We can also pass in multiple images." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "325fb4ca", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Yes, the two images are the same.\n" + ] + } + ], + "source": [ + "const message = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"are these two images the same?\"\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: imageUrl\n", + " }\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: imageUrl\n", + " }\n", + " },\n", + " ],\n", + "});\n", + "const response = await openAIModel.invoke([message]);\n", + "console.log(response.content);" + ] + }, + { + "cell_type": "markdown", + "id": "bad38378", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to pass multimodal data to a modal.\n", + "\n", + "Next, you can check out our guide on [multimodal tool calls](/docs/how_to/tool_calls_multimodal)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "typescript", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" } - ], - "source": [ - "const message = new HumanMessage({\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: \"are these two images the same?\"\n", - " },\n", - " {\n", - " type: \"image_url\",\n", - " image_url: {\n", - " url: imageUrl\n", - " }\n", - " },\n", - " {\n", - " type: \"image_url\",\n", - " image_url: {\n", - " url: imageUrl\n", - " }\n", - " },\n", - " ],\n", - "});\n", - "const response = await openAIModel.invoke([message]);\n", - "console.log(response.content);" - ] - }, - { - "cell_type": "markdown", - "id": "bad38378", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to pass multimodal data to a modal.\n", - "\n", - "Next, you can check out our guide on [multimodal tool calls](/docs/how_to/tool_calls_multimodal)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "typescript", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.1" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/multimodal_prompts.ipynb b/docs/core_docs/docs/how_to/multimodal_prompts.ipynb index bba9bfc42434..3483f1e05d97 100644 --- a/docs/core_docs/docs/how_to/multimodal_prompts.ipynb +++ b/docs/core_docs/docs/how_to/multimodal_prompts.ipynb @@ -1,193 +1,193 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", - "metadata": {}, - "source": [ - "# How to use multimodal prompts\n", - "\n", - "Here we demonstrate how to use prompt templates to format multimodal inputs to models. \n", - "\n", - "In this example we will ask a model to describe an image.\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "\n", - ":::\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", - "\n", - "\n", - " axios @langchain/openai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", - "metadata": {}, - "outputs": [], - "source": [ - "import axios from \"axios\";\n", - "\n", - "const imageUrl = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\";\n", - "const axiosRes = await axios.get(imageUrl, { responseType: \"arraybuffer\" });\n", - "const base64 = btoa(\n", - " new Uint8Array(axiosRes.data).reduce(\n", - " (data, byte) => data + String.fromCharCode(byte),\n", - " ''\n", - " )\n", - ");" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "2671f995", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const model = new ChatOpenAI({ model: \"gpt-4o\" })" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "4ee35e4f", - "metadata": {}, - "outputs": [], - "source": [ - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", \"Describe the image provided\"],\n", - " [\n", - " \"user\",\n", - " [{ type: \"image_url\", image_url: \"data:image/jpeg;base64,{base64}\" }],\n", - " ]\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "089f75c2", - "metadata": {}, - "outputs": [], - "source": [ - "const chain = prompt.pipe(model);" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "02744b06", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "The image depicts a scenic outdoor landscape featuring a wooden boardwalk path extending forward through a large field of green grass and vegetation. On either side of the path, the grass is lush and vibrant, with a variety of bushes and low shrubs visible as well. 
The sky overhead is expansive and mostly clear, adorned with soft, wispy clouds, illuminated by the light giving a warm and serene ambiance. In the distant background, there are clusters of trees and additional foliage, suggesting a natural and tranquil setting, ideal for a peaceful walk or nature exploration.\n" - ] - } - ], - "source": [ - "const response = await chain.invoke({ base64 })\n", - "console.log(response.content)" - ] - }, - { - "cell_type": "markdown", - "id": "e9b9ebf6", - "metadata": {}, - "source": [ - "We can also pass in multiple images." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "02190ee3", - "metadata": {}, - "outputs": [], - "source": [ - "const promptWithMultipleImages = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", \"compare the two pictures provided\"],\n", - " [\n", - " \"user\",\n", - " [\n", - " {\n", - " \"type\": \"image_url\",\n", - " \"image_url\": \"data:image/jpeg;base64,{imageData1}\",\n", - " },\n", - " {\n", - " \"type\": \"image_url\",\n", - " \"image_url\": \"data:image/jpeg;base64,{imageData2}\",\n", - " },\n", - " ],\n", - " ],\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "42af057b", - "metadata": {}, - "outputs": [], - "source": [ - "const chainWithMultipleImages = promptWithMultipleImages.pipe(model);" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "513abe00", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", + "metadata": {}, + "source": [ + "# How to use multimodal prompts\n", + "\n", + "Here we demonstrate how to use prompt templates to format multimodal inputs to models. \n", + "\n", + "In this example we will ask a model to describe an image.\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "\n", + ":::\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", + "\n", + "\n", + " axios @langchain/openai @langchain/core\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "The two images provided are identical. Both show a wooden boardwalk path extending into a grassy field under a blue sky with scattered clouds. 
The scenery includes green shrubs and trees in the background, with a bright and clear sky above.\n" - ] + "cell_type": "code", + "execution_count": 1, + "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", + "metadata": {}, + "outputs": [], + "source": [ + "import axios from \"axios\";\n", + "\n", + "const imageUrl = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\";\n", + "const axiosRes = await axios.get(imageUrl, { responseType: \"arraybuffer\" });\n", + "const base64 = btoa(\n", + " new Uint8Array(axiosRes.data).reduce(\n", + " (data, byte) => data + String.fromCharCode(byte),\n", + " ''\n", + " )\n", + ");" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2671f995", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const model = new ChatOpenAI({ model: \"gpt-4o\" })" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "4ee35e4f", + "metadata": {}, + "outputs": [], + "source": [ + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"Describe the image provided\"],\n", + " [\n", + " \"user\",\n", + " [{ type: \"image_url\", image_url: \"data:image/jpeg;base64,{base64}\" }],\n", + " ]\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "089f75c2", + "metadata": {}, + "outputs": [], + "source": [ + "const chain = prompt.pipe(model);" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "02744b06", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The image depicts a scenic outdoor landscape featuring a wooden boardwalk path extending forward through a large field of green grass and vegetation. On either side of the path, the grass is lush and vibrant, with a variety of bushes and low shrubs visible as well. The sky overhead is expansive and mostly clear, adorned with soft, wispy clouds, illuminated by the light giving a warm and serene ambiance. In the distant background, there are clusters of trees and additional foliage, suggesting a natural and tranquil setting, ideal for a peaceful walk or nature exploration.\n" + ] + } + ], + "source": [ + "const response = await chain.invoke({ base64 })\n", + "console.log(response.content)" + ] + }, + { + "cell_type": "markdown", + "id": "e9b9ebf6", + "metadata": {}, + "source": [ + "We can also pass in multiple images." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "02190ee3", + "metadata": {}, + "outputs": [], + "source": [ + "const promptWithMultipleImages = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"compare the two pictures provided\"],\n", + " [\n", + " \"user\",\n", + " [\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": \"data:image/jpeg;base64,{imageData1}\",\n", + " },\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": \"data:image/jpeg;base64,{imageData2}\",\n", + " },\n", + " ],\n", + " ],\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "42af057b", + "metadata": {}, + "outputs": [], + "source": [ + "const chainWithMultipleImages = promptWithMultipleImages.pipe(model);" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "513abe00", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The two images provided are identical. Both show a wooden boardwalk path extending into a grassy field under a blue sky with scattered clouds. The scenery includes green shrubs and trees in the background, with a bright and clear sky above.\n" + ] + } + ], + "source": [ + "const res = await chainWithMultipleImages.invoke({ imageData1: base64, imageData2: base64 })\n", + "console.log(res.content)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "const res = await chainWithMultipleImages.invoke({ imageData1: base64, imageData2: base64 })\n", - "console.log(res.content)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/multiple_queries.ipynb b/docs/core_docs/docs/how_to/multiple_queries.ipynb index b54012bb204a..3e01d86bbfa2 100644 --- a/docs/core_docs/docs/how_to/multiple_queries.ipynb +++ b/docs/core_docs/docs/how_to/multiple_queries.ipynb @@ -1,266 +1,266 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to generate multiple queries to retrieve data for\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Vector stores](/docs/concepts/#vectorstores)\n", - "- [Retrievers](/docs/concepts/#retrievers)\n", - "- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)\n", - "\n", - ":::\n", - "\n", - "Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\".\n", - "But retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well.\n", - "Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n", - "\n", - "The 
[`MultiQueryRetriever`](https://api.js.langchain.com/classes/langchain.retrievers_multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query.\n", - "For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents.\n", - "By generating multiple perspectives on the same question, the `MultiQueryRetriever` can help overcome some of the limitations of the distance-based retrieval and get a richer set of results.\n", - "\n", - "## Get started\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/anthropic @langchain/cohere\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: \"mitochondria is made of lipids\",\n", - " metadata: {}\n", - " },\n", - " Document {\n", - " pageContent: \"mitochondria is the powerhouse of the cell\",\n", - " metadata: {}\n", - " },\n", - " Document {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { id: 1 }\n", - " },\n", - " Document {\n", - " pageContent: \"Buildings are made out of wood\",\n", - " metadata: { id: 2 }\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { CohereEmbeddings } from \"@langchain/cohere\";\n", - "import { MultiQueryRetriever } from \"langchain/retrievers/multi_query\";\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const embeddings = new CohereEmbeddings();\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromTexts(\n", - " [\n", - " \"Buildings are made out of brick\",\n", - " \"Buildings are made out of wood\",\n", - " \"Buildings are made out of stone\",\n", - " \"Cars are made out of metal\",\n", - " \"Cars are made out of plastic\",\n", - " \"mitochondria is the powerhouse of the cell\",\n", - " \"mitochondria is made of lipids\",\n", - " ],\n", - " [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],\n", - " embeddings\n", - ");\n", - "\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\"\n", - "});\n", - "\n", - "const retriever = MultiQueryRetriever.fromLLM({\n", - " llm: model,\n", - " retriever: vectorstore.asRetriever(),\n", - "});\n", - "\n", - "const query = \"What are mitochondria made of?\";\n", - "const retrievedDocs = await retriever.invoke(query);\n", - "\n", - "/*\n", - " Generated queries: What are the components of mitochondria?,What substances comprise the mitochondria organelle? ,What is the molecular composition of mitochondria?\n", - "*/\n", - "\n", - "console.log(retrievedDocs);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Customization\n", - "\n", - "You can also supply a custom prompt to tune what types of questions are generated.\n", - "You can also pass a custom output parser to parse and split the results of the LLM call into a list of queries." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to generate multiple queries to retrieve data for\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Vector stores](/docs/concepts/#vectorstores)\n", + "- [Retrievers](/docs/concepts/retrievers)\n", + "- [Retrieval-augmented generation (RAG)](/docs/tutorials/rag)\n", + "\n", + ":::\n", + "\n", + "Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\".\n", + "But retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well.\n", + "Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n", + "\n", + "The [`MultiQueryRetriever`](https://api.js.langchain.com/classes/langchain.retrievers_multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query.\n", + "For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents.\n", + "By generating multiple perspectives on the same question, the `MultiQueryRetriever` can help overcome some of the limitations of the distance-based retrieval and get a richer set of results.\n", + "\n", + "## Get started\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/anthropic @langchain/cohere\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: \"mitochondria is made of lipids\",\n", + " metadata: {}\n", + " },\n", + " Document {\n", + " pageContent: \"mitochondria is the powerhouse of the cell\",\n", + " metadata: {}\n", + " },\n", + " Document {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { id: 1 }\n", + " },\n", + " Document {\n", + " pageContent: \"Buildings are made out of wood\",\n", + " metadata: { id: 2 }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { CohereEmbeddings } from \"@langchain/cohere\";\n", + "import { MultiQueryRetriever } from \"langchain/retrievers/multi_query\";\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const embeddings = new CohereEmbeddings();\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromTexts(\n", + " [\n", + " \"Buildings are made out of brick\",\n", + " \"Buildings are made out of wood\",\n", + " \"Buildings are made out of stone\",\n", + " \"Cars are made out of metal\",\n", + " \"Cars are made out of plastic\",\n", + " \"mitochondria is the powerhouse of the cell\",\n", + " \"mitochondria is made of lipids\",\n", + " ],\n", + " [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],\n", + " embeddings\n", + ");\n", + "\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\"\n", + "});\n", + "\n", + "const retriever = 
MultiQueryRetriever.fromLLM({\n", + " llm: model,\n", + " retriever: vectorstore.asRetriever(),\n", + "});\n", + "\n", + "const query = \"What are mitochondria made of?\";\n", + "const retrievedDocs = await retriever.invoke(query);\n", + "\n", + "/*\n", + " Generated queries: What are the components of mitochondria?,What substances comprise the mitochondria organelle? ,What is the molecular composition of mitochondria?\n", + "*/\n", + "\n", + "console.log(retrievedDocs);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: \"Mitochondrien bestehen aus Lipiden\",\n", - " metadata: {}\n", - " },\n", - " Document {\n", - " pageContent: \"Mitochondrien sind die Energiekraftwerke der Zelle\",\n", - " metadata: {}\n", - " },\n", - " Document {\n", - " pageContent: \"Gebäude werden aus Stein hergestellt\",\n", - " metadata: { id: 3 }\n", - " },\n", - " Document {\n", - " pageContent: \"Autos werden aus Metall hergestellt\",\n", - " metadata: { id: 4 }\n", - " },\n", - " Document {\n", - " pageContent: \"Gebäude werden aus Holz hergestellt\",\n", - " metadata: { id: 2 }\n", - " },\n", - " Document {\n", - " pageContent: \"Gebäude werden aus Ziegelsteinen hergestellt\",\n", - " metadata: { id: 1 }\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Customization\n", + "\n", + "You can also supply a custom prompt to tune what types of questions are generated.\n", + "You can also pass a custom output parser to parse and split the results of the LLM call into a list of queries." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: \"Mitochondrien bestehen aus Lipiden\",\n", + " metadata: {}\n", + " },\n", + " Document {\n", + " pageContent: \"Mitochondrien sind die Energiekraftwerke der Zelle\",\n", + " metadata: {}\n", + " },\n", + " Document {\n", + " pageContent: \"Gebäude werden aus Stein hergestellt\",\n", + " metadata: { id: 3 }\n", + " },\n", + " Document {\n", + " pageContent: \"Autos werden aus Metall hergestellt\",\n", + " metadata: { id: 4 }\n", + " },\n", + " Document {\n", + " pageContent: \"Gebäude werden aus Holz hergestellt\",\n", + " metadata: { id: 2 }\n", + " },\n", + " Document {\n", + " pageContent: \"Gebäude werden aus Ziegelsteinen hergestellt\",\n", + " metadata: { id: 1 }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { LLMChain } from \"langchain/chains\";\n", + "import { pull } from \"langchain/hub\";\n", + "import { BaseOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { PromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "\n", + "type LineList = {\n", + " lines: string[];\n", + "};\n", + "\n", + "class LineListOutputParser extends BaseOutputParser {\n", + " static lc_name() {\n", + " return \"LineListOutputParser\";\n", + " }\n", + "\n", + " lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n", + "\n", + " async parse(text: string): Promise {\n", + " const startKeyIndex = text.indexOf(\"\");\n", + " const endKeyIndex = text.indexOf(\"\");\n", + " const questionsStartIndex =\n", + " startKeyIndex === -1 ? 0 : startKeyIndex + \"\".length;\n", + " const questionsEndIndex = endKeyIndex === -1 ? 
text.length : endKeyIndex;\n", + " const lines = text\n", + " .slice(questionsStartIndex, questionsEndIndex)\n", + " .trim()\n", + " .split(\"\\n\")\n", + " .filter((line) => line.trim() !== \"\");\n", + " return { lines };\n", + " }\n", + "\n", + " getFormatInstructions(): string {\n", + " throw new Error(\"Not implemented.\");\n", + " }\n", + "}\n", + "\n", + "// Default prompt is available at: https://smith.langchain.com/hub/jacob/multi-vector-retriever-german\n", + "const prompt: PromptTemplate = await pull(\n", + " \"jacob/multi-vector-retriever-german\"\n", + ");\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromTexts(\n", + " [\n", + " \"Gebäude werden aus Ziegelsteinen hergestellt\",\n", + " \"Gebäude werden aus Holz hergestellt\",\n", + " \"Gebäude werden aus Stein hergestellt\",\n", + " \"Autos werden aus Metall hergestellt\",\n", + " \"Autos werden aus Kunststoff hergestellt\",\n", + " \"Mitochondrien sind die Energiekraftwerke der Zelle\",\n", + " \"Mitochondrien bestehen aus Lipiden\",\n", + " ],\n", + " [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],\n", + " embeddings\n", + ");\n", + "const model = new ChatAnthropic({});\n", + "const llmChain = new LLMChain({\n", + " llm: model,\n", + " prompt,\n", + " outputParser: new LineListOutputParser(),\n", + "});\n", + "const retriever = new MultiQueryRetriever({\n", + " retriever: vectorstore.asRetriever(),\n", + " llmChain,\n", + "});\n", + "\n", + "const query = \"What are mitochondria made of?\";\n", + "const retrievedDocs = await retriever.invoke(query);\n", + "\n", + "/*\n", + " Generated queries: Was besteht ein Mitochondrium?,Aus welchen Komponenten setzt sich ein Mitochondrium zusammen? ,Welche Moleküle finden sich in einem Mitochondrium?\n", + "*/\n", + "\n", + "console.log(retrievedDocs);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to use the `MultiQueryRetriever` to query a vector store with automatically generated queries.\n", + "\n", + "See the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to\n", + "[create your own custom retriever over any data source](/docs/how_to/custom_retriever/)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "import { LLMChain } from \"langchain/chains\";\n", - "import { pull } from \"langchain/hub\";\n", - "import { BaseOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { PromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "\n", - "type LineList = {\n", - " lines: string[];\n", - "};\n", - "\n", - "class LineListOutputParser extends BaseOutputParser {\n", - " static lc_name() {\n", - " return \"LineListOutputParser\";\n", - " }\n", - "\n", - " lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n", - "\n", - " async parse(text: string): Promise {\n", - " const startKeyIndex = text.indexOf(\"\");\n", - " const endKeyIndex = text.indexOf(\"\");\n", - " const questionsStartIndex =\n", - " startKeyIndex === -1 ? 0 : startKeyIndex + \"\".length;\n", - " const questionsEndIndex = endKeyIndex === -1 ? 
text.length : endKeyIndex;\n", - " const lines = text\n", - " .slice(questionsStartIndex, questionsEndIndex)\n", - " .trim()\n", - " .split(\"\\n\")\n", - " .filter((line) => line.trim() !== \"\");\n", - " return { lines };\n", - " }\n", - "\n", - " getFormatInstructions(): string {\n", - " throw new Error(\"Not implemented.\");\n", - " }\n", - "}\n", - "\n", - "// Default prompt is available at: https://smith.langchain.com/hub/jacob/multi-vector-retriever-german\n", - "const prompt: PromptTemplate = await pull(\n", - " \"jacob/multi-vector-retriever-german\"\n", - ");\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromTexts(\n", - " [\n", - " \"Gebäude werden aus Ziegelsteinen hergestellt\",\n", - " \"Gebäude werden aus Holz hergestellt\",\n", - " \"Gebäude werden aus Stein hergestellt\",\n", - " \"Autos werden aus Metall hergestellt\",\n", - " \"Autos werden aus Kunststoff hergestellt\",\n", - " \"Mitochondrien sind die Energiekraftwerke der Zelle\",\n", - " \"Mitochondrien bestehen aus Lipiden\",\n", - " ],\n", - " [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }],\n", - " embeddings\n", - ");\n", - "const model = new ChatAnthropic({});\n", - "const llmChain = new LLMChain({\n", - " llm: model,\n", - " prompt,\n", - " outputParser: new LineListOutputParser(),\n", - "});\n", - "const retriever = new MultiQueryRetriever({\n", - " retriever: vectorstore.asRetriever(),\n", - " llmChain,\n", - "});\n", - "\n", - "const query = \"What are mitochondria made of?\";\n", - "const retrievedDocs = await retriever.invoke(query);\n", - "\n", - "/*\n", - " Generated queries: Was besteht ein Mitochondrium?,Aus welchen Komponenten setzt sich ein Mitochondrium zusammen? ,Welche Moleküle finden sich in einem Mitochondrium?\n", - "*/\n", - "\n", - "console.log(retrievedDocs);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to use the `MultiQueryRetriever` to query a vector store with automatically generated queries.\n", - "\n", - "See the individual sections for deeper dives on specific retrievers, the [broader tutorial on RAG](/docs/tutorials/rag), or this section to learn how to\n", - "[create your own custom retriever over any data source](/docs/how_to/custom_retriever/)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/output_parser_fixing.ipynb b/docs/core_docs/docs/how_to/output_parser_fixing.ipynb index 248796be393c..20da71a2f31a 100644 --- a/docs/core_docs/docs/how_to/output_parser_fixing.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_fixing.ipynb @@ -1,138 +1,138 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "0fee7096", - "metadata": {}, - "source": [ - "# How to try to fix errors in output parsing\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Output parsers](/docs/concepts/#output-parsers)\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Chaining runnables together](/docs/how_to/sequence/)\n", - "\n", - ":::\n", - "\n", - "LLMs aren't perfect, and sometimes fail to produce output that perfectly matches a the desired format. To help handle errors, we can use the [`OutputFixingParser`](https://api.js.langchain.com/classes/langchain.output_parsers.OutputFixingParser.html) This output parser wraps another output parser, and in the event that the first one fails, it calls out to another LLM in an attempt to fix any errors.\n", - "\n", - "Specifically, we can pass the misformatted output, along with the formatted instructions, to the model and ask it to fix it.\n", - "\n", - "For this example, we'll use the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html), which can validate output according to a Zod schema. Here's what happens if we pass it a result that does not comply with the schema:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "15283e0b", - "metadata": {}, - "outputs": [ + "cells": [ { - "ename": "Error", - "evalue": "Failed to parse. Text: \"{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}\". Error: SyntaxError: Expected property name or '}' in JSON at position 1 (line 1 column 2)", - "output_type": "error", - "traceback": [ - "Stack trace:", - "Error: Failed to parse. Text: \"{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}\". 
Error: SyntaxError: Expected property name or '}' in JSON at position 1 (line 1 column 2)", - " at StructuredOutputParser.parse (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.1.63/dist/output_parsers/structured.js:86:19)", - " at :11:14" - ] - } - ], - "source": [ - "import { z } from \"zod\";\n", - "import { RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { StructuredOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const zodSchema = z.object({\n", - " name: z.string().describe(\"name of an actor\"),\n", - " film_names: z.array(z.string()).describe(\"list of names of films they starred in\"),\n", - "});\n", - "\n", - "const parser = StructuredOutputParser.fromZodSchema(zodSchema);\n", - "\n", - "const misformatted = \"{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}\";\n", - "\n", - "await parser.parse(misformatted);" - ] - }, - { - "cell_type": "markdown", - "id": "723c559d", - "metadata": {}, - "source": [ - "Now we can construct and use a `OutputFixingParser`. This output parser takes as an argument another output parser but also an LLM with which to try to correct any formatting mistakes." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "4aaccbf1", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "0fee7096", + "metadata": {}, + "source": [ + "# How to try to fix errors in output parsing\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Output parsers](/docs/concepts/output_parsers)\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Chaining runnables together](/docs/how_to/sequence/)\n", + "\n", + ":::\n", + "\n", + "LLMs aren't perfect, and sometimes fail to produce output that perfectly matches a the desired format. To help handle errors, we can use the [`OutputFixingParser`](https://api.js.langchain.com/classes/langchain.output_parsers.OutputFixingParser.html) This output parser wraps another output parser, and in the event that the first one fails, it calls out to another LLM in an attempt to fix any errors.\n", + "\n", + "Specifically, we can pass the misformatted output, along with the formatted instructions, to the model and ask it to fix it.\n", + "\n", + "For this example, we'll use the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html), which can validate output according to a Zod schema. Here's what happens if we pass it a result that does not comply with the schema:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "15283e0b", + "metadata": {}, + "outputs": [ + { + "ename": "Error", + "evalue": "Failed to parse. Text: \"{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}\". Error: SyntaxError: Expected property name or '}' in JSON at position 1 (line 1 column 2)", + "output_type": "error", + "traceback": [ + "Stack trace:", + "Error: Failed to parse. Text: \"{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}\". 
Error: SyntaxError: Expected property name or '}' in JSON at position 1 (line 1 column 2)", + " at StructuredOutputParser.parse (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.1.63/dist/output_parsers/structured.js:86:19)", + " at :11:14" + ] + } + ], + "source": [ + "import { z } from \"zod\";\n", + "import { RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { StructuredOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const zodSchema = z.object({\n", + " name: z.string().describe(\"name of an actor\"),\n", + " film_names: z.array(z.string()).describe(\"list of names of films they starred in\"),\n", + "});\n", + "\n", + "const parser = StructuredOutputParser.fromZodSchema(zodSchema);\n", + "\n", + "const misformatted = \"{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}\";\n", + "\n", + "await parser.parse(misformatted);" + ] + }, { - "data": { - "text/plain": [ - "{\n", - " name: \u001b[32m\"Tom Hanks\"\u001b[39m,\n", - " film_names: [\n", - " \u001b[32m\"Forrest Gump\"\u001b[39m,\n", - " \u001b[32m\"Saving Private Ryan\"\u001b[39m,\n", - " \u001b[32m\"Cast Away\"\u001b[39m,\n", - " \u001b[32m\"Catch Me If You Can\"\u001b[39m\n", - " ]\n", - "}" + "cell_type": "markdown", + "id": "723c559d", + "metadata": {}, + "source": [ + "Now we can construct and use a `OutputFixingParser`. This output parser takes as an argument another output parser but also an LLM with which to try to correct any formatting mistakes." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "4aaccbf1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " name: \u001b[32m\"Tom Hanks\"\u001b[39m,\n", + " film_names: [\n", + " \u001b[32m\"Forrest Gump\"\u001b[39m,\n", + " \u001b[32m\"Saving Private Ryan\"\u001b[39m,\n", + " \u001b[32m\"Cast Away\"\u001b[39m,\n", + " \u001b[32m\"Catch Me If You Can\"\u001b[39m\n", + " ]\n", + "}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "import { OutputFixingParser } from \"langchain/output_parsers\";\n", + "\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + " maxTokens: 512,\n", + " temperature: 0.1,\n", + "});\n", + "\n", + "const parserWithFix = OutputFixingParser.fromLLM(model, parser);\n", + "\n", + "await parserWithFix.parse(misformatted);" + ] + }, + { + "cell_type": "markdown", + "id": "84498e02", + "metadata": {}, + "source": [ + "For more about different parameters and options, check out our [API reference docs](https://api.js.langchain.com/classes/langchain.output_parsers.OutputFixingParser.html)." 
] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "import { OutputFixingParser } from \"langchain/output_parsers\";\n", - "\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - " maxTokens: 512,\n", - " temperature: 0.1,\n", - "});\n", - "\n", - "const parserWithFix = OutputFixingParser.fromLLM(model, parser);\n", - "\n", - "await parserWithFix.parse(misformatted);" - ] - }, - { - "cell_type": "markdown", - "id": "84498e02", - "metadata": {}, - "source": [ - "For more about different parameters and options, check out our [API reference docs](https://api.js.langchain.com/classes/langchain.output_parsers.OutputFixingParser.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/output_parser_json.ipynb b/docs/core_docs/docs/how_to/output_parser_json.ipynb index 489852110408..46c9d15f9dcc 100644 --- a/docs/core_docs/docs/how_to/output_parser_json.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_json.ipynb @@ -1,191 +1,191 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "72b1b316", - "metadata": {}, - "source": [ - "# How to parse JSON output\n", - "\n", - "While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do. We can use an output parser to help users to specify an arbitrary JSON schema via the prompt, query a model for outputs that conform to that schema, and finally parse that schema as JSON.\n", - "\n", - ":::{.callout-note}\n", - "Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON.\n", - ":::\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Output parsers](/docs/concepts/#output-parsers)\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Structured output](/docs/how_to/structured_output)\n", - "- [Chaining runnables together](/docs/how_to/sequence/)\n", - "\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "id": "ae909b7a", - "metadata": {}, - "source": [ - "The [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) is one built-in option for prompting for and then parsing JSON output." 
- ] - }, - { - "cell_type": "markdown", - "id": "6c667607", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "4ccf45a3", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "{\n", - " setup: \u001b[32m\"Why don't scientists trust atoms?\"\u001b[39m,\n", - " punchline: \u001b[32m\"Because they make up everything!\"\u001b[39m\n", - "}" + "cell_type": "markdown", + "id": "72b1b316", + "metadata": {}, + "source": [ + "# How to parse JSON output\n", + "\n", + "While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do. We can use an output parser to help users to specify an arbitrary JSON schema via the prompt, query a model for outputs that conform to that schema, and finally parse that schema as JSON.\n", + "\n", + ":::{.callout-note}\n", + "Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON.\n", + ":::\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Output parsers](/docs/concepts/output_parsers)\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Structured output](/docs/how_to/structured_output)\n", + "- [Chaining runnables together](/docs/how_to/sequence/)\n", + "\n", + ":::" ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "})\n", - "\n", - "import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "// Define your desired data structure. Only used for typing the parser output.\n", - "interface Joke {\n", - " setup: string\n", - " punchline: string\n", - "}\n", - "\n", - "// A query and format instructions used to prompt a language model.\n", - "const jokeQuery = \"Tell me a joke.\";\n", - "const formatInstructions = \"Respond with a valid JSON object, containing two fields: 'setup' and 'punchline'.\"\n", - "\n", - "// Set up a parser + inject instructions into the prompt template.\n", - "const parser = new JsonOutputParser()\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(\n", - " \"Answer the user query.\\n{format_instructions}\\n{query}\\n\"\n", - ");\n", - "\n", - "const partialedPrompt = await prompt.partial({\n", - " format_instructions: formatInstructions\n", - "});\n", - "\n", - "const chain = partialedPrompt.pipe(model).pipe(parser);\n", - "\n", - "await chain.invoke({ query: jokeQuery });" - ] - }, - { - "cell_type": "markdown", - "id": "37d801be", - "metadata": {}, - "source": [ - "## Streaming\n", - "\n", - "The `JsonOutputParser` also supports streaming partial chunks. This is useful when the model returns partial JSON output in multiple chunks. The parser will keep track of the partial chunks and return the final JSON output when the model finishes generating the output." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0309256d", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "id": "ae909b7a", + "metadata": {}, + "source": [ + "The [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) is one built-in option for prompting for and then parsing JSON output." + ] + }, + { + "cell_type": "markdown", + "id": "6c667607", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{}\n", - "{ setup: \"\" }\n", - "{ setup: \"Why\" }\n", - "{ setup: \"Why don't\" }\n", - "{ setup: \"Why don't scientists\" }\n", - "{ setup: \"Why don't scientists trust\" }\n", - "{ setup: \"Why don't scientists trust atoms\" }\n", - "{ setup: \"Why don't scientists trust atoms?\", punchline: \"\" }\n", - "{ setup: \"Why don't scientists trust atoms?\", punchline: \"Because\" }\n", - "{\n", - " setup: \"Why don't scientists trust atoms?\",\n", - " punchline: \"Because they\"\n", - "}\n", - "{\n", - " setup: \"Why don't scientists trust atoms?\",\n", - " punchline: \"Because they make\"\n", - "}\n", - "{\n", - " setup: \"Why don't scientists trust atoms?\",\n", - " punchline: \"Because they make up\"\n", - "}\n", - "{\n", - " setup: \"Why don't scientists trust atoms?\",\n", - " punchline: \"Because they make up everything\"\n", - "}\n", - "{\n", - " setup: \"Why don't scientists trust atoms?\",\n", - " punchline: \"Because they make up everything!\"\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 2, + "id": "4ccf45a3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " setup: \u001b[32m\"Why don't scientists trust atoms?\"\u001b[39m,\n", + " punchline: \u001b[32m\"Because they make up everything!\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "})\n", + "\n", + "import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "// Define your desired data structure. Only used for typing the parser output.\n", + "interface Joke {\n", + " setup: string\n", + " punchline: string\n", + "}\n", + "\n", + "// A query and format instructions used to prompt a language model.\n", + "const jokeQuery = \"Tell me a joke.\";\n", + "const formatInstructions = \"Respond with a valid JSON object, containing two fields: 'setup' and 'punchline'.\"\n", + "\n", + "// Set up a parser + inject instructions into the prompt template.\n", + "const parser = new JsonOutputParser()\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(\n", + " \"Answer the user query.\\n{format_instructions}\\n{query}\\n\"\n", + ");\n", + "\n", + "const partialedPrompt = await prompt.partial({\n", + " format_instructions: formatInstructions\n", + "});\n", + "\n", + "const chain = partialedPrompt.pipe(model).pipe(parser);\n", + "\n", + "await chain.invoke({ query: jokeQuery });" + ] + }, + { + "cell_type": "markdown", + "id": "37d801be", + "metadata": {}, + "source": [ + "## Streaming\n", + "\n", + "The `JsonOutputParser` also supports streaming partial chunks. 
This is useful when the model returns partial JSON output in multiple chunks. The parser will keep track of the partial chunks and return the final JSON output when the model finishes generating the output." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0309256d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{}\n", + "{ setup: \"\" }\n", + "{ setup: \"Why\" }\n", + "{ setup: \"Why don't\" }\n", + "{ setup: \"Why don't scientists\" }\n", + "{ setup: \"Why don't scientists trust\" }\n", + "{ setup: \"Why don't scientists trust atoms\" }\n", + "{ setup: \"Why don't scientists trust atoms?\", punchline: \"\" }\n", + "{ setup: \"Why don't scientists trust atoms?\", punchline: \"Because\" }\n", + "{\n", + " setup: \"Why don't scientists trust atoms?\",\n", + " punchline: \"Because they\"\n", + "}\n", + "{\n", + " setup: \"Why don't scientists trust atoms?\",\n", + " punchline: \"Because they make\"\n", + "}\n", + "{\n", + " setup: \"Why don't scientists trust atoms?\",\n", + " punchline: \"Because they make up\"\n", + "}\n", + "{\n", + " setup: \"Why don't scientists trust atoms?\",\n", + " punchline: \"Because they make up everything\"\n", + "}\n", + "{\n", + " setup: \"Why don't scientists trust atoms?\",\n", + " punchline: \"Because they make up everything!\"\n", + "}\n" + ] + } + ], + "source": [ + "for await (const s of await chain.stream({ query: jokeQuery })) {\n", + " console.log(s)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "1eefe12b", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned one way to prompt a model to return structured JSON. Next, check out the [broader guide on obtaining structured output](/docs/how_to/structured_output) for other techniques." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "for await (const s of await chain.stream({ query: jokeQuery })) {\n", - " console.log(s)\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "1eefe12b", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned one way to prompt a model to return structured JSON. Next, check out the [broader guide on obtaining structured output](/docs/how_to/structured_output) for other techniques." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/output_parser_structured.ipynb b/docs/core_docs/docs/how_to/output_parser_structured.ipynb index a6252a3aa1b9..86c7f489bf7d 100644 --- a/docs/core_docs/docs/how_to/output_parser_structured.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_structured.ipynb @@ -1,570 +1,570 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "38831021-76ed-48b3-9f62-d1241a68b6ad", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 3\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "a745f98b-c495-44f6-a882-757c38992d76", - "metadata": {}, - "source": [ - "# How to use output parsers to parse an LLM response into structured format\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Output parsers](/docs/concepts#output-parsers)\n", - "- [Chat models](/docs/concepts#chat-models)\n", - "\n", - ":::\n", - "\n", - "Language models output text. But there are times where you want to get more structured information than just text back. While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do. For these providers, you must use prompting to encourage the model to return structured data in the desired format.\n", - "\n", - "LangChain has [output parsers](/docs/concepts#output-parsers) which can help parse model outputs into usable objects. We'll go over a few examples below.\n", - "\n", - "## Get started\n", - "\n", - "The primary type of output parser for working with structured data in model responses is the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html). In the below example, we define a schema for the type of output we expect from the model using [`zod`](https://zod.dev).\n", - "\n", - "First, let's see the default formatting instructions we'll plug into the prompt:" - ] - }, - { - "cell_type": "markdown", - "id": "b62367da", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "1594b2bf-2a6f-47bb-9a81-38930f8e606b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "You must format your output as a JSON value that adheres to a given \"JSON Schema\" instance.\n", - "\n", - "\"JSON Schema\" is a declarative language that allows you to annotate and validate JSON documents.\n", - "\n", - "For example, the example \"JSON Schema\" instance {{\"properties\": {{\"foo\": {{\"description\": \"a list of test words\", \"type\": \"array\", \"items\": {{\"type\": \"string\"}}}}}}, \"required\": [\"foo\"]}}}}\n", - "would match an object with one required property, \"foo\". The \"type\" property specifies \"foo\" must be an \"array\", and the \"description\" property semantically describes it as \"a list of test words\". 
The items within \"foo\" must be strings.\n", - "Thus, the object {{\"foo\": [\"bar\", \"baz\"]}} is a well-formatted instance of this example \"JSON Schema\". The object {{\"properties\": {{\"foo\": [\"bar\", \"baz\"]}}}} is not well-formatted.\n", - "\n", - "Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!\n", - "\n", - "Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:\n", - "```json\n", - "{\"type\":\"object\",\"properties\":{\"answer\":{\"type\":\"string\",\"description\":\"answer to the user's question\"},\"source\":{\"type\":\"string\",\"description\":\"source used to answer the user's question, should be a website.\"}},\"required\":[\"answer\",\"source\"],\"additionalProperties\":false,\"$schema\":\"http://json-schema.org/draft-07/schema#\"}\n", - "```\n", - "\n" - ] - } - ], - "source": [ - "import { z } from \"zod\";\n", - "import { RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { StructuredOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const zodSchema = z.object({\n", - " answer: z.string().describe(\"answer to the user's question\"),\n", - " source: z.string().describe(\"source used to answer the user's question, should be a website.\"),\n", - "})\n", - "\n", - "const parser = StructuredOutputParser.fromZodSchema(zodSchema);\n", - "\n", - "const chain = RunnableSequence.from([\n", - " ChatPromptTemplate.fromTemplate(\n", - " \"Answer the users question as best as possible.\\n{format_instructions}\\n{question}\"\n", - " ),\n", - " model,\n", - " parser,\n", - "]);\n", - "\n", - "console.log(parser.getFormatInstructions());\n" - ] - }, - { - "cell_type": "markdown", - "id": "2bd357c5", - "metadata": {}, - "source": [ - "Next, let's invoke the chain:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "301471a0", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "38831021-76ed-48b3-9f62-d1241a68b6ad", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 3\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " answer: \"The capital of France is Paris.\",\n", - " source: \"https://en.wikipedia.org/wiki/Paris\"\n", - "}\n" - ] - } - ], - "source": [ - "const response = await chain.invoke({\n", - " question: \"What is the capital of France?\",\n", - " format_instructions: parser.getFormatInstructions(),\n", - "});\n", - "\n", - "console.log(response);" - ] - }, - { - "cell_type": "markdown", - "id": "75976cd6-78e2-458b-821f-3ddf3683466b", - "metadata": {}, - "source": [ - "Output parsers implement the [Runnable interface](/docs/how_to/#langchain-expression-language-lcel), the basic building block of [LangChain Expression Language (LCEL)](/docs/how_to/#langchain-expression-language-lcel). This means they support `invoke`, `stream`, `batch`, `streamLog` calls.\n", - "\n", - "## Validation\n", - "\n", - "One feature of the `StructuredOutputParser` is that it supports stricter Zod validations. 
For example, if you pass a simulated model output that does not conform to the schema, we get a detailed type error:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "475f1ae5", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a745f98b-c495-44f6-a882-757c38992d76", + "metadata": {}, + "source": [ + "# How to use output parsers to parse an LLM response into structured format\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Output parsers](/docs/concepts/output_parsers)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "\n", + ":::\n", + "\n", + "Language models output text. But there are times where you want to get more structured information than just text back. While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do. For these providers, you must use prompting to encourage the model to return structured data in the desired format.\n", + "\n", + "LangChain has [output parsers](/docs/concepts/output_parsers) which can help parse model outputs into usable objects. We'll go over a few examples below.\n", + "\n", + "## Get started\n", + "\n", + "The primary type of output parser for working with structured data in model responses is the [`StructuredOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.StructuredOutputParser.html). In the below example, we define a schema for the type of output we expect from the model using [`zod`](https://zod.dev).\n", + "\n", + "First, let's see the default formatting instructions we'll plug into the prompt:" + ] + }, { - "ename": "Error", - "evalue": "Failed to parse. Text: \"{\"badfield\": \"foo\"}\". Error: [\n {\n \"code\": \"invalid_type\",\n \"expected\": \"string\",\n \"received\": \"undefined\",\n \"path\": [\n \"answer\"\n ],\n \"message\": \"Required\"\n },\n {\n \"code\": \"invalid_type\",\n \"expected\": \"string\",\n \"received\": \"undefined\",\n \"path\": [\n \"source\"\n ],\n \"message\": \"Required\"\n }\n]", - "output_type": "error", - "traceback": [ - "Stack trace:", - "Error: Failed to parse. Text: \"{\"badfield\": \"foo\"}\". 
Error: [", - " {", - " \"code\": \"invalid_type\",", - " \"expected\": \"string\",", - " \"received\": \"undefined\",", - " \"path\": [", - " \"answer\"", - " ],", - " \"message\": \"Required\"", - " },", - " {", - " \"code\": \"invalid_type\",", - " \"expected\": \"string\",", - " \"received\": \"undefined\",", - " \"path\": [", - " \"source\"", - " ],", - " \"message\": \"Required\"", - " }", - "]", - " at StructuredOutputParser.parse (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.1.63/dist/output_parsers/structured.js:86:19)", - " at async StructuredOutputParser._callWithConfig (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.1.63/dist/runnables/base.js:203:22)", - " at async :2:1" - ] - } - ], - "source": [ - "import { AIMessage } from \"@langchain/core/messages\";\n", - "\n", - "await parser.invoke(new AIMessage(`{\"badfield\": \"foo\"}`));" - ] - }, - { - "cell_type": "markdown", - "id": "653a0236", - "metadata": {}, - "source": [ - "Compared to:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "5f39d45e", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "b62367da", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, { - "data": { - "text/plain": [ - "{ answer: \u001b[32m\"Paris\"\u001b[39m, source: \u001b[32m\"I made it up\"\u001b[39m }" + "cell_type": "code", + "execution_count": 1, + "id": "1594b2bf-2a6f-47bb-9a81-38930f8e606b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You must format your output as a JSON value that adheres to a given \"JSON Schema\" instance.\n", + "\n", + "\"JSON Schema\" is a declarative language that allows you to annotate and validate JSON documents.\n", + "\n", + "For example, the example \"JSON Schema\" instance {{\"properties\": {{\"foo\": {{\"description\": \"a list of test words\", \"type\": \"array\", \"items\": {{\"type\": \"string\"}}}}}}, \"required\": [\"foo\"]}}}}\n", + "would match an object with one required property, \"foo\". The \"type\" property specifies \"foo\" must be an \"array\", and the \"description\" property semantically describes it as \"a list of test words\". The items within \"foo\" must be strings.\n", + "Thus, the object {{\"foo\": [\"bar\", \"baz\"]}} is a well-formatted instance of this example \"JSON Schema\". The object {{\"properties\": {{\"foo\": [\"bar\", \"baz\"]}}}} is not well-formatted.\n", + "\n", + "Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!\n", + "\n", + "Here is the JSON Schema instance your output must adhere to. 
Include the enclosing markdown codeblock:\n", + "```json\n", + "{\"type\":\"object\",\"properties\":{\"answer\":{\"type\":\"string\",\"description\":\"answer to the user's question\"},\"source\":{\"type\":\"string\",\"description\":\"source used to answer the user's question, should be a website.\"}},\"required\":[\"answer\",\"source\"],\"additionalProperties\":false,\"$schema\":\"http://json-schema.org/draft-07/schema#\"}\n", + "```\n", + "\n" + ] + } + ], + "source": [ + "import { z } from \"zod\";\n", + "import { RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { StructuredOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const zodSchema = z.object({\n", + " answer: z.string().describe(\"answer to the user's question\"),\n", + " source: z.string().describe(\"source used to answer the user's question, should be a website.\"),\n", + "})\n", + "\n", + "const parser = StructuredOutputParser.fromZodSchema(zodSchema);\n", + "\n", + "const chain = RunnableSequence.from([\n", + " ChatPromptTemplate.fromTemplate(\n", + " \"Answer the users question as best as possible.\\n{format_instructions}\\n{question}\"\n", + " ),\n", + " model,\n", + " parser,\n", + "]);\n", + "\n", + "console.log(parser.getFormatInstructions());\n" ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await parser.invoke(new AIMessage(`{\"answer\": \"Paris\", \"source\": \"I made it up\"}`));" - ] - }, - { - "cell_type": "markdown", - "id": "d289aa9c", - "metadata": {}, - "source": [ - "More advanced Zod validations are supported as well. To learn more, check out the [Zod documentation](https://zod.dev)." - ] - }, - { - "cell_type": "markdown", - "id": "d88590a0-f36b-4ad5-8a56-d300971a6440", - "metadata": {}, - "source": [ - "## Streaming\n", - "\n", - "While all parsers are runnables and support the streaming interface, only certain parsers can stream through partially parsed objects, since this is highly dependent on the output type. The `StructuredOutputParser` does not support partial streaming because it validates the output at each step. 
If you try to stream using a chain with this output parser, the chain will simply yield the fully parsed output:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "fac8f714", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " answer: \"The capital of France is Paris.\",\n", - " source: \"https://en.wikipedia.org/wiki/Paris\"\n", - "}\n" - ] - } - ], - "source": [ - "const stream = await chain.stream({\n", - " question: \"What is the capital of France?\",\n", - " format_instructions: parser.getFormatInstructions(),\n", - "});\n", - "\n", - "for await (const s of stream) {\n", - " console.log(s)\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "a3a40f19", - "metadata": {}, - "source": [ - "The simpler [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html), however, supports streaming through partial outputs:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "d7ecfe4d-dae8-4452-98ea-e48bdc498788", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "2bd357c5", + "metadata": {}, + "source": [ + "Next, let's invoke the chain:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{}\n", - "{ answer: \"\" }\n", - "{ answer: \"The\" }\n", - "{ answer: \"The invention\" }\n", - "{ answer: \"The invention of\" }\n", - "{ answer: \"The invention of the\" }\n", - "{ answer: \"The invention of the microscope\" }\n", - "{ answer: \"The invention of the microscope is\" }\n", - "{ answer: \"The invention of the microscope is attributed\" }\n", - "{ answer: \"The invention of the microscope is attributed to\" }\n", - "{ answer: \"The invention of the microscope is attributed to Hans\" }\n", - "{ answer: \"The invention of the microscope is attributed to Hans L\" }\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippers\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey,\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zach\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Jans\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen,\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Anton\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 
4 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 8 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 12 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 13 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 18 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 20 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 26 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 29 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 33 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 38 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 43 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 48 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 51 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 52 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 57 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 63 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 73 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 80 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 81 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 85 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 94 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 99 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 
108 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 112 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 118 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 127 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 138 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 145 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 149 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 150 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 151 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 157 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 159 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 163 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 167 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 171 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 175 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 176 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 181 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 186 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 190 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 202 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 203 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 
209 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 214 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 226 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 239 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 242 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 246 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 253 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 257 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 262 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 265 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 268 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 273 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 288 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 300 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 303 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 311 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 316 more characters\n", - "}\n", - "{\n", - " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 
317 more characters\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 2, + "id": "301471a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " answer: \"The capital of France is Paris.\",\n", + " source: \"https://en.wikipedia.org/wiki/Paris\"\n", + "}\n" + ] + } + ], + "source": [ + "const response = await chain.invoke({\n", + " question: \"What is the capital of France?\",\n", + " format_instructions: parser.getFormatInstructions(),\n", + "});\n", + "\n", + "console.log(response);" + ] + }, + { + "cell_type": "markdown", + "id": "75976cd6-78e2-458b-821f-3ddf3683466b", + "metadata": {}, + "source": [ + "Output parsers implement the [Runnable interface](/docs/how_to/#langchain-expression-language-lcel), the basic building block of [LangChain Expression Language (LCEL)](/docs/how_to/#langchain-expression-language-lcel). This means they support `invoke`, `stream`, `batch`, `streamLog` calls.\n", + "\n", + "## Validation\n", + "\n", + "One feature of the `StructuredOutputParser` is that it supports stricter Zod validations. For example, if you pass a simulated model output that does not conform to the schema, we get a detailed type error:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "475f1ae5", + "metadata": {}, + "outputs": [ + { + "ename": "Error", + "evalue": "Failed to parse. Text: \"{\"badfield\": \"foo\"}\". Error: [\n {\n \"code\": \"invalid_type\",\n \"expected\": \"string\",\n \"received\": \"undefined\",\n \"path\": [\n \"answer\"\n ],\n \"message\": \"Required\"\n },\n {\n \"code\": \"invalid_type\",\n \"expected\": \"string\",\n \"received\": \"undefined\",\n \"path\": [\n \"source\"\n ],\n \"message\": \"Required\"\n }\n]", + "output_type": "error", + "traceback": [ + "Stack trace:", + "Error: Failed to parse. Text: \"{\"badfield\": \"foo\"}\". Error: [", + " {", + " \"code\": \"invalid_type\",", + " \"expected\": \"string\",", + " \"received\": \"undefined\",", + " \"path\": [", + " \"answer\"", + " ],", + " \"message\": \"Required\"", + " },", + " {", + " \"code\": \"invalid_type\",", + " \"expected\": \"string\",", + " \"received\": \"undefined\",", + " \"path\": [", + " \"source\"", + " ],", + " \"message\": \"Required\"", + " }", + "]", + " at StructuredOutputParser.parse (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.1.63/dist/output_parsers/structured.js:86:19)", + " at async StructuredOutputParser._callWithConfig (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.1.63/dist/runnables/base.js:203:22)", + " at async :2:1" + ] + } + ], + "source": [ + "import { AIMessage } from \"@langchain/core/messages\";\n", + "\n", + "await parser.invoke(new AIMessage(`{\"badfield\": \"foo\"}`));" + ] + }, + { + "cell_type": "markdown", + "id": "653a0236", + "metadata": {}, + "source": [ + "Compared to:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5f39d45e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ answer: \u001b[32m\"Paris\"\u001b[39m, source: \u001b[32m\"I made it up\"\u001b[39m }" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await parser.invoke(new AIMessage(`{\"answer\": \"Paris\", \"source\": \"I made it up\"}`));" + ] + }, + { + "cell_type": "markdown", + "id": "d289aa9c", + "metadata": {}, + "source": [ + "More advanced Zod validations are supported as well. 
To learn more, check out the [Zod documentation](https://zod.dev)." + ] + }, + { + "cell_type": "markdown", + "id": "d88590a0-f36b-4ad5-8a56-d300971a6440", + "metadata": {}, + "source": [ + "## Streaming\n", + "\n", + "While all parsers are runnables and support the streaming interface, only certain parsers can stream through partially parsed objects, since this is highly dependent on the output type. The `StructuredOutputParser` does not support partial streaming because it validates the output at each step. If you try to stream using a chain with this output parser, the chain will simply yield the fully parsed output:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "fac8f714", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " answer: \"The capital of France is Paris.\",\n", + " source: \"https://en.wikipedia.org/wiki/Paris\"\n", + "}\n" + ] + } + ], + "source": [ + "const stream = await chain.stream({\n", + " question: \"What is the capital of France?\",\n", + " format_instructions: parser.getFormatInstructions(),\n", + "});\n", + "\n", + "for await (const s of stream) {\n", + " console.log(s)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "a3a40f19", + "metadata": {}, + "source": [ + "The simpler [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html), however, supports streaming through partial outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "d7ecfe4d-dae8-4452-98ea-e48bdc498788", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{}\n", + "{ answer: \"\" }\n", + "{ answer: \"The\" }\n", + "{ answer: \"The invention\" }\n", + "{ answer: \"The invention of\" }\n", + "{ answer: \"The invention of the\" }\n", + "{ answer: \"The invention of the microscope\" }\n", + "{ answer: \"The invention of the microscope is\" }\n", + "{ answer: \"The invention of the microscope is attributed\" }\n", + "{ answer: \"The invention of the microscope is attributed to\" }\n", + "{ answer: \"The invention of the microscope is attributed to Hans\" }\n", + "{ answer: \"The invention of the microscope is attributed to Hans L\" }\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippers\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey,\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zach\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Jans\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen,\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Anton\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie\"\n", + 
"}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 4 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 8 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 12 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 13 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 18 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 20 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 26 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 29 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 33 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 38 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 43 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 48 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 51 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 52 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 57 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 63 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 73 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 80 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 81 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 85 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 
94 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 99 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 108 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 112 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 118 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 127 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 138 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 145 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 149 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 150 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 151 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 157 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 159 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 163 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 167 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 171 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 175 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 176 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 181 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 186 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 190 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 
202 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 203 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 209 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 214 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 226 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 239 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 242 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 246 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 253 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 257 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 262 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 265 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 268 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 273 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 288 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 300 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 303 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 311 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 316 more characters\n", + "}\n", + "{\n", + " answer: \"The invention of the microscope is attributed to Hans Lippershey, Zacharias Janssen, and Antonie van\"... 
317 more characters\n", + "}\n" + ] + } + ], + "source": [ + "import { JsonOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "const template = `Return a JSON object with a single key named \"answer\" that answers the following question: {question}.\n", + "Do not wrap the JSON output in markdown blocks.`\n", + "\n", + "const jsonPrompt = ChatPromptTemplate.fromTemplate(template);\n", + "const jsonParser = new JsonOutputParser();\n", + "const jsonChain = jsonPrompt.pipe(model).pipe(jsonParser);\n", + "\n", + "const stream = await jsonChain.stream({\n", + " question: \"Who invented the microscope?\",\n", + "});\n", + "\n", + "for await (const s of stream) {\n", + " console.log(s)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "605b8dd1", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've learned about using output parsers to parse structured outputs from prompted model outputs.\n", + "\n", + "Next, check out the [guide on tool calling](/docs/how_to/tool_calling), a more built-in way of obtaining structured output that some model providers support, or read more about output parsers for other types of structured data like [XML](/docs/how_to/output_parser_xml)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "import { JsonOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "const template = `Return a JSON object with a single key named \"answer\" that answers the following question: {question}.\n", - "Do not wrap the JSON output in markdown blocks.`\n", - "\n", - "const jsonPrompt = ChatPromptTemplate.fromTemplate(template);\n", - "const jsonParser = new JsonOutputParser();\n", - "const jsonChain = jsonPrompt.pipe(model).pipe(jsonParser);\n", - "\n", - "const stream = await jsonChain.stream({\n", - " question: \"Who invented the microscope?\",\n", - "});\n", - "\n", - "for await (const s of stream) {\n", - " console.log(s)\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "605b8dd1", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've learned about using output parsers to parse structured outputs from prompted model outputs.\n", - "\n", - "Next, check out the [guide on tool calling](/docs/how_to/tool_calling), a more built-in way of obtaining structured output that some model providers support, or read more about output parsers for other types of structured data like [XML](/docs/how_to/output_parser_xml)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/output_parser_xml.ipynb b/docs/core_docs/docs/how_to/output_parser_xml.ipynb index 0a05ae3080f3..c665d4ab97b5 100644 --- a/docs/core_docs/docs/how_to/output_parser_xml.ipynb +++ b/docs/core_docs/docs/how_to/output_parser_xml.ipynb @@ -1,403 +1,403 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "181b5b6d", - "metadata": {}, - "source": [ - "# How to parse XML output\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Output parsers](/docs/concepts/#output-parsers)\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Structured output](/docs/how_to/structured_output)\n", - "- [Chaining runnables together](/docs/how_to/sequence/)\n", - "\n", - ":::\n", - "\n", - "LLMs from different providers often have different strengths depending on the specific data they are trianed on. This also means that some may be \"better\" and more reliable at generating output in formats other than JSON.\n", - "\n", - "This guide shows you how to use the [`XMLOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.XMLOutputParser.html) to prompt models for XML output, then and parse that output into a usable format.\n", - "\n", - ":::{.callout-note}\n", - "Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed XML.\n", - ":::\n", - "\n", - "In the following examples, we use Anthropic's Claude (https://docs.anthropic.com/claude/docs), which is one such model that is optimized for XML tags.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/anthropic @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "da312f86-0d2a-4aef-a09d-1e72bd0ea9b1", - "metadata": {}, - "source": [ - "Let's start with a simple request to the model." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "b03785af-69fc-40a1-a1be-c04ed6fade70", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Here is the shortened filmography for Tom Hanks, with movies enclosed in \"movie\" tags:\n", - "\n", - "Forrest Gump\n", - "Saving Private Ryan\n", - "Cast Away\n", - "Apollo 13\n", - "Catch Me If You Can\n", - "The Green Mile\n", - "Toy Story\n", - "Toy Story 2\n", - "Toy Story 3\n", - "Toy Story 4\n", - "Philadelphia\n", - "Big\n", - "Sleepless in Seattle\n", - "You've Got Mail\n", - "The Terminal\n" - ] - } - ], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - " maxTokens: 512,\n", - " temperature: 0.1,\n", - "});\n", - "\n", - "const query = `Generate the shortened filmograph for Tom Hanks.`;\n", - "\n", - "const result = await model.invoke(query + ` Please enclose the movies in \"movie\" tags.`);\n", - "\n", - "console.log(result.content);" - ] - }, - { - "cell_type": "markdown", - "id": "4db65781-3d54-4ba6-ae26-5b4ead47a4c8", - "metadata": {}, - "source": [ - "This actually worked pretty well! But it would be nice to parse that XML into a more easily usable format. We can use the `XMLOutputParser` to both add default format instructions to the prompt and parse outputted XML into a dict:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "6917e057", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "181b5b6d", + "metadata": {}, + "source": [ + "# How to parse XML output\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Output parsers](/docs/concepts/output_parsers)\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Structured output](/docs/how_to/structured_output)\n", + "- [Chaining runnables together](/docs/how_to/sequence/)\n", + "\n", + ":::\n", + "\n", + "LLMs from different providers often have different strengths depending on the specific data they are trianed on. This also means that some may be \"better\" and more reliable at generating output in formats other than JSON.\n", + "\n", + "This guide shows you how to use the [`XMLOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.XMLOutputParser.html) to prompt models for XML output, then and parse that output into a usable format.\n", + "\n", + ":::{.callout-note}\n", + "Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed XML.\n", + ":::\n", + "\n", + "In the following examples, we use Anthropic's Claude (https://docs.anthropic.com/claude/docs), which is one such model that is optimized for XML tags.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/anthropic @langchain/core\n", + "\n", + "```" + ] + }, { - "data": { - "text/plain": [ - "\u001b[32m\"The output should be formatted as a XML file.\\n\"\u001b[39m +\n", - " \u001b[32m\"1. Output should conform to the tags below. \\n\"\u001b[39m +\n", - " \u001b[32m\"2. If tag\"\u001b[39m... 
434 more characters" + "cell_type": "markdown", + "id": "da312f86-0d2a-4aef-a09d-1e72bd0ea9b1", + "metadata": {}, + "source": [ + "Let's start with a simple request to the model." ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { XMLOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "// We will add these instructions to the prompt below\n", - "const parser = new XMLOutputParser();\n", - "\n", - "parser.getFormatInstructions();" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "87ba8d11", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"filmography\": [\n", - " {\n", - " \"actor\": [\n", - " {\n", - " \"name\": \"Tom Hanks\"\n", - " },\n", - " {\n", - " \"films\": [\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"title\": \"Forrest Gump\"\n", - " },\n", - " {\n", - " \"year\": \"1994\"\n", - " },\n", - " {\n", - " \"role\": \"Forrest Gump\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"title\": \"Saving Private Ryan\"\n", - " },\n", - " {\n", - " \"year\": \"1998\"\n", - " },\n", - " {\n", - " \"role\": \"Captain Miller\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"title\": \"Cast Away\"\n", - " },\n", - " {\n", - " \"year\": \"2000\"\n", - " },\n", - " {\n", - " \"role\": \"Chuck Noland\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"title\": \"Catch Me If You Can\"\n", - " },\n", - " {\n", - " \"year\": \"2002\"\n", - " },\n", - " {\n", - " \"role\": \"Carl Hanratty\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"title\": \"The Terminal\"\n", - " },\n", - " {\n", - " \"year\": \"2004\"\n", - " },\n", - " {\n", - " \"role\": \"Viktor Navorski\"\n", - " }\n", - " ]\n", - " }\n", - " ]\n", - " }\n", - " ]\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`{query}\\n{format_instructions}`);\n", - "const partialedPrompt = await prompt.partial({\n", - " format_instructions: parser.getFormatInstructions(),\n", - "});\n", - "\n", - "const chain = partialedPrompt.pipe(model).pipe(parser);\n", - "\n", - "const output = await chain.invoke({\n", - " query: \"Generate the shortened filmograph for Tom Hanks.\",\n", - "});\n", - "\n", - "console.log(JSON.stringify(output, null, 2));" - ] - }, - { - "cell_type": "markdown", - "id": "327f5479-77e0-4549-8393-2cd7a286d491", - "metadata": {}, - "source": [ - "You'll notice above that our output is no longer just between `movie` tags. 
We can also add some tags to tailor the output to our needs:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "4af50494", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "id": "b03785af-69fc-40a1-a1be-c04ed6fade70", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Here is the shortened filmography for Tom Hanks, with movies enclosed in \"movie\" tags:\n", + "\n", + "Forrest Gump\n", + "Saving Private Ryan\n", + "Cast Away\n", + "Apollo 13\n", + "Catch Me If You Can\n", + "The Green Mile\n", + "Toy Story\n", + "Toy Story 2\n", + "Toy Story 3\n", + "Toy Story 4\n", + "Philadelphia\n", + "Big\n", + "Sleepless in Seattle\n", + "You've Got Mail\n", + "The Terminal\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + " maxTokens: 512,\n", + " temperature: 0.1,\n", + "});\n", + "\n", + "const query = `Generate the shortened filmograph for Tom Hanks.`;\n", + "\n", + "const result = await model.invoke(query + ` Please enclose the movies in \"movie\" tags.`);\n", + "\n", + "console.log(result.content);" + ] + }, { - "data": { - "text/plain": [ - "\u001b[32m\"The output should be formatted as a XML file.\\n\"\u001b[39m +\n", - " \u001b[32m\"1. Output should conform to the tags below. \\n\"\u001b[39m +\n", - " \u001b[32m\"2. If tag\"\u001b[39m... 460 more characters" + "cell_type": "markdown", + "id": "4db65781-3d54-4ba6-ae26-5b4ead47a4c8", + "metadata": {}, + "source": [ + "This actually worked pretty well! But it would be nice to parse that XML into a more easily usable format. We can use the `XMLOutputParser` to both add default format instructions to the prompt and parse outputted XML into a dict:" ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const parserWithTags = new XMLOutputParser({ tags: [\"movies\", \"actor\", \"film\", \"name\", \"genre\"] });\n", - "\n", - "// We will add these instructions to the prompt below\n", - "parserWithTags.getFormatInstructions();" - ] - }, - { - "cell_type": "markdown", - "id": "6563ca36", - "metadata": {}, - "source": [ - "You can and should experiment with adding your own formatting hints in the other parts of your prompt to either augment or replace the default instructions.\n", - "\n", - "Here's the result when we invoke it:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "b722a235", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"movies\": [\n", - " {\n", - " \"actor\": [\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"name\": \"Forrest Gump\"\n", - " },\n", - " {\n", - " \"genre\": \"Drama\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"name\": \"Saving Private Ryan\"\n", - " },\n", - " {\n", - " \"genre\": \"War\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"name\": \"Cast Away\"\n", - " },\n", - " {\n", - " \"genre\": \"Drama\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"name\": \"Catch Me If You Can\"\n", - " },\n", - " {\n", - " \"genre\": \"Biography\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"film\": [\n", - " {\n", - " \"name\": \"The Terminal\"\n", - " },\n", - " {\n", - " \"genre\": \"Comedy-drama\"\n", - " }\n", - " ]\n", - " }\n", - " ]\n", - " 
}\n", - " ]\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 4, + "id": "6917e057", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"The output should be formatted as a XML file.\\n\"\u001b[39m +\n", + " \u001b[32m\"1. Output should conform to the tags below. \\n\"\u001b[39m +\n", + " \u001b[32m\"2. If tag\"\u001b[39m... 434 more characters" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { XMLOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "// We will add these instructions to the prompt below\n", + "const parser = new XMLOutputParser();\n", + "\n", + "parser.getFormatInstructions();" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "87ba8d11", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"filmography\": [\n", + " {\n", + " \"actor\": [\n", + " {\n", + " \"name\": \"Tom Hanks\"\n", + " },\n", + " {\n", + " \"films\": [\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"title\": \"Forrest Gump\"\n", + " },\n", + " {\n", + " \"year\": \"1994\"\n", + " },\n", + " {\n", + " \"role\": \"Forrest Gump\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"title\": \"Saving Private Ryan\"\n", + " },\n", + " {\n", + " \"year\": \"1998\"\n", + " },\n", + " {\n", + " \"role\": \"Captain Miller\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"title\": \"Cast Away\"\n", + " },\n", + " {\n", + " \"year\": \"2000\"\n", + " },\n", + " {\n", + " \"role\": \"Chuck Noland\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"title\": \"Catch Me If You Can\"\n", + " },\n", + " {\n", + " \"year\": \"2002\"\n", + " },\n", + " {\n", + " \"role\": \"Carl Hanratty\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"title\": \"The Terminal\"\n", + " },\n", + " {\n", + " \"year\": \"2004\"\n", + " },\n", + " {\n", + " \"role\": \"Viktor Navorski\"\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`{query}\\n{format_instructions}`);\n", + "const partialedPrompt = await prompt.partial({\n", + " format_instructions: parser.getFormatInstructions(),\n", + "});\n", + "\n", + "const chain = partialedPrompt.pipe(model).pipe(parser);\n", + "\n", + "const output = await chain.invoke({\n", + " query: \"Generate the shortened filmograph for Tom Hanks.\",\n", + "});\n", + "\n", + "console.log(JSON.stringify(output, null, 2));" + ] + }, + { + "cell_type": "markdown", + "id": "327f5479-77e0-4549-8393-2cd7a286d491", + "metadata": {}, + "source": [ + "You'll notice above that our output is no longer just between `movie` tags. We can also add some tags to tailor the output to our needs:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "4af50494", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"The output should be formatted as a XML file.\\n\"\u001b[39m +\n", + " \u001b[32m\"1. Output should conform to the tags below. \\n\"\u001b[39m +\n", + " \u001b[32m\"2. If tag\"\u001b[39m... 
460 more characters" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const parserWithTags = new XMLOutputParser({ tags: [\"movies\", \"actor\", \"film\", \"name\", \"genre\"] });\n", + "\n", + "// We will add these instructions to the prompt below\n", + "parserWithTags.getFormatInstructions();" + ] + }, + { + "cell_type": "markdown", + "id": "6563ca36", + "metadata": {}, + "source": [ + "You can and should experiment with adding your own formatting hints in the other parts of your prompt to either augment or replace the default instructions.\n", + "\n", + "Here's the result when we invoke it:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "b722a235", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"movies\": [\n", + " {\n", + " \"actor\": [\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"name\": \"Forrest Gump\"\n", + " },\n", + " {\n", + " \"genre\": \"Drama\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"name\": \"Saving Private Ryan\"\n", + " },\n", + " {\n", + " \"genre\": \"War\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"name\": \"Cast Away\"\n", + " },\n", + " {\n", + " \"genre\": \"Drama\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"name\": \"Catch Me If You Can\"\n", + " },\n", + " {\n", + " \"genre\": \"Biography\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"film\": [\n", + " {\n", + " \"name\": \"The Terminal\"\n", + " },\n", + " {\n", + " \"genre\": \"Comedy-drama\"\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const promptWithTags = ChatPromptTemplate.fromTemplate(`{query}\\n{format_instructions}`);\n", + "const partialedPromptWithTags = await promptWithTags.partial({\n", + " format_instructions: parserWithTags.getFormatInstructions(),\n", + "});\n", + "\n", + "const chainWithTags = partialedPromptWithTags.pipe(model).pipe(parserWithTags);\n", + "\n", + "const outputWithTags = await chainWithTags.invoke({\n", + " query: \"Generate the shortened filmograph for Tom Hanks.\",\n", + "});\n", + "\n", + "console.log(JSON.stringify(outputWithTags, null, 2));" + ] + }, + { + "cell_type": "markdown", + "id": "6902fe6f", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to prompt a model to return XML. Next, check out the [broader guide on obtaining structured output](/docs/how_to/structured_output) for other related techniques." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const promptWithTags = ChatPromptTemplate.fromTemplate(`{query}\\n{format_instructions}`);\n", - "const partialedPromptWithTags = await promptWithTags.partial({\n", - " format_instructions: parserWithTags.getFormatInstructions(),\n", - "});\n", - "\n", - "const chainWithTags = partialedPromptWithTags.pipe(model).pipe(parserWithTags);\n", - "\n", - "const outputWithTags = await chainWithTags.invoke({\n", - " query: \"Generate the shortened filmograph for Tom Hanks.\",\n", - "});\n", - "\n", - "console.log(JSON.stringify(outputWithTags, null, 2));" - ] - }, - { - "cell_type": "markdown", - "id": "6902fe6f", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to prompt a model to return XML. Next, check out the [broader guide on obtaining structured output](/docs/how_to/structured_output) for other related techniques." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/parallel.mdx b/docs/core_docs/docs/how_to/parallel.mdx index 4ba2c4117c7b..cd7cb75b42e5 100644 --- a/docs/core_docs/docs/how_to/parallel.mdx +++ b/docs/core_docs/docs/how_to/parallel.mdx @@ -4,7 +4,7 @@ This guide assumes familiarity with the following concepts: -- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language) +- [LangChain Expression Language (LCEL)](/docs/concepts/lcel) - [Chaining runnables](/docs/how_to/sequence/) ::: diff --git a/docs/core_docs/docs/how_to/parent_document_retriever.mdx b/docs/core_docs/docs/how_to/parent_document_retriever.mdx index e5671e7dd3bc..2b99b857b30f 100644 --- a/docs/core_docs/docs/how_to/parent_document_retriever.mdx +++ b/docs/core_docs/docs/how_to/parent_document_retriever.mdx @@ -10,8 +10,8 @@ import ExampleWithRerank from "@examples/retrievers/parent_document_retriever_re This guide assumes familiarity with the following concepts: -- [Retrievers](/docs/concepts/#retrievers) -- [Text splitters](/docs/concepts/#text-splitters) +- [Retrievers](/docs/concepts/retrievers) +- [Text splitters](/docs/concepts/text_splitters) - [Retrieval-augmented generation (RAG)](/docs/tutorials/rag) ::: diff --git a/docs/core_docs/docs/how_to/passthrough.ipynb b/docs/core_docs/docs/how_to/passthrough.ipynb index 1a59ca6885b8..2c215a8da943 100644 --- a/docs/core_docs/docs/how_to/passthrough.ipynb +++ b/docs/core_docs/docs/how_to/passthrough.ipynb @@ -1,182 +1,182 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "d35de667-0352-4bfb-a890-cebe7f676fe7", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 5\n", - "keywords: [RunnablePassthrough, LCEL]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "b022ab74-794d-4c54-ad47-ff9549ddb9d2", - "metadata": {}, - "source": [ - "# How to pass through 
arguments from one step to the next\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", - "- [Chaining runnables](/docs/how_to/sequence/)\n", - "- [Calling runnables in parallel](/docs/how_to/parallel/)\n", - "- [Custom functions](/docs/how_to/functions/)\n", - "\n", - ":::\n", - "\n", - "\n", - "When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html) class allows you to do just this, and is typically is used in conjuction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n", - "\n", - "Let's look at an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "03988b8d-d54c-4492-8707-1594372cf093", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "{ passed: { num: \u001b[33m1\u001b[39m }, modified: \u001b[33m2\u001b[39m }" + "cell_type": "raw", + "id": "d35de667-0352-4bfb-a890-cebe7f676fe7", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 5\n", + "keywords: [RunnablePassthrough, LCEL]\n", + "---" ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { RunnableParallel, RunnablePassthrough } from \"@langchain/core/runnables\";\n", - "\n", - "const runnable = RunnableParallel.from({\n", - " passed: new RunnablePassthrough<{ num: number }>(),\n", - " modified: (input: { num: number }) => input.num + 1,\n", - "});\n", - "\n", - "await runnable.invoke({ num: 1 });" - ] - }, - { - "cell_type": "markdown", - "id": "702c7acc-cd31-4037-9489-647df192fd7c", - "metadata": {}, - "source": [ - "As seen above, `passed` key was called with `RunnablePassthrough()` and so it simply passed on `{'num': 1}`. \n", - "\n", - "We also set a second key in the map with `modified`. This uses a lambda to set a single value adding 1 to the num, which resulted in `modified` key with the value of `2`." 
- ] - }, - { - "cell_type": "markdown", - "id": "15187a3b-d666-4b9b-a258-672fc51fe0e2", - "metadata": {}, - "source": [ - "## Retrieval Example\n", - "\n", - "In the example below, we see a more real-world use case where we use `RunnablePassthrough` along with `RunnableParallel` in a chain to properly format inputs to a prompt:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "267d1460-53c1-4fdb-b2c3-b6a1eb7fccff", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "id": "b022ab74-794d-4c54-ad47-ff9549ddb9d2", + "metadata": {}, + "source": [ + "# How to pass through arguments from one step to the next\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n", + "- [Chaining runnables](/docs/how_to/sequence/)\n", + "- [Calling runnables in parallel](/docs/how_to/parallel/)\n", + "- [Custom functions](/docs/how_to/functions/)\n", + "\n", + ":::\n", + "\n", + "\n", + "When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnablePassthrough.html) class allows you to do just this, and is typically is used in conjuction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n", + "\n", + "Let's look at an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "03988b8d-d54c-4492-8707-1594372cf093", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ passed: { num: \u001b[33m1\u001b[39m }, modified: \u001b[33m2\u001b[39m }" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnableParallel, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "\n", + "const runnable = RunnableParallel.from({\n", + " passed: new RunnablePassthrough<{ num: number }>(),\n", + " modified: (input: { num: number }) => input.num + 1,\n", + "});\n", + "\n", + "await runnable.invoke({ num: 1 });" + ] + }, + { + "cell_type": "markdown", + "id": "702c7acc-cd31-4037-9489-647df192fd7c", + "metadata": {}, + "source": [ + "As seen above, `passed` key was called with `RunnablePassthrough()` and so it simply passed on `{'num': 1}`. \n", + "\n", + "We also set a second key in the map with `modified`. This uses a lambda to set a single value adding 1 to the num, which resulted in `modified` key with the value of `2`." 
+ ] + }, + { + "cell_type": "markdown", + "id": "15187a3b-d666-4b9b-a258-672fc51fe0e2", + "metadata": {}, + "source": [ + "## Retrieval Example\n", + "\n", + "In the example below, we see a more real-world use case where we use `RunnablePassthrough` along with `RunnableParallel` in a chain to properly format inputs to a prompt:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "```" + ] + }, { - "data": { - "text/plain": [ - "\u001b[32m\"Harrison worked at Kensho.\"\u001b[39m" + "cell_type": "code", + "execution_count": 3, + "id": "267d1460-53c1-4fdb-b2c3-b6a1eb7fccff", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Harrison worked at Kensho.\"\u001b[39m" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { ChatOpenAI, OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments([\n", + " { pageContent: \"harrison worked at kensho\", metadata: {} }\n", + "], new OpenAIEmbeddings());\n", + "\n", + "const retriever = vectorstore.asRetriever();\n", + "\n", + "const template = `Answer the question based only on the following context:\n", + "{context}\n", + "\n", + "Question: {question}\n", + "`;\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(template);\n", + "\n", + "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + "const retrievalChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe((docs) => docs[0].pageContent),\n", + " question: new RunnablePassthrough()\n", + " },\n", + " prompt,\n", + " model,\n", + " new StringOutputParser(),\n", + "]);\n", + "\n", + "await retrievalChain.invoke(\"where did harrison work?\");" + ] + }, + { + "cell_type": "markdown", + "id": "392cd4c4-e7ed-4ab8-934d-f7a4eca55ee1", + "metadata": {}, + "source": [ + "Here the input to prompt is expected to be a map with keys `\"context\"` and `\"question\"`. The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the `\"question\"` key. The `RunnablePassthrough` allows us to pass on the user's question to the prompt and model.\n", + "\n", + "## Next steps\n", + "\n", + "Now you've learned how to pass data through your chains to help to help format the data flowing through your chains.\n", + "\n", + "To learn more, see the other how-to guides on runnables in this section." 
] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { ChatOpenAI, OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments([\n", - " { pageContent: \"harrison worked at kensho\", metadata: {} }\n", - "], new OpenAIEmbeddings());\n", - "\n", - "const retriever = vectorstore.asRetriever();\n", - "\n", - "const template = `Answer the question based only on the following context:\n", - "{context}\n", - "\n", - "Question: {question}\n", - "`;\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(template);\n", - "\n", - "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n", - "\n", - "const retrievalChain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe((docs) => docs[0].pageContent),\n", - " question: new RunnablePassthrough()\n", - " },\n", - " prompt,\n", - " model,\n", - " new StringOutputParser(),\n", - "]);\n", - "\n", - "await retrievalChain.invoke(\"where did harrison work?\");" - ] - }, - { - "cell_type": "markdown", - "id": "392cd4c4-e7ed-4ab8-934d-f7a4eca55ee1", - "metadata": {}, - "source": [ - "Here the input to prompt is expected to be a map with keys `\"context\"` and `\"question\"`. The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the `\"question\"` key. The `RunnablePassthrough` allows us to pass on the user's question to the prompt and model.\n", - "\n", - "## Next steps\n", - "\n", - "Now you've learned how to pass data through your chains to help to help format the data flowing through your chains.\n", - "\n", - "To learn more, see the other how-to guides on runnables in this section." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/prompts_composition.ipynb b/docs/core_docs/docs/how_to/prompts_composition.ipynb index b821657aea45..d1794282189c 100644 --- a/docs/core_docs/docs/how_to/prompts_composition.ipynb +++ b/docs/core_docs/docs/how_to/prompts_composition.ipynb @@ -1,343 +1,343 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "02a1c8fb", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 5\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "4de4e022", - "metadata": {}, - "source": [ - "# How to compose prompts together\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "\n", - ":::\n", - "\n", - "LangChain provides a user friendly interface for composing different parts of prompts together. You can do this with either string prompts or chat prompts. Constructing prompts this way allows for easy reuse of components." - ] - }, - { - "cell_type": "markdown", - "id": "c3190650", - "metadata": {}, - "source": [ - "## String prompt composition\n", - "\n", - "When working with string prompts, each template is joined together. You can work with either prompts directly or strings (the first element in the list needs to be a prompt)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "69b17f05", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "PromptTemplate {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " inputVariables: [ \u001b[32m\"topic\"\u001b[39m, \u001b[32m\"language\"\u001b[39m ],\n", - " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", - " template: \u001b[32m\"Tell me a joke about {topic}, make it funny and in {language}\"\u001b[39m\n", - " },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"prompt\"\u001b[39m ],\n", - " inputVariables: [ \u001b[32m\"topic\"\u001b[39m, \u001b[32m\"language\"\u001b[39m ],\n", - " outputParser: \u001b[90mundefined\u001b[39m,\n", - " partialVariables: \u001b[90mundefined\u001b[39m,\n", - " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", - " template: \u001b[32m\"Tell me a joke about {topic}, make it funny and in {language}\"\u001b[39m,\n", - " validateTemplate: \u001b[33mtrue\u001b[39m\n", - "}" + "cell_type": "raw", + "id": "02a1c8fb", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 5\n", + "---" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(`Tell me a joke about {topic}, make it funny and in {language}`)\n", - "\n", - "prompt" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "dbba24ba", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "\u001b[32m\"Tell me a joke about sports, make it funny and in spanish\"\u001b[39m" + "cell_type": "markdown", + "id": "4de4e022", + "metadata": {}, + "source": [ + "# How to compose prompts together\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "\n", + ":::\n", + "\n", + "LangChain provides a user friendly interface for composing different parts of prompts together. You can do this with either string prompts or chat prompts. Constructing prompts this way allows for easy reuse of components." ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await prompt.format({ topic: \"sports\", language: \"spanish\" })" - ] - }, - { - "cell_type": "markdown", - "id": "4e4f6a8a", - "metadata": {}, - "source": [ - "## Chat prompt composition" - ] - }, - { - "cell_type": "markdown", - "id": "8554bae5", - "metadata": {}, - "source": [ - "A chat prompt is made up a of a list of messages. Similarly to the above example, we can concatenate chat prompt templates. Each new element is a new message in the final prompt.\n", - "\n", - "First, let's initialize the a [`ChatPromptTemplate`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) with a [`SystemMessage`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "cab8dd65", - "metadata": {}, - "outputs": [], - "source": [ - "import { AIMessage, HumanMessage, SystemMessage} from \"@langchain/core/messages\"\n", - "\n", - "const prompt = new SystemMessage(\"You are a nice pirate\")" - ] - }, - { - "cell_type": "markdown", - "id": "30656ef8", - "metadata": {}, - "source": [ - "You can then easily create a pipeline combining it with other messages *or* message templates.\n", - "Use a `BaseMessage` when there are no variables to be formatted, use a `MessageTemplate` when there are variables to be formatted. You can also use just a string (note: this will automatically get inferred as a [`HumanMessagePromptTemplate`](https://api.js.langchain.com/classes/langchain_core.prompts.HumanMessagePromptTemplate.html).)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "a2ddd0a1", - "metadata": {}, - "outputs": [], - "source": [ - "import { HumanMessagePromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const newPrompt = HumanMessagePromptTemplate.fromTemplate([prompt, new HumanMessage(\"Hi\"), new AIMessage(\"what?\"), \"{input}\"])" - ] - }, - { - "cell_type": "markdown", - "id": "72294e1b", - "metadata": {}, - "source": [ - "Under the hood, this creates an instance of the ChatPromptTemplate class, so you can use it just as you did before!" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "297932de", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: [\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"You are a nice pirate\"\u001b[39m },\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"Hi\"\u001b[39m },\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"what?\"\u001b[39m },\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"i said hi\"\u001b[39m }\n", - " ],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: [\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"You are a nice pirate\"\u001b[39m },\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"Hi\"\u001b[39m },\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"what?\"\u001b[39m },\n", - " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"i said hi\"\u001b[39m }\n", - " ],\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "c3190650", + "metadata": {}, + "source": [ + "## String prompt composition\n", + "\n", + "When working with string prompts, each template is joined together. You can work with either prompts directly or strings (the first element in the list needs to be a prompt)." 
] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await newPrompt.formatMessages({ input: \"i said hi\" })" - ] - }, - { - "cell_type": "markdown", - "id": "0e1d47e3-b05a-4aef-a58c-3057fa628c1c", - "metadata": {}, - "source": [ - "## Using PipelinePrompt" - ] - }, - { - "cell_type": "markdown", - "id": "0a5892f9-e4d8-4b7c-b6a5-4651539b9734", - "metadata": {}, - "source": [ - "LangChain includes a class called [`PipelinePromptTemplate`](https://api.js.langchain.com/classes/_langchain_core.prompts.PipelinePromptTemplate.html), which can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts:\n", - "\n", - "- Final prompt: The final prompt that is returned\n", - "- Pipeline prompts: A list of tuples, consisting of a string name and a prompt template. Each prompt template will be formatted and then passed to future prompt templates as a variable with the same name." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "4face631-74d7-49ca-93b1-1e6e66fa58e2", - "metadata": {}, - "outputs": [], - "source": [ - "import {\n", - " PromptTemplate,\n", - " PipelinePromptTemplate,\n", - " } from \"@langchain/core/prompts\";\n", - " \n", - "const fullPrompt = PromptTemplate.fromTemplate(`{introduction}\n", - "\n", - "{example}\n", - "\n", - "{start}`);\n", - "\n", - "const introductionPrompt = PromptTemplate.fromTemplate(\n", - "`You are impersonating {person}.`\n", - ");\n", - "\n", - "const examplePrompt =\n", - "PromptTemplate.fromTemplate(`Here's an example of an interaction:\n", - "Q: {example_q}\n", - "A: {example_a}`);\n", - "\n", - "const startPrompt = PromptTemplate.fromTemplate(`Now, do this for real!\n", - "Q: {input}\n", - "A:`);\n", - "\n", - "const composedPrompt = new PipelinePromptTemplate({\n", - "pipelinePrompts: [\n", - " {\n", - " name: \"introduction\",\n", - " prompt: introductionPrompt,\n", - " },\n", - " {\n", - " name: \"example\",\n", - " prompt: examplePrompt,\n", - " },\n", - " {\n", - " name: \"start\",\n", - " prompt: startPrompt,\n", - " },\n", - "],\n", - "finalPrompt: fullPrompt,\n", - "});\n", - " \n", - "\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "c6cabb16-ea30-4de0-8548-dcce84df8421", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "69b17f05", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "PromptTemplate {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " inputVariables: [ \u001b[32m\"topic\"\u001b[39m, \u001b[32m\"language\"\u001b[39m ],\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " template: \u001b[32m\"Tell me a joke about {topic}, make it funny and in {language}\"\u001b[39m\n", + " },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"prompt\"\u001b[39m ],\n", + " inputVariables: [ \u001b[32m\"topic\"\u001b[39m, \u001b[32m\"language\"\u001b[39m ],\n", + " outputParser: \u001b[90mundefined\u001b[39m,\n", + " partialVariables: \u001b[90mundefined\u001b[39m,\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " template: \u001b[32m\"Tell me a joke about {topic}, make it funny and in {language}\"\u001b[39m,\n", + " validateTemplate: \u001b[33mtrue\u001b[39m\n", + "}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": 
"execute_result" + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(`Tell me a joke about {topic}, make it funny and in {language}`)\n", + "\n", + "prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "dbba24ba", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Tell me a joke about sports, make it funny and in spanish\"\u001b[39m" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await prompt.format({ topic: \"sports\", language: \"spanish\" })" + ] + }, + { + "cell_type": "markdown", + "id": "4e4f6a8a", + "metadata": {}, + "source": [ + "## Chat prompt composition" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "You are impersonating Elon Musk.\n", - "\n", - "Here's an example of an interaction:\n", - "Q: What's your favorite car?\n", - "A: Telsa\n", - "\n", - "Now, do this for real!\n", - "Q: What's your favorite social media site?\n", - "A:\n" - ] + "cell_type": "markdown", + "id": "8554bae5", + "metadata": {}, + "source": [ + "A chat prompt is made up a of a list of messages. Similarly to the above example, we can concatenate chat prompt templates. Each new element is a new message in the final prompt.\n", + "\n", + "First, let's initialize the a [`ChatPromptTemplate`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) with a [`SystemMessage`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.system.SystemMessage.html)." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "cab8dd65", + "metadata": {}, + "outputs": [], + "source": [ + "import { AIMessage, HumanMessage, SystemMessage} from \"@langchain/core/messages\"\n", + "\n", + "const prompt = new SystemMessage(\"You are a nice pirate\")" + ] + }, + { + "cell_type": "markdown", + "id": "30656ef8", + "metadata": {}, + "source": [ + "You can then easily create a pipeline combining it with other messages *or* message templates.\n", + "Use a `BaseMessage` when there are no variables to be formatted, use a `MessageTemplate` when there are variables to be formatted. You can also use just a string (note: this will automatically get inferred as a [`HumanMessagePromptTemplate`](https://api.js.langchain.com/classes/langchain_core.prompts.HumanMessagePromptTemplate.html).)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "a2ddd0a1", + "metadata": {}, + "outputs": [], + "source": [ + "import { HumanMessagePromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const newPrompt = HumanMessagePromptTemplate.fromTemplate([prompt, new HumanMessage(\"Hi\"), new AIMessage(\"what?\"), \"{input}\"])" + ] + }, + { + "cell_type": "markdown", + "id": "72294e1b", + "metadata": {}, + "source": [ + "Under the hood, this creates an instance of the ChatPromptTemplate class, so you can use it just as you did before!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "297932de", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " HumanMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: [\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"You are a nice pirate\"\u001b[39m },\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"Hi\"\u001b[39m },\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"what?\"\u001b[39m },\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"i said hi\"\u001b[39m }\n", + " ],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: [\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"You are a nice pirate\"\u001b[39m },\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"Hi\"\u001b[39m },\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"what?\"\u001b[39m },\n", + " { type: \u001b[32m\"text\"\u001b[39m, text: \u001b[32m\"i said hi\"\u001b[39m }\n", + " ],\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " }\n", + "]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await newPrompt.formatMessages({ input: \"i said hi\" })" + ] + }, + { + "cell_type": "markdown", + "id": "0e1d47e3-b05a-4aef-a58c-3057fa628c1c", + "metadata": {}, + "source": [ + "## Using PipelinePrompt" + ] + }, + { + "cell_type": "markdown", + "id": "0a5892f9-e4d8-4b7c-b6a5-4651539b9734", + "metadata": {}, + "source": [ + "LangChain includes a class called [`PipelinePromptTemplate`](https://api.js.langchain.com/classes/_langchain_core.prompts.PipelinePromptTemplate.html), which can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts:\n", + "\n", + "- Final prompt: The final prompt that is returned\n", + "- Pipeline prompts: A list of tuples, consisting of a string name and a prompt template. Each prompt template will be formatted and then passed to future prompt templates as a variable with the same name." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4face631-74d7-49ca-93b1-1e6e66fa58e2", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " PromptTemplate,\n", + " PipelinePromptTemplate,\n", + " } from \"@langchain/core/prompts\";\n", + " \n", + "const fullPrompt = PromptTemplate.fromTemplate(`{introduction}\n", + "\n", + "{example}\n", + "\n", + "{start}`);\n", + "\n", + "const introductionPrompt = PromptTemplate.fromTemplate(\n", + "`You are impersonating {person}.`\n", + ");\n", + "\n", + "const examplePrompt =\n", + "PromptTemplate.fromTemplate(`Here's an example of an interaction:\n", + "Q: {example_q}\n", + "A: {example_a}`);\n", + "\n", + "const startPrompt = PromptTemplate.fromTemplate(`Now, do this for real!\n", + "Q: {input}\n", + "A:`);\n", + "\n", + "const composedPrompt = new PipelinePromptTemplate({\n", + "pipelinePrompts: [\n", + " {\n", + " name: \"introduction\",\n", + " prompt: introductionPrompt,\n", + " },\n", + " {\n", + " name: \"example\",\n", + " prompt: examplePrompt,\n", + " },\n", + " {\n", + " name: \"start\",\n", + " prompt: startPrompt,\n", + " },\n", + "],\n", + "finalPrompt: fullPrompt,\n", + "});\n", + " \n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "c6cabb16-ea30-4de0-8548-dcce84df8421", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are impersonating Elon Musk.\n", + "\n", + "Here's an example of an interaction:\n", + "Q: What's your favorite car?\n", + "A: Telsa\n", + "\n", + "Now, do this for real!\n", + "Q: What's your favorite social media site?\n", + "A:\n" + ] + } + ], + "source": [ + "const formattedPrompt = await composedPrompt.format({\n", + " person: \"Elon Musk\",\n", + " example_q: `What's your favorite car?`,\n", + " example_a: \"Telsa\",\n", + " input: `What's your favorite social media site?`,\n", + " });\n", + " \n", + " \n", + "console.log(formattedPrompt);\n" + ] + }, + { + "cell_type": "markdown", + "id": "96922030", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned how to compose prompts together.\n", + "\n", + "Next, check out the other how-to guides on prompt templates in this section, like [adding few-shot examples to your prompt templates](/docs/how_to/few_shot_examples_chat)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "const formattedPrompt = await composedPrompt.format({\n", - " person: \"Elon Musk\",\n", - " example_q: `What's your favorite car?`,\n", - " example_a: \"Telsa\",\n", - " input: `What's your favorite social media site?`,\n", - " });\n", - " \n", - " \n", - "console.log(formattedPrompt);\n" - ] - }, - { - "cell_type": "markdown", - "id": "96922030", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned how to compose prompts together.\n", - "\n", - "Next, check out the other how-to guides on prompt templates in this section, like [adding few-shot examples to your prompt templates](/docs/how_to/few_shot_examples_chat)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/prompts_partial.mdx b/docs/core_docs/docs/how_to/prompts_partial.mdx index de93000f9c48..e782132d32a4 100644 --- a/docs/core_docs/docs/how_to/prompts_partial.mdx +++ b/docs/core_docs/docs/how_to/prompts_partial.mdx @@ -4,7 +4,7 @@ This guide assumes familiarity with the following concepts: -- [Prompt templates](/docs/concepts/#prompt-templates) +- [Prompt templates](/docs/concepts/prompt_templates) ::: diff --git a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb index 4ed8e4bd849e..e2cc59b8e9ba 100644 --- a/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb +++ b/docs/core_docs/docs/how_to/qa_chat_history_how_to.ipynb @@ -36,7 +36,7 @@ "## Setup\n", "### Dependencies\n", "\n", - "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/chat_models) or [LLM](/docs/concepts/text_llms), [Embeddings](/docs/concepts/embedding_models), and [VectorStore](/docs/concepts/vectorstores) or [Retriever](/docs/concepts/retrievers).\n", "\n", "We’ll use the following packages:\n", "\n", @@ -592,7 +592,7 @@ "source": [ "### Agent constructor\n", "\n", - "Now that we have defined the tools and the LLM, we can create the agent. We will be using [LangGraph](/docs/concepts/#langgraph) to construct the agent. \n", + "Now that we have defined the tools and the LLM, we can create the agent. We will be using [LangGraph](https://langchain-ai.github.io/langgraphjs) to construct the agent. \n", "Currently we are using a high level interface to construct the agent, but the nice thing about LangGraph is that this high-level interface is backed by a low-level, highly controllable API in case you want to modify the agent logic." 
] }, @@ -1133,4 +1133,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/qa_citations.ipynb b/docs/core_docs/docs/how_to/qa_citations.ipynb index 8a3b3f300059..701ec0481652 100644 --- a/docs/core_docs/docs/how_to/qa_citations.ipynb +++ b/docs/core_docs/docs/how_to/qa_citations.ipynb @@ -27,7 +27,7 @@ "## Setup\n", "### Dependencies\n", "\n", - "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models/), and [VectorStore](/docs/concepts#vectorstores/) or [Retriever](/docs/concepts#retrievers).\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/chat_models) or [LLM](/docs/concepts/text_llms), [Embeddings](/docs/concepts/embedding_models/), and [VectorStore](/docs/concepts/vectorstores/) or [Retriever](/docs/concepts/retrievers).\n", "\n", "We’ll use the following packages:\n", "\n", @@ -980,4 +980,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/qa_sources.ipynb b/docs/core_docs/docs/how_to/qa_sources.ipynb index dde30197c953..c3efb34496ab 100644 --- a/docs/core_docs/docs/how_to/qa_sources.ipynb +++ b/docs/core_docs/docs/how_to/qa_sources.ipynb @@ -26,7 +26,7 @@ "## Setup\n", "### Dependencies\n", "\n", - "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/chat_models) or [LLM](/docs/concepts/text_llms), [Embeddings](/docs/concepts/embedding_models), and [VectorStore](/docs/concepts/vectorstores) or [Retriever](/docs/concepts/retrievers).\n", "\n", "We’ll use the following packages:\n", "\n", @@ -287,4 +287,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/qa_streaming.ipynb b/docs/core_docs/docs/how_to/qa_streaming.ipynb index 882938a3eb9c..5b2f8ce78565 100644 --- a/docs/core_docs/docs/how_to/qa_streaming.ipynb +++ b/docs/core_docs/docs/how_to/qa_streaming.ipynb @@ -26,7 +26,7 @@ "## Setup\n", "### Dependencies\n", "\n", - "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/chat_models) or [LLM](/docs/concepts/text_llms), [Embeddings](/docs/concepts/embedding_models), and [VectorStore](/docs/concepts/vectorstores) or [Retriever](/docs/concepts/retrievers).\n", "\n", "We’ll use the following packages:\n", "\n", @@ -199,7 +199,7 @@ "source": [ "## Streaming final outputs\n", "\n", - "With 
[LCEL](/docs/concepts#langchain-expression-language), we can stream outputs as they are generated:" + "With [LCEL](/docs/concepts/lcel), we can stream outputs as they are generated:" ] }, { @@ -455,4 +455,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/query_high_cardinality.ipynb b/docs/core_docs/docs/how_to/query_high_cardinality.ipynb index 3cf6b25aaa27..717ee2df9e7e 100644 --- a/docs/core_docs/docs/how_to/query_high_cardinality.ipynb +++ b/docs/core_docs/docs/how_to/query_high_cardinality.ipynb @@ -459,7 +459,7 @@ "source": [ "### Find and all relevant values\n", "\n", - "Instead, what we can do is create a [vector store index](/docs/concepts#vectorstores) over the relevant values and then query that for the N most relevant values," + "Instead, what we can do is create a [vector store index](/docs/concepts/vectorstores) over the relevant values and then query that for the N most relevant values," ] }, { @@ -635,4 +635,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb b/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb index 4d5adf15470a..dce905aaf625 100644 --- a/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb +++ b/docs/core_docs/docs/how_to/recursive_text_splitter.ipynb @@ -1,227 +1,227 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "keywords: [recursivecharactertextsplitter]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to recursively split text by characters\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Text splitters](/docs/concepts#text-splitters)\n", - "\n", - ":::\n", - "\n", - "This text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is `[\"\\n\\n\", \"\\n\", \" \", \"\"]`. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.\n", - "\n", - "1. How the text is split: by list of characters.\n", - "2. How the chunk size is measured: by number of characters.\n", - "\n", - "Below we show example usage.\n", - "\n", - "To obtain the string content directly, use `.splitText`.\n", - "\n", - "To create LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments`." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: \"Hi.\",\n", - " metadata: { loc: { lines: { from: 1, to: 1 } } }\n", - " },\n", - " Document {\n", - " pageContent: \"I'm\",\n", - " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", - " },\n", - " Document {\n", - " pageContent: \"Harrison.\",\n", - " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", - "\n", - "const text = `Hi.\\n\\nI'm Harrison.\\n\\nHow? Are? 
You?\\nOkay then f f f f.\n", - "This is a weird text to write, but gotta test the splittingggg some how.\\n\\n\n", - "Bye!\\n\\n-H.`;\n", - "const splitter = new RecursiveCharacterTextSplitter({\n", - " chunkSize: 10,\n", - " chunkOverlap: 1,\n", - "});\n", - "\n", - "const output = await splitter.createDocuments([text]);\n", - "\n", - "console.log(output.slice(0, 3));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You'll note that in the above example we are splitting a raw text string and getting back a list of documents. We can also split documents directly." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [recursivecharactertextsplitter]\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: \"Hi.\",\n", - " metadata: { loc: { lines: { from: 1, to: 1 } } }\n", - " },\n", - " Document {\n", - " pageContent: \"I'm\",\n", - " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", - " },\n", - " Document {\n", - " pageContent: \"Harrison.\",\n", - " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { Document } from \"@langchain/core/documents\";\n", - "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", - "\n", - "const text = `Hi.\\n\\nI'm Harrison.\\n\\nHow? Are? You?\\nOkay then f f f f.\n", - "This is a weird text to write, but gotta test the splittingggg some how.\\n\\n\n", - "Bye!\\n\\n-H.`;\n", - "const splitter = new RecursiveCharacterTextSplitter({\n", - " chunkSize: 10,\n", - " chunkOverlap: 1,\n", - "});\n", - "\n", - "const docOutput = await splitter.splitDocuments([\n", - " new Document({ pageContent: text }),\n", - "]);\n", - "\n", - "console.log(docOutput.slice(0, 3));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can customize the `RecursiveCharacterTextSplitter` with arbitrary separators by passing a `separators` parameter like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to recursively split text by characters\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Text splitters](/docs/concepts/text_splitters)\n", + "\n", + ":::\n", + "\n", + "This text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is `[\"\\n\\n\", \"\\n\", \" \", \"\"]`. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.\n", + "\n", + "1. How the text is split: by list of characters.\n", + "2. How the chunk size is measured: by number of characters.\n", + "\n", + "Below we show example usage.\n", + "\n", + "To obtain the string content directly, use `.splitText`.\n", + "\n", + "To create LangChain [Document](https://api.js.langchain.com/classes/langchain_core.documents.Document.html) objects (e.g., for use in downstream tasks), use `.createDocuments`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: \"Hi.\",\n", + " metadata: { loc: { lines: { from: 1, to: 1 } } }\n", + " },\n", + " Document {\n", + " pageContent: \"I'm\",\n", + " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", + " },\n", + " Document {\n", + " pageContent: \"Harrison.\",\n", + " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", + "\n", + "const text = `Hi.\\n\\nI'm Harrison.\\n\\nHow? Are? You?\\nOkay then f f f f.\n", + "This is a weird text to write, but gotta test the splittingggg some how.\\n\\n\n", + "Bye!\\n\\n-H.`;\n", + "const splitter = new RecursiveCharacterTextSplitter({\n", + " chunkSize: 10,\n", + " chunkOverlap: 1,\n", + "});\n", + "\n", + "const output = await splitter.createDocuments([text]);\n", + "\n", + "console.log(output.slice(0, 3));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You'll note that in the above example we are splitting a raw text string and getting back a list of documents. We can also split documents directly." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: \"Hi.\",\n", + " metadata: { loc: { lines: { from: 1, to: 1 } } }\n", + " },\n", + " Document {\n", + " pageContent: \"I'm\",\n", + " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", + " },\n", + " Document {\n", + " pageContent: \"Harrison.\",\n", + " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { Document } from \"@langchain/core/documents\";\n", + "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", + "\n", + "const text = `Hi.\\n\\nI'm Harrison.\\n\\nHow? Are? 
You?\\nOkay then f f f f.\n", + "This is a weird text to write, but gotta test the splittingggg some how.\\n\\n\n", + "Bye!\\n\\n-H.`;\n", + "const splitter = new RecursiveCharacterTextSplitter({\n", + " chunkSize: 10,\n", + " chunkOverlap: 1,\n", + "});\n", + "\n", + "const docOutput = await splitter.splitDocuments([\n", + " new Document({ pageContent: text }),\n", + "]);\n", + "\n", + "console.log(docOutput.slice(0, 3));" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: \"Some other considerations include:\",\n", - " metadata: { loc: { lines: { from: 1, to: 1 } } }\n", - " },\n", - " Document {\n", - " pageContent: \"- Do you deploy your backend and frontend together\",\n", - " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", - " },\n", - " Document {\n", - " pageContent: \"r, or separately?\",\n", - " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can customize the `RecursiveCharacterTextSplitter` with arbitrary separators by passing a `separators` parameter like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: \"Some other considerations include:\",\n", + " metadata: { loc: { lines: { from: 1, to: 1 } } }\n", + " },\n", + " Document {\n", + " pageContent: \"- Do you deploy your backend and frontend together\",\n", + " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", + " },\n", + " Document {\n", + " pageContent: \"r, or separately?\",\n", + " metadata: { loc: { lines: { from: 3, to: 3 } } }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { Document } from \"@langchain/core/documents\";\n", + "\n", + "const text = `Some other considerations include:\n", + "\n", + "- Do you deploy your backend and frontend together, or separately?\n", + "- Do you deploy your backend co-located with your database, or separately?\n", + "\n", + "**Production Support:** As you move your LangChains into production, we'd love to offer more hands-on support.\n", + "Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to share more about what you're building, and our team will get in touch.\n", + "\n", + "## Deployment Options\n", + "\n", + "See below for a list of deployment options for your LangChain app. If you don't see your preferred option, please get in touch and we can add it to this list.`;\n", + "\n", + "const splitter = new RecursiveCharacterTextSplitter({\n", + " chunkSize: 50,\n", + " chunkOverlap: 1,\n", + " separators: [\"|\", \"##\", \">\", \"-\"],\n", + "});\n", + "\n", + "const docOutput = await splitter.splitDocuments([\n", + " new Document({ pageContent: text }),\n", + "]);\n", + "\n", + "console.log(docOutput.slice(0, 3));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now learned a method for splitting text by character.\n", + "\n", + "Next, check out [specific techinques for splitting on code](/docs/how_to/code_splitter) or the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", - "import { Document } from \"@langchain/core/documents\";\n", - "\n", - "const text = `Some other considerations include:\n", - "\n", - "- Do you deploy your backend and frontend together, or separately?\n", - "- Do you deploy your backend co-located with your database, or separately?\n", - "\n", - "**Production Support:** As you move your LangChains into production, we'd love to offer more hands-on support.\n", - "Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to share more about what you're building, and our team will get in touch.\n", - "\n", - "## Deployment Options\n", - "\n", - "See below for a list of deployment options for your LangChain app. If you don't see your preferred option, please get in touch and we can add it to this list.`;\n", - "\n", - "const splitter = new RecursiveCharacterTextSplitter({\n", - " chunkSize: 50,\n", - " chunkOverlap: 1,\n", - " separators: [\"|\", \"##\", \">\", \"-\"],\n", - "});\n", - "\n", - "const docOutput = await splitter.splitDocuments([\n", - " new Document({ pageContent: text }),\n", - "]);\n", - "\n", - "console.log(docOutput.slice(0, 3));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now learned a method for splitting text by character.\n", - "\n", - "Next, check out [specific techinques for splitting on code](/docs/how_to/code_splitter) or the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx b/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx index a4ac40937a56..3e24ca475044 100644 --- a/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx +++ b/docs/core_docs/docs/how_to/reduce_retrieval_latency.mdx @@ -4,8 +4,8 @@ This guide assumes familiarity with the following concepts: -- [Retrievers](/docs/concepts/#retrievers) -- [Embeddings](/docs/concepts/#embedding-models) +- [Retrievers](/docs/concepts/retrievers) +- [Embeddings](/docs/concepts/embedding_models) - [Vector stores](/docs/concepts/#vectorstores) - [Retrieval-augmented generation (RAG)](/docs/tutorials/rag) diff --git a/docs/core_docs/docs/how_to/routing.mdx b/docs/core_docs/docs/how_to/routing.mdx index be0aa6c3ff53..7d75c4fdb242 100644 --- a/docs/core_docs/docs/how_to/routing.mdx +++ b/docs/core_docs/docs/how_to/routing.mdx @@ -4,11 +4,11 @@ This guide assumes familiarity with the following concepts: -- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language) +- [LangChain Expression Language (LCEL)](/docs/concepts/lcel) - [Chaining runnables](/docs/how_to/sequence/) - [Configuring chain parameters at runtime](/docs/how_to/binding) -- [Prompt templates](/docs/concepts/#prompt-templates) -- [Chat Messages](/docs/concepts/#message-types) +- [Prompt templates](/docs/concepts/prompt_templates) +- [Chat Messages](/docs/concepts/messages) ::: diff --git a/docs/core_docs/docs/how_to/self_query.ipynb b/docs/core_docs/docs/how_to/self_query.ipynb index 2b01e7fcb972..a25163f1df43 100644 --- a/docs/core_docs/docs/how_to/self_query.ipynb +++ b/docs/core_docs/docs/how_to/self_query.ipynb @@ -1,345 +1,345 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "c0bc3390-4bed-49d3-96ce-072badb4110b", - "metadata": {}, - "source": [ - "# How to do \"self-querying\" retrieval\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Retrievers](/docs/concepts#retrievers)\n", - "- [Vector stores](/docs/concepts#vectorstores)\n", - "\n", - ":::\n", - "\n", - "A self-querying retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses an LLM to write a structured query and then applies that structured query to its underlying vector store. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents but to also extract filters from the user query on the metadata of stored documents and to execute those filters.\n", - "\n", - "![](../../static/img/self_querying.jpeg)\n", - "\n", - ":::info\n", - "\n", - "Head to [Integrations](/docs/integrations/retrievers/self_query) for documentation on vector stores with built-in support for self-querying.\n", - "\n", - ":::\n", - "\n", - "## Get started\n", - "\n", - "For demonstration purposes, we'll use an in-memory, unoptimized vector store. 
You should swap it out for a supported production-ready vector store when seriously building.\n", - "\n", - "The self-query retriever requires you to have the [`peggy`](https://www.npmjs.com/package/peggy) package installed as a peer dep, and we'll also use OpenAI for this example:\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from '@theme/Npm2Yarn';\n", - "\n", - "\n", - " peggy @langchain/openai @langchain/core\n", - "\n", - "```\n", - "\n", - "We've created a small demo set of documents that contain summaries of movies:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "beec3e35-3750-408c-9f2a-d92cf0a9a321", - "metadata": {}, - "outputs": [], - "source": [ - "import \"peggy\";\n", - "import { Document } from \"@langchain/core/documents\";\n", - "\n", - "/**\n", - " * First, we create a bunch of documents. You can load your own documents here instead.\n", - " * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.\n", - " */\n", - "const docs = [\n", - " new Document({\n", - " pageContent:\n", - " \"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n", - " metadata: { year: 1993, rating: 7.7, genre: \"science fiction\", length: 122 },\n", - " }),\n", - " new Document({\n", - " pageContent:\n", - " \"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n", - " metadata: { year: 2010, director: \"Christopher Nolan\", rating: 8.2, length: 148 },\n", - " }),\n", - " new Document({\n", - " pageContent:\n", - " \"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n", - " metadata: { year: 2006, director: \"Satoshi Kon\", rating: 8.6 },\n", - " }),\n", - " new Document({\n", - " pageContent:\n", - " \"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n", - " metadata: { year: 2019, director: \"Greta Gerwig\", rating: 8.3, length: 135 },\n", - " }),\n", - " new Document({\n", - " pageContent: \"Toys come alive and have a blast doing so\",\n", - " metadata: { year: 1995, genre: \"animated\", length: 77 },\n", - " }),\n", - " new Document({\n", - " pageContent: \"Three men walk into the Zone, three men walk out of the Zone\",\n", - " metadata: {\n", - " year: 1979,\n", - " director: \"Andrei Tarkovsky\",\n", - " genre: \"science fiction\",\n", - " rating: 9.9,\n", - " },\n", - " }),\n", - "];" - ] - }, - { - "cell_type": "markdown", - "id": "99771131-1efb-42e2-95f8-2aaa95f37677", - "metadata": {}, - "source": [ - "### Creating our self-querying retriever\n", - "\n", - "Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "7832ca43-cc17-4375-bf4e-679b99584568", - "metadata": {}, - "outputs": [], - "source": [ - "import { OpenAIEmbeddings, OpenAI } from \"@langchain/openai\";\n", - "import { FunctionalTranslator } from \"@langchain/core/structured_query\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { SelfQueryRetriever } from \"langchain/retrievers/self_query\";\n", - "import type { AttributeInfo } from \"langchain/chains/query_constructor\";\n", - "\n", - "/**\n", - " * We define the attributes we want to be able to query on.\n", - " * in this case, we want to be able to query on the genre, year, director, rating, and length of the movie.\n", - " * We also provide a description of each attribute and the type of the attribute.\n", - " * This is used to generate the query prompts.\n", - " */\n", - "const attributeInfo: AttributeInfo[] = [\n", - " {\n", - " name: \"genre\",\n", - " description: \"The genre of the movie\",\n", - " type: \"string or array of strings\",\n", - " },\n", - " {\n", - " name: \"year\",\n", - " description: \"The year the movie was released\",\n", - " type: \"number\",\n", - " },\n", - " {\n", - " name: \"director\",\n", - " description: \"The director of the movie\",\n", - " type: \"string\",\n", - " },\n", - " {\n", - " name: \"rating\",\n", - " description: \"The rating of the movie (1-10)\",\n", - " type: \"number\",\n", - " },\n", - " {\n", - " name: \"length\",\n", - " description: \"The length of the movie in minutes\",\n", - " type: \"number\",\n", - " },\n", - "];\n", - "\n", - "\n", - "\n", - "/**\n", - " * Next, we instantiate a vector store. This is where we store the embeddings of the documents.\n", - " * We also need to provide an embeddings object. This is used to embed the documents.\n", - " */\n", - "const embeddings = new OpenAIEmbeddings();\n", - "const llm = new OpenAI();\n", - "const documentContents = \"Brief summary of a movie\";\n", - "const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);\n", - "const selfQueryRetriever = SelfQueryRetriever.fromLLM({\n", - " llm,\n", - " vectorStore,\n", - " documentContents,\n", - " attributeInfo,\n", - " /**\n", - " * We need to use a translator that translates the queries into a\n", - " * filter format that the vector store can understand. We provide a basic translator\n", - " * translator here, but you can create your own translator by extending BaseTranslator\n", - " * abstract class. Note that the vector store needs to support filtering on the metadata\n", - " * attributes you want to query on.\n", - " */\n", - " structuredQueryTranslator: new FunctionalTranslator(),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "9c66f4c8-3682-46ac-8f17-0839194888a3", - "metadata": {}, - "source": [ - "### Testing it out\n", - "\n", - "And now we can actually try using our retriever!\n", - "\n", - "We can ask questions like \"Which movies are less than 90 minutes?\" or \"Which movies are rated higher than 8.5?\".\n", - "We can also ask questions like \"Which movies are either comedy or drama and are less than 90 minutes?\".\n", - "The translator within the retriever will automatically convert these questions into vector store filters that can be used to retrieve documents." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "21c5df28-ea78-4f4e-99d6-489c864d1a04", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"Toys come alive and have a blast doing so\"\u001b[39m,\n", - " metadata: { year: \u001b[33m1995\u001b[39m, genre: \u001b[32m\"animated\"\u001b[39m, length: \u001b[33m77\u001b[39m }\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "c0bc3390-4bed-49d3-96ce-072badb4110b", + "metadata": {}, + "source": [ + "# How to do \"self-querying\" retrieval\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Retrievers](/docs/concepts/retrievers)\n", + "- [Vector stores](/docs/concepts/vectorstores)\n", + "\n", + ":::\n", + "\n", + "A self-querying retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses an LLM to write a structured query and then applies that structured query to its underlying vector store. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents but to also extract filters from the user query on the metadata of stored documents and to execute those filters.\n", + "\n", + "![](../../static/img/self_querying.jpeg)\n", + "\n", + ":::info\n", + "\n", + "Head to [Integrations](/docs/integrations/retrievers/self_query) for documentation on vector stores with built-in support for self-querying.\n", + "\n", + ":::\n", + "\n", + "## Get started\n", + "\n", + "For demonstration purposes, we'll use an in-memory, unoptimized vector store. You should swap it out for a supported production-ready vector store when seriously building.\n", + "\n", + "The self-query retriever requires you to have the [`peggy`](https://www.npmjs.com/package/peggy) package installed as a peer dep, and we'll also use OpenAI for this example:\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from '@theme/Npm2Yarn';\n", + "\n", + "\n", + " peggy @langchain/openai @langchain/core\n", + "\n", + "```\n", + "\n", + "We've created a small demo set of documents that contain summaries of movies:" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await selfQueryRetriever.invoke(\n", - " \"Which movies are less than 90 minutes?\"\n", - ");" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "228e5d70-d4cf-43bb-bc8e-3d6f11e784f2", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception\"\u001b[39m... 
16 more characters,\n", - " metadata: { year: \u001b[33m2006\u001b[39m, director: \u001b[32m\"Satoshi Kon\"\u001b[39m, rating: \u001b[33m8.6\u001b[39m }\n", - " },\n", - " Document {\n", - " pageContent: \u001b[32m\"Three men walk into the Zone, three men walk out of the Zone\"\u001b[39m,\n", - " metadata: {\n", - " year: \u001b[33m1979\u001b[39m,\n", - " director: \u001b[32m\"Andrei Tarkovsky\"\u001b[39m,\n", - " genre: \u001b[32m\"science fiction\"\u001b[39m,\n", - " rating: \u001b[33m9.9\u001b[39m\n", - " }\n", - " }\n", - "]" + "cell_type": "code", + "execution_count": null, + "id": "beec3e35-3750-408c-9f2a-d92cf0a9a321", + "metadata": {}, + "outputs": [], + "source": [ + "import \"peggy\";\n", + "import { Document } from \"@langchain/core/documents\";\n", + "\n", + "/**\n", + " * First, we create a bunch of documents. You can load your own documents here instead.\n", + " * Each document has a pageContent and a metadata field. Make sure your metadata matches the AttributeInfo below.\n", + " */\n", + "const docs = [\n", + " new Document({\n", + " pageContent:\n", + " \"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n", + " metadata: { year: 1993, rating: 7.7, genre: \"science fiction\", length: 122 },\n", + " }),\n", + " new Document({\n", + " pageContent:\n", + " \"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n", + " metadata: { year: 2010, director: \"Christopher Nolan\", rating: 8.2, length: 148 },\n", + " }),\n", + " new Document({\n", + " pageContent:\n", + " \"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n", + " metadata: { year: 2006, director: \"Satoshi Kon\", rating: 8.6 },\n", + " }),\n", + " new Document({\n", + " pageContent:\n", + " \"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n", + " metadata: { year: 2019, director: \"Greta Gerwig\", rating: 8.3, length: 135 },\n", + " }),\n", + " new Document({\n", + " pageContent: \"Toys come alive and have a blast doing so\",\n", + " metadata: { year: 1995, genre: \"animated\", length: 77 },\n", + " }),\n", + " new Document({\n", + " pageContent: \"Three men walk into the Zone, three men walk out of the Zone\",\n", + " metadata: {\n", + " year: 1979,\n", + " director: \"Andrei Tarkovsky\",\n", + " genre: \"science fiction\",\n", + " rating: 9.9,\n", + " },\n", + " }),\n", + "];" ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await selfQueryRetriever.invoke(\n", - " \"Which movies are rated higher than 8.5?\"\n", - ");" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "8244591e-97b5-4aba-b1e5-fe5e1996cb99", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"A bunch of normal-sized women are supremely wholesome and some men pine after them\"\u001b[39m,\n", - " metadata: { year: \u001b[33m2019\u001b[39m, director: \u001b[32m\"Greta Gerwig\"\u001b[39m, rating: \u001b[33m8.3\u001b[39m, length: \u001b[33m135\u001b[39m }\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "99771131-1efb-42e2-95f8-2aaa95f37677", + "metadata": {}, + "source": [ + "### Creating our self-querying retriever\n", + "\n", + "Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents." 
] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await selfQueryRetriever.invoke(\n", - " \"Which movies are directed by Greta Gerwig?\"\n", - ");" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "420a6906-66fb-449f-8626-2e399ae5e6a8", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "7832ca43-cc17-4375-bf4e-679b99584568", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenAIEmbeddings, OpenAI } from \"@langchain/openai\";\n", + "import { FunctionalTranslator } from \"@langchain/core/structured_query\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { SelfQueryRetriever } from \"langchain/retrievers/self_query\";\n", + "import type { AttributeInfo } from \"langchain/chains/query_constructor\";\n", + "\n", + "/**\n", + " * We define the attributes we want to be able to query on.\n", + " * in this case, we want to be able to query on the genre, year, director, rating, and length of the movie.\n", + " * We also provide a description of each attribute and the type of the attribute.\n", + " * This is used to generate the query prompts.\n", + " */\n", + "const attributeInfo: AttributeInfo[] = [\n", + " {\n", + " name: \"genre\",\n", + " description: \"The genre of the movie\",\n", + " type: \"string or array of strings\",\n", + " },\n", + " {\n", + " name: \"year\",\n", + " description: \"The year the movie was released\",\n", + " type: \"number\",\n", + " },\n", + " {\n", + " name: \"director\",\n", + " description: \"The director of the movie\",\n", + " type: \"string\",\n", + " },\n", + " {\n", + " name: \"rating\",\n", + " description: \"The rating of the movie (1-10)\",\n", + " type: \"number\",\n", + " },\n", + " {\n", + " name: \"length\",\n", + " description: \"The length of the movie in minutes\",\n", + " type: \"number\",\n", + " },\n", + "];\n", + "\n", + "\n", + "\n", + "/**\n", + " * Next, we instantiate a vector store. This is where we store the embeddings of the documents.\n", + " * We also need to provide an embeddings object. This is used to embed the documents.\n", + " */\n", + "const embeddings = new OpenAIEmbeddings();\n", + "const llm = new OpenAI();\n", + "const documentContents = \"Brief summary of a movie\";\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);\n", + "const selfQueryRetriever = SelfQueryRetriever.fromLLM({\n", + " llm,\n", + " vectorStore,\n", + " documentContents,\n", + " attributeInfo,\n", + " /**\n", + " * We need to use a translator that translates the queries into a\n", + " * filter format that the vector store can understand. We provide a basic translator\n", + " * translator here, but you can create your own translator by extending BaseTranslator\n", + " * abstract class. 
Note that the vector store needs to support filtering on the metadata\n", + " * attributes you want to query on.\n", + " */\n", + " structuredQueryTranslator: new FunctionalTranslator(),\n", + "});" + ] + }, { - "data": { - "text/plain": [ - "[\n", - " Document {\n", - " pageContent: \u001b[32m\"Toys come alive and have a blast doing so\"\u001b[39m,\n", - " metadata: { year: \u001b[33m1995\u001b[39m, genre: \u001b[32m\"animated\"\u001b[39m, length: \u001b[33m77\u001b[39m }\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "9c66f4c8-3682-46ac-8f17-0839194888a3", + "metadata": {}, + "source": [ + "### Testing it out\n", + "\n", + "And now we can actually try using our retriever!\n", + "\n", + "We can ask questions like \"Which movies are less than 90 minutes?\" or \"Which movies are rated higher than 8.5?\".\n", + "We can also ask questions like \"Which movies are either comedy or drama and are less than 90 minutes?\".\n", + "The translator within the retriever will automatically convert these questions into vector store filters that can be used to retrieve documents." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "21c5df28-ea78-4f4e-99d6-489c864d1a04", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m\"Toys come alive and have a blast doing so\"\u001b[39m,\n", + " metadata: { year: \u001b[33m1995\u001b[39m, genre: \u001b[32m\"animated\"\u001b[39m, length: \u001b[33m77\u001b[39m }\n", + " }\n", + "]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await selfQueryRetriever.invoke(\n", + " \"Which movies are less than 90 minutes?\"\n", + ");" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "228e5d70-d4cf-43bb-bc8e-3d6f11e784f2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception\"\u001b[39m... 
16 more characters,\n", + " metadata: { year: \u001b[33m2006\u001b[39m, director: \u001b[32m\"Satoshi Kon\"\u001b[39m, rating: \u001b[33m8.6\u001b[39m }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Three men walk into the Zone, three men walk out of the Zone\"\u001b[39m,\n", + " metadata: {\n", + " year: \u001b[33m1979\u001b[39m,\n", + " director: \u001b[32m\"Andrei Tarkovsky\"\u001b[39m,\n", + " genre: \u001b[32m\"science fiction\"\u001b[39m,\n", + " rating: \u001b[33m9.9\u001b[39m\n", + " }\n", + " }\n", + "]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await selfQueryRetriever.invoke(\n", + " \"Which movies are rated higher than 8.5?\"\n", + ");" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8244591e-97b5-4aba-b1e5-fe5e1996cb99", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m\"A bunch of normal-sized women are supremely wholesome and some men pine after them\"\u001b[39m,\n", + " metadata: { year: \u001b[33m2019\u001b[39m, director: \u001b[32m\"Greta Gerwig\"\u001b[39m, rating: \u001b[33m8.3\u001b[39m, length: \u001b[33m135\u001b[39m }\n", + " }\n", + "]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await selfQueryRetriever.invoke(\n", + " \"Which movies are directed by Greta Gerwig?\"\n", + ");" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "420a6906-66fb-449f-8626-2e399ae5e6a8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " Document {\n", + " pageContent: \u001b[32m\"Toys come alive and have a blast doing so\"\u001b[39m,\n", + " metadata: { year: \u001b[33m1995\u001b[39m, genre: \u001b[32m\"animated\"\u001b[39m, length: \u001b[33m77\u001b[39m }\n", + " }\n", + "]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await selfQueryRetriever.invoke(\n", + " \"Which movies are either comedy or drama and are less than 90 minutes?\"\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "f7f646a2", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now seen how to use the `SelfQueryRetriever` to to generate vector store filters based on an original question.\n", + "\n", + "Next, you can check out the list of [vector stores that currently support self-querying](/docs/integrations/retrievers/self_query/)." ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "await selfQueryRetriever.invoke(\n", - " \"Which movies are either comedy or drama and are less than 90 minutes?\"\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "f7f646a2", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now seen how to use the `SelfQueryRetriever` to to generate vector store filters based on an original question.\n", - "\n", - "Next, you can check out the list of [vector stores that currently support self-querying](/docs/integrations/retrievers/self_query/)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/sequence.ipynb b/docs/core_docs/docs/how_to/sequence.ipynb index c8f429a3f9a0..1d5fc1a6755f 100644 --- a/docs/core_docs/docs/how_to/sequence.ipynb +++ b/docs/core_docs/docs/how_to/sequence.ipynb @@ -1,252 +1,252 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "keywords: [chain, chaining, runnablesequence]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to chain runnables\n", - "\n", - "One point about [LangChain Expression Language](/docs/concepts/#langchain-expression-language) is that any two runnables can be \"chained\" together into sequences. The output of the previous runnable's `.invoke()` call is passed as input to the next runnable. This can be done using the `.pipe()` method.\n", - "\n", - "The resulting [`RunnableSequence`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", - "- [Prompt templates](/docs/concepts/#prompt-templates)\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Output parser](/docs/concepts/#output-parsers)\n", - "\n", - ":::\n", - "\n", - "## The pipe method\n", - "\n", - "To show off how this works, let's go through an example. 
We'll walk through a common pattern in LangChain: using a [prompt template](/docs/concepts#prompt-templates) to format input into a [chat model](/docs/concepts/#chat-models), and finally converting the chat message output into a string with an [output parser](/docs/concepts#output-parsers.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/core @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "import { ChatOpenAI } from '@langchain/openai';\n", - "\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(\"tell me a joke about {topic}\")\n", - "\n", - "const chain = prompt.pipe(model).pipe(new StringOutputParser())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Prompts and models are both runnable, and the output type from the prompt call is the same as the input type of the chat model, so we can chain them together. We can then invoke the resulting sequence like any other runnable:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "\"Here's a bear joke for you:\\n\\nWhy did the bear dissolve in water?\\nBecause it was a polar bear!\"" + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [chain, chaining, runnablesequence]\n", + "---" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await chain.invoke({ topic: \"bears\" })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Coercion\n", - "\n", - "We can even combine this chain with more runnables to create another chain. This may involve some input/output formatting using other types of runnables, depending on the required inputs and outputs of the chain components.\n", - "\n", - "For example, let's say we wanted to compose the joke generating chain with another chain that evaluates whether or not the generated joke was funny.\n", - "\n", - "We would need to be careful with how we format the input into the next chain. In the below example, the dict in the chain is automatically parsed and converted into a [`RunnableParallel`](/docs/how_to/parallel), which runs all of its values in parallel and returns a dict with the results.\n", - "\n", - "This happens to be the same format the next prompt template expects. Here it is in action:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "'Haha, that\\'s a clever play on words! Using \"polar\" to imply the bear dissolved or became polar/polarized when put in water. 
Not the most hilarious joke ever, but it has a cute, groan-worthy pun that makes it mildly amusing. I appreciate a good pun or wordplay joke.'" + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to chain runnables\n", + "\n", + "One point about [LangChain Expression Language](/docs/concepts/lcel) is that any two runnables can be \"chained\" together into sequences. The output of the previous runnable's `.invoke()` call is passed as input to the next runnable. This can be done using the `.pipe()` method.\n", + "\n", + "The resulting [`RunnableSequence`](https://api.js.langchain.com/classes/langchain_core.runnables.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n", + "- [Prompt templates](/docs/concepts/prompt_templates)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Output parser](/docs/concepts/output_parsers)\n", + "\n", + ":::\n", + "\n", + "## The pipe method\n", + "\n", + "To show off how this works, let's go through an example. We'll walk through a common pattern in LangChain: using a [prompt template](/docs/concepts/prompt_templates) to format input into a [chat model](/docs/concepts/chat_models), and finally converting the chat message output into a string with an [output parser](/docs/concepts/output_parsers.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "\n", - "const analysisPrompt = ChatPromptTemplate.fromTemplate(\"is this a funny joke? {joke}\")\n", - "\n", - "const composedChain = new RunnableLambda({\n", - " func: async (input: { topic: string }) => {\n", - " const result = await chain.invoke(input);\n", - " return { joke: result };\n", - " }\n", - "}).pipe(analysisPrompt).pipe(model).pipe(new StringOutputParser())\n", - "\n", - "await composedChain.invoke({ topic: \"bears\" })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Functions will also be coerced into runnables, so you can add custom logic to your chains too. The below chain results in the same logical flow as before:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/core @langchain/core\n", + "\n", + "```" + ] + }, { - "data": { - "text/plain": [ - "\"Haha, that's a cute and punny joke! I like how it plays on the idea of beets blushing or turning red like someone blushing. Food puns can be quite amusing. While not a total knee-slapper, it's a light-hearted, groan-worthy dad joke that would make me chuckle and shake my head. 
Simple vegetable humor!\"" + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatOpenAI } from '@langchain/openai';\n", + "\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(\"tell me a joke about {topic}\")\n", + "\n", + "const chain = prompt.pipe(model).pipe(new StringOutputParser())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Prompts and models are both runnable, and the output type from the prompt call is the same as the input type of the chat model, so we can chain them together. We can then invoke the resulting sequence like any other runnable:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"Here's a bear joke for you:\\n\\nWhy did the bear dissolve in water?\\nBecause it was a polar bear!\"" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain.invoke({ topic: \"bears\" })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Coercion\n", + "\n", + "We can even combine this chain with more runnables to create another chain. This may involve some input/output formatting using other types of runnables, depending on the required inputs and outputs of the chain components.\n", + "\n", + "For example, let's say we wanted to compose the joke generating chain with another chain that evaluates whether or not the generated joke was funny.\n", + "\n", + "We would need to be careful with how we format the input into the next chain. In the below example, the dict in the chain is automatically parsed and converted into a [`RunnableParallel`](/docs/how_to/parallel), which runs all of its values in parallel and returns a dict with the results.\n", + "\n", + "This happens to be the same format the next prompt template expects. Here it is in action:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Haha, that\\'s a clever play on words! Using \"polar\" to imply the bear dissolved or became polar/polarized when put in water. Not the most hilarious joke ever, but it has a cute, groan-worthy pun that makes it mildly amusing. I appreciate a good pun or wordplay joke.'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "\n", + "const analysisPrompt = ChatPromptTemplate.fromTemplate(\"is this a funny joke? {joke}\")\n", + "\n", + "const composedChain = new RunnableLambda({\n", + " func: async (input: { topic: string }) => {\n", + " const result = await chain.invoke(input);\n", + " return { joke: result };\n", + " }\n", + "}).pipe(analysisPrompt).pipe(model).pipe(new StringOutputParser())\n", + "\n", + "await composedChain.invoke({ topic: \"bears\" })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Functions will also be coerced into runnables, so you can add custom logic to your chains too. 
The below chain results in the same logical flow as before:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"Haha, that's a cute and punny joke! I like how it plays on the idea of beets blushing or turning red like someone blushing. Food puns can be quite amusing. While not a total knee-slapper, it's a light-hearted, groan-worthy dad joke that would make me chuckle and shake my head. Simple vegetable humor!\"" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnableSequence } from \"@langchain/core/runnables\";\n", + "\n", + "const composedChainWithLambda = RunnableSequence.from([\n", + " chain,\n", + " (input) => ({ joke: input }),\n", + " analysisPrompt,\n", + " model,\n", + " new StringOutputParser()\n", + "])\n", + "\n", + "await composedChainWithLambda.invoke({ topic: \"beets\" })" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> See the LangSmith trace for the run above [here](https://smith.langchain.com/public/ef1bf347-a243-4da6-9be6-54f5d73e6da2/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "However, keep in mind that using functions like this may interfere with operations like streaming. See [this section](/docs/how_to/functions) for more information." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You now know some ways to chain two runnables together.\n", + "\n", + "To learn more, see the other how-to guides on runnables in [this section](/docs/how_to/#langchain-expression-language-lcel)." ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { RunnableSequence } from \"@langchain/core/runnables\";\n", - "\n", - "const composedChainWithLambda = RunnableSequence.from([\n", - " chain,\n", - " (input) => ({ joke: input }),\n", - " analysisPrompt,\n", - " model,\n", - " new StringOutputParser()\n", - "])\n", - "\n", - "await composedChainWithLambda.invoke({ topic: \"beets\" })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> See the LangSmith trace for the run above [here](https://smith.langchain.com/public/ef1bf347-a243-4da6-9be6-54f5d73e6da2/r)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "However, keep in mind that using functions like this may interfere with operations like streaming. See [this section](/docs/how_to/functions) for more information." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You now know some ways to chain two runnables together.\n", - "\n", - "To learn more, see the other how-to guides on runnables in [this section](/docs/how_to/#langchain-expression-language-lcel)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "typescript", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.1" + } }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "typescript", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.1" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/split_by_token.ipynb b/docs/core_docs/docs/how_to/split_by_token.ipynb index c6af9a6c54d3..05b228a9b30c 100644 --- a/docs/core_docs/docs/how_to/split_by_token.ipynb +++ b/docs/core_docs/docs/how_to/split_by_token.ipynb @@ -1,110 +1,110 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "a05c860c", - "metadata": {}, - "source": [ - "# How to split text by tokens \n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Text splitters](/docs/concepts#text-splitters)\n", - "\n", - ":::\n", - "\n", - "Language models have a token limit. You should not exceed the token limit. When you split your text into chunks it is therefore a good idea to count the number of tokens. There are many tokenizers. When you count tokens in your text you should use the same tokenizer as used in the language model." - ] - }, - { - "cell_type": "markdown", - "id": "7683b36a", - "metadata": {}, - "source": [ - "## `js-tiktoken`\n", - "\n", - ":::{.callout-note}\n", - "[js-tiktoken](https://github.com/openai/js-tiktoken) is a JavaScript version of the `BPE` tokenizer created by OpenAI.\n", - ":::\n", - "\n", - "\n", - "We can use `js-tiktoken` to estimate tokens used. It is tuned to OpenAI models.\n", - "\n", - "1. How the text is split: by character passed in.\n", - "2. How the chunk size is measured: by the `js-tiktoken` tokenizer.\n", - "\n", - "You can use the [`TokenTextSplitter`](https://api.js.langchain.com/classes/langchain_textsplitters.TokenTextSplitter.html) like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "4454c70e", - "metadata": {}, - "outputs": [ + "cells": [ + { + "cell_type": "markdown", + "id": "a05c860c", + "metadata": {}, + "source": [ + "# How to split text by tokens \n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Text splitters](/docs/concepts/text_splitters)\n", + "\n", + ":::\n", + "\n", + "Language models have a token limit. You should not exceed the token limit. When you split your text into chunks it is therefore a good idea to count the number of tokens. There are many tokenizers. When you count tokens in your text you should use the same tokenizer as used in the language model." 
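As a quick, hedged illustration of the advice above about counting tokens with the same tokenizer the model uses, the sketch below uses the `js-tiktoken` package introduced in the next section. The choice of the `cl100k_base` encoding is an assumption (it is the encoding used by many recent OpenAI chat models); pick the encoding or model name that matches your model.

```typescript
// Sketch only: count tokens before splitting or sending text to a model.
import { getEncoding } from "js-tiktoken";

// cl100k_base is assumed here; choose the encoding that matches your model.
const enc = getEncoding("cl100k_base");

const text = "Madam Speaker, Madam Vice President, our First Lady and Second Gentleman.";
const numTokens = enc.encode(text).length;

console.log(`${numTokens} tokens`);
```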
+ ] + }, + { + "cell_type": "markdown", + "id": "7683b36a", + "metadata": {}, + "source": [ + "## `js-tiktoken`\n", + "\n", + ":::{.callout-note}\n", + "[js-tiktoken](https://github.com/openai/js-tiktoken) is a JavaScript version of the `BPE` tokenizer created by OpenAI.\n", + ":::\n", + "\n", + "\n", + "We can use `js-tiktoken` to estimate tokens used. It is tuned to OpenAI models.\n", + "\n", + "1. How the text is split: by character passed in.\n", + "2. How the chunk size is measured: by the `js-tiktoken` tokenizer.\n", + "\n", + "You can use the [`TokenTextSplitter`](https://api.js.langchain.com/classes/langchain_textsplitters.TokenTextSplitter.html) like this:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Madam Speaker, Madam Vice President, our\n" - ] + "cell_type": "code", + "execution_count": 1, + "id": "4454c70e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Madam Speaker, Madam Vice President, our\n" + ] + } + ], + "source": [ + "import { TokenTextSplitter } from \"@langchain/textsplitters\";\n", + "import * as fs from \"node:fs\";\n", + "\n", + "// Load an example document\n", + "const rawData = await fs.readFileSync(\"../../../../examples/state_of_the_union.txt\");\n", + "const stateOfTheUnion = rawData.toString();\n", + "\n", + "const textSplitter = new TokenTextSplitter({\n", + " chunkSize: 10,\n", + " chunkOverlap: 0,\n", + "});\n", + "\n", + "const texts = await textSplitter.splitText(stateOfTheUnion);\n", + "\n", + "console.log(texts[0]);" + ] + }, + { + "cell_type": "markdown", + "id": "3bc155d0", + "metadata": {}, + "source": [ + "**Note:** Some written languages (e.g. Chinese and Japanese) have characters which encode to 2 or more tokens. Using the `TokenTextSplitter` directly can split the tokens for a character between two chunks causing malformed Unicode characters.\n", + "\n", + "## Next steps\n", + "\n", + "You've now learned a method for splitting text based on token count.\n", + "\n", + "Next, check out the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + }, + "vscode": { + "interpreter": { + "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" + } } - ], - "source": [ - "import { TokenTextSplitter } from \"@langchain/textsplitters\";\n", - "import * as fs from \"node:fs\";\n", - "\n", - "// Load an example document\n", - "const rawData = await fs.readFileSync(\"../../../../examples/state_of_the_union.txt\");\n", - "const stateOfTheUnion = rawData.toString();\n", - "\n", - "const textSplitter = new TokenTextSplitter({\n", - " chunkSize: 10,\n", - " chunkOverlap: 0,\n", - "});\n", - "\n", - "const texts = await textSplitter.splitText(stateOfTheUnion);\n", - "\n", - "console.log(texts[0]);" - ] - }, - { - "cell_type": "markdown", - "id": "3bc155d0", - "metadata": {}, - "source": [ - "**Note:** Some written languages (e.g. Chinese and Japanese) have characters which encode to 2 or more tokens. 
Using the `TokenTextSplitter` directly can split the tokens for a character between two chunks causing malformed Unicode characters.\n", - "\n", - "## Next steps\n", - "\n", - "You've now learned a method for splitting text based on token count.\n", - "\n", - "Next, check out the [full tutorial on retrieval-augmented generation](/docs/tutorials/rag)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" - }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" }, - "vscode": { - "interpreter": { - "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/stream_agent_client.mdx b/docs/core_docs/docs/how_to/stream_agent_client.mdx index 19d9875adbd6..6ce18f7081e5 100644 --- a/docs/core_docs/docs/how_to/stream_agent_client.mdx +++ b/docs/core_docs/docs/how_to/stream_agent_client.mdx @@ -8,10 +8,10 @@ and [here for the client file](https://github.com/langchain-ai/langchain-nextjs- This guide assumes familiarity with the following concepts: -- [LangChain Expression Language](/docs/concepts#langchain-expression-language) -- [Chat models](/docs/concepts#chat-models) -- [Tool calling](/docs/concepts#functiontool-calling) -- [Agents](/docs/concepts#agents) +- [LangChain Expression Language](/docs/concepts/lcel) +- [Chat models](/docs/concepts/chat_models) +- [Tool calling](/docs/concepts/tool_calling) +- [Agents](/docs/concepts/agents) ::: diff --git a/docs/core_docs/docs/how_to/stream_tool_client.mdx b/docs/core_docs/docs/how_to/stream_tool_client.mdx index f4484e6f1f66..c9b83af470c1 100644 --- a/docs/core_docs/docs/how_to/stream_tool_client.mdx +++ b/docs/core_docs/docs/how_to/stream_tool_client.mdx @@ -8,9 +8,9 @@ and [here for the client file](https://github.com/langchain-ai/langchain-nextjs- This guide assumes familiarity with the following concepts: -> - [LangChain Expression Language](/docs/concepts#langchain-expression-language) -> - [Chat models](/docs/concepts#chat-models) -> - [Tool calling](/docs/concepts#functiontool-calling) +> - [LangChain Expression Language](/docs/concepts/lcel) +> - [Chat models](/docs/concepts/chat_models) +> - [Tool calling](/docs/concepts/tool_calling) ::: diff --git a/docs/core_docs/docs/how_to/streaming.ipynb b/docs/core_docs/docs/how_to/streaming.ipynb index f9817104e5ed..5a107df5d71e 100644 --- a/docs/core_docs/docs/how_to/streaming.ipynb +++ b/docs/core_docs/docs/how_to/streaming.ipynb @@ -1,2079 +1,2079 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to stream\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Expression Language](/docs/concepts/#langchain-expression-language)\n", - "- [Output parsers](/docs/concepts/#output-parsers)\n", - "\n", - ":::\n", - "\n", - "Streaming is critical in making applications based on LLMs feel responsive to end-users.\n", - "\n", - "Important LangChain primitives like LLMs, parsers, prompts, retrievers, and agents implement the LangChain Runnable Interface.\n", - "\n", - "This interface provides two general approaches to stream content:\n", - "\n", - "- 
`.stream()`: a default implementation of streaming that streams the final output from the chain.\n", - "- `streamEvents()` and `streamLog()`: these provide a way to stream both intermediate steps and final output from the chain.\n", - "\n", - "Let’s take a look at both approaches!\n", - "\n", - ":::info\n", - "For a higher-level overview of streaming techniques in LangChain, see [this section of the conceptual guide](/docs/concepts/#streaming).\n", - ":::\n", - "\n", - "# Using Stream\n", - "\n", - "All `Runnable` objects implement a method called stream.\n", - "\n", - "These methods are designed to stream the final output in chunks, yielding each chunk as soon as it is available.\n", - "\n", - "Streaming is only possible if all steps in the program know how to process an **input stream**; i.e., process an input chunk one at a time, and yield a corresponding output chunk.\n", - "\n", - "The complexity of this processing can vary, from straightforward tasks like emitting tokens produced by an LLM, to more challenging ones like streaming parts of JSON results before the entire JSON is complete.\n", - "\n", - "The best place to start exploring streaming is with the single most important components in LLM apps – the models themselves!\n", - "\n", - "## LLMs and Chat Models\n", - "\n", - "Large language models can take several seconds to generate a complete response to a query. This is far slower than the **~200-300 ms** threshold at which an application feels responsive to an end user.\n", - "\n", - "The key strategy to make the application feel more responsive is to show intermediate progress; e.g., to stream the output from the model token by token." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import \"dotenv/config\";" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "|\n", - "Hello|\n", - "!|\n", - " I'm|\n", - " a|\n", - " large|\n", - " language|\n", - " model|\n", - " developed|\n", - " by|\n", - " Open|\n", - "AI|\n", - " called|\n", - " GPT|\n", - "-|\n", - "4|\n", - ",|\n", - " based|\n", - " on|\n", - " the|\n", - " Gener|\n", - "ative|\n", - " Pre|\n", - "-trained|\n", - " Transformer|\n", - " architecture|\n", - ".|\n", - " I'm|\n", - " designed|\n", - " to|\n", - " understand|\n", - " and|\n", - " generate|\n", - " human|\n", - "-like|\n", - " text|\n", - " based|\n", - " on|\n", - " the|\n", - " input|\n", - " I|\n", - " receive|\n", - ".|\n", - " My|\n", - " primary|\n", - " function|\n", - " is|\n", - " to|\n", - " assist|\n", - " with|\n", - " answering|\n", - " questions|\n", - ",|\n", - " providing|\n", - " information|\n", - ",|\n", - " and|\n", - " engaging|\n", - " in|\n", - " various|\n", - " types|\n", - " of|\n", - " conversations|\n", - ".|\n", - " While|\n", - " I|\n", - " don't|\n", - " have|\n", - " personal|\n", - " experiences|\n", - " or|\n", - " emotions|\n", - ",|\n", - " I'm|\n", - " trained|\n", - " on|\n", 
- " diverse|\n", - " datasets|\n", - " that|\n", - " enable|\n", - " me|\n", - " to|\n", - " provide|\n", - " useful|\n", - " and|\n", - " relevant|\n", - " information|\n", - " across|\n", - " a|\n", - " wide|\n", - " array|\n", - " of|\n", - " topics|\n", - ".|\n", - " How|\n", - " can|\n", - " I|\n", - " assist|\n", - " you|\n", - " today|\n", - "?|\n", - "|\n", - "|\n" - ] - } - ], - "source": [ - "const stream = await model.stream(\"Hello! Tell me about yourself.\");\n", - "const chunks = [];\n", - "for await (const chunk of stream) {\n", - " chunks.push(chunk);\n", - " console.log(`${chunk.content}|`)\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's have a look at one of the raw chunks:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to stream\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Expression Language](/docs/concepts/lcel)\n", + "- [Output parsers](/docs/concepts/output_parsers)\n", + "\n", + ":::\n", + "\n", + "Streaming is critical in making applications based on LLMs feel responsive to end-users.\n", + "\n", + "Important LangChain primitives like LLMs, parsers, prompts, retrievers, and agents implement the LangChain Runnable Interface.\n", + "\n", + "This interface provides two general approaches to stream content:\n", + "\n", + "- `.stream()`: a default implementation of streaming that streams the final output from the chain.\n", + "- `streamEvents()` and `streamLog()`: these provide a way to stream both intermediate steps and final output from the chain.\n", + "\n", + "Let’s take a look at both approaches!\n", + "\n", + ":::info\n", + "For a higher-level overview of streaming techniques in LangChain, see [this section of the conceptual guide](/docs/concepts/streaming).\n", + ":::\n", + "\n", + "# Using Stream\n", + "\n", + "All `Runnable` objects implement a method called stream.\n", + "\n", + "These methods are designed to stream the final output in chunks, yielding each chunk as soon as it is available.\n", + "\n", + "Streaming is only possible if all steps in the program know how to process an **input stream**; i.e., process an input chunk one at a time, and yield a corresponding output chunk.\n", + "\n", + "The complexity of this processing can vary, from straightforward tasks like emitting tokens produced by an LLM, to more challenging ones like streaming parts of JSON results before the entire JSON is complete.\n", + "\n", + "The best place to start exploring streaming is with the single most important components in LLM apps – the models themselves!\n", + "\n", + "## LLMs and Chat Models\n", + "\n", + "Large language models can take several seconds to generate a complete response to a query. This is far slower than the **~200-300 ms** threshold at which an application feels responsive to an end user.\n", + "\n", + "The key strategy to make the application feel more responsive is to show intermediate progress; e.g., to stream the output from the model token by token." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: '',\n", - " tool_call_chunks: [],\n", - " additional_kwargs: {},\n", - " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: '',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n", - " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - "}\n" - ] - } - ], - "source": [ - "chunks[0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We got back something called an `AIMessageChunk`. This chunk represents a part of an `AIMessage`.\n", - "\n", - "Message chunks are additive by design – one can simply add them up using the `.concat()` method to get the state of the response so far!" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import \"dotenv/config\";" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"Hello! I'm a\",\n", - " additional_kwargs: {},\n", - " response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n", - " tool_call_chunks: [],\n", - " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: \"Hello! I'm a\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n", - " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - "}\n" - ] - } - ], - "source": [ - "let finalChunk = chunks[0];\n", - "\n", - "for (const chunk of chunks.slice(1, 5)) {\n", - " finalChunk = finalChunk.concat(chunk);\n", - "}\n", - "\n", - "finalChunk" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Chains\n", - "\n", - "Virtually all LLM applications involve more steps than just a call to a language model.\n", - "\n", - "Let’s build a simple chain using `LangChain Expression Language` (`LCEL`) that combines a prompt, model and a parser and verify that streaming works.\n", - "\n", - "We will use `StringOutputParser` to parse the output from the model. This is a simple parser that extracts the content field from an `AIMessageChunk`, giving us the `token` returned by the model.\n", - "\n", - ":::{.callout-tip}\n", - "LCEL is a declarative way to specify a “program” by chainining together different LangChain primitives. Chains created using LCEL benefit from an automatic implementation of stream, allowing streaming of the final output. 
In fact, chains created with LCEL implement the entire standard Runnable interface.\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "|\n", - "Sure|\n", - ",|\n", - " here's|\n", - " a|\n", - " joke|\n", - " for|\n", - " you|\n", - ":\n", - "\n", - "|\n", - "Why|\n", - " did|\n", - " the|\n", - " par|\n", - "rot|\n", - " sit|\n", - " on|\n", - " the|\n", - " stick|\n", - "?\n", - "\n", - "|\n", - "Because|\n", - " it|\n", - " wanted|\n", - " to|\n", - " be|\n", - " a|\n", - " \"|\n", - "pol|\n", - "ly|\n", - "-stick|\n", - "-al|\n", - "\"|\n", - " observer|\n", - "!|\n", - "|\n", - "|\n" - ] - } - ], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(\"Tell me a joke about {topic}\");\n", - "\n", - "const parser = new StringOutputParser();\n", - "\n", - "const chain = prompt.pipe(model).pipe(parser);\n", - "\n", - "const stream = await chain.stream({\n", - " topic: \"parrot\",\n", - "});\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(`${chunk}|`)\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{.callout-note}\n", - "You do not have to use the `LangChain Expression Language` to use LangChain and can instead rely on a standard **imperative** programming approach by\n", - "caling `invoke`, `batch` or `stream` on each component individually, assigning the results to variables and then using them downstream as you see fit.\n", - "\n", - "If that works for your needs, then that's fine by us 👌!\n", - ":::\n", - "\n", - "### Working with Input Streams\n", - "\n", - "What if you wanted to stream JSON from the output as it was being generated?\n", - "\n", - "If you were to rely on `JSON.parse` to parse the partial json, the parsing would fail as the partial json wouldn't be valid json.\n", - "\n", - "You'd likely be at a complete loss of what to do and claim that it wasn't possible to stream JSON.\n", - "\n", - "Well, turns out there is a way to do it - the parser needs to operate on the **input stream**, and attempt to \"auto-complete\" the partial json into a valid state.\n", - "\n", - "Let's see such a parser in action to understand what this means." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " countries: [\n", - " { name: 'France', population: 67390000 },\n", - " { name: 'Spain', population: 47350000 },\n", - " { name: 'Japan', population: 125800000 }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n", - "\n", - "const chain = model.pipe(new JsonOutputParser());\n", - "const stream = await chain.stream(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`\n", - ");\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, let's **break** streaming. We'll use the previous example and append an extraction function at the end that extracts the country names from the finalized JSON. Since this new last step is just a function call with no defined streaming behavior, the streaming output from previous steps is aggregated, then passed as a single input to the function.\n", - "\n", - ":::{.callout-warning}\n", - "Any steps in the chain that operate on **finalized inputs** rather than on **input streams** can break streaming functionality via `stream`.\n", - ":::\n", - "\n", - ":::{.callout-tip}\n", - "Later, we will discuss the `streamEvents` API which streams results from intermediate steps. 
This API will stream results from intermediate steps even if the chain contains steps that only operate on **finalized inputs**.\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "|\n", + "Hello|\n", + "!|\n", + " I'm|\n", + " a|\n", + " large|\n", + " language|\n", + " model|\n", + " developed|\n", + " by|\n", + " Open|\n", + "AI|\n", + " called|\n", + " GPT|\n", + "-|\n", + "4|\n", + ",|\n", + " based|\n", + " on|\n", + " the|\n", + " Gener|\n", + "ative|\n", + " Pre|\n", + "-trained|\n", + " Transformer|\n", + " architecture|\n", + ".|\n", + " I'm|\n", + " designed|\n", + " to|\n", + " understand|\n", + " and|\n", + " generate|\n", + " human|\n", + "-like|\n", + " text|\n", + " based|\n", + " on|\n", + " the|\n", + " input|\n", + " I|\n", + " receive|\n", + ".|\n", + " My|\n", + " primary|\n", + " function|\n", + " is|\n", + " to|\n", + " assist|\n", + " with|\n", + " answering|\n", + " questions|\n", + ",|\n", + " providing|\n", + " information|\n", + ",|\n", + " and|\n", + " engaging|\n", + " in|\n", + " various|\n", + " types|\n", + " of|\n", + " conversations|\n", + ".|\n", + " While|\n", + " I|\n", + " don't|\n", + " have|\n", + " personal|\n", + " experiences|\n", + " or|\n", + " emotions|\n", + ",|\n", + " I'm|\n", + " trained|\n", + " on|\n", + " diverse|\n", + " datasets|\n", + " that|\n", + " enable|\n", + " me|\n", + " to|\n", + " provide|\n", + " useful|\n", + " and|\n", + " relevant|\n", + " information|\n", + " across|\n", + " a|\n", + " wide|\n", + " array|\n", + " of|\n", + " topics|\n", + ".|\n", + " How|\n", + " can|\n", + " I|\n", + " assist|\n", + " you|\n", + " today|\n", + "?|\n", + "|\n", + "|\n" + ] + } + ], + "source": [ + "const stream = await model.stream(\"Hello! Tell me about yourself.\");\n", + "const chunks = [];\n", + "for await (const chunk of stream) {\n", + " chunks.push(chunk);\n", + " console.log(`${chunk.content}|`)\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\"France\",\"Spain\",\"Japan\"]\n" - ] - } - ], - "source": [ - "// A function that operates on finalized inputs\n", - "// rather than on an input_stream\n", - "\n", - "// A function that does not operates on input streams and breaks streaming.\n", - "const extractCountryNames = (inputs: Record) => {\n", - " if (!Array.isArray(inputs.countries)) {\n", - " return \"\";\n", - " }\n", - " return JSON.stringify(inputs.countries.map((country) => country.name));\n", - "}\n", - "\n", - "const chain = model.pipe(new JsonOutputParser()).pipe(extractCountryNames);\n", - "\n", - "const stream = await chain.stream(\n", - " `output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`\n", - ");\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Non-streaming components\n", - "\n", - "Like the above example, some built-in components like Retrievers do not offer any streaming. What happens if we try to `stream` them?" 
- ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's have a look at one of the raw chunks:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " [\n", - " Document {\n", - " pageContent: 'mitochondria is the powerhouse of the cell',\n", - " metadata: {},\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'buildings are made of brick',\n", - " metadata: {},\n", - " id: undefined\n", - " }\n", - " ]\n", - "]\n" - ] - } - ], - "source": [ - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const template = `Answer the question based only on the following context:\n", - "{context}\n", - "\n", - "Question: {question}\n", - "`;\n", - "const prompt = ChatPromptTemplate.fromTemplate(template);\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromTexts(\n", - " [\"mitochondria is the powerhouse of the cell\", \"buildings are made of brick\"],\n", - " [{}, {}],\n", - " new OpenAIEmbeddings(),\n", - ");\n", - "\n", - "const retriever = vectorstore.asRetriever();\n", - "\n", - "const chunks = [];\n", - "\n", - "for await (const chunk of await retriever.stream(\"What is the powerhouse of the cell?\")) {\n", - " chunks.push(chunk);\n", - "}\n", - "\n", - "console.log(chunks);\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Stream just yielded the final result from that component. \n", - "\n", - "This is OK! Not all components have to implement streaming -- in some cases streaming is either unnecessary, difficult or just doesn't make sense.\n", - "\n", - ":::{.callout-tip}\n", - "An LCEL chain constructed using some non-streaming components will still be able to stream in a lot of cases, with streaming of partial output starting after the last non-streaming step in the chain.\n", - ":::\n", - "\n", - "Here's an example of this:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: '',\n", + " tool_call_chunks: [],\n", + " additional_kwargs: {},\n", + " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: '',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n", + " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + "}\n" + ] + } + ], + "source": [ + "chunks[0]" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "|\n", - "M|\n", - "ito|\n", - "ch|\n", - "ond|\n", - "ria|\n", - " is|\n", - " the|\n", - " powerhouse|\n", - " of|\n", - " the|\n", - " cell|\n", - ".|\n", - "|\n", - "|\n" - ] - } - ], - "source": [ - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import type { Document } from \"@langchain/core/documents\";\n", - "import { 
StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "const formatDocs = (docs: Document[]) => {\n", - " return docs.map((doc) => doc.pageContent).join(\"\\n-----\\n\")\n", - "}\n", - "\n", - "const retrievalChain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe(formatDocs),\n", - " question: new RunnablePassthrough()\n", - " },\n", - " prompt,\n", - " model,\n", - " new StringOutputParser(),\n", - "]);\n", - "\n", - "const stream = await retrievalChain.stream(\"What is the powerhouse of the cell?\");\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(`${chunk}|`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we've seen how the `stream` method works, let's venture into the world of streaming events!\n", - "\n", - "## Using Stream Events\n", - "\n", - "Event Streaming is a **beta** API. This API may change a bit based on feedback.\n", - "\n", - ":::{.callout-note}\n", - "Introduced in @langchain/core **0.1.27**.\n", - ":::\n", - "\n", - "For the `streamEvents` method to work properly:\n", - "\n", - "* Any custom functions / runnables must propragate callbacks \n", - "* Set proper parameters on models to force the LLM to stream tokens.\n", - "* Let us know if anything doesn't work as expected!\n", - "\n", - "### Event Reference\n", - "\n", - "Below is a reference table that shows some events that might be emitted by the various Runnable objects.\n", - "\n", - ":::{.callout-note}\n", - "When streaming is implemented properly, the inputs to a runnable will not be known until after the input stream has been entirely consumed. This means that `inputs` will often be included only for `end` events and rather than for `start` events.\n", - ":::\n", - "\n", - "| event | name | chunk | input | output |\n", - "|----------------------|------------------|---------------------------------|-----------------------------------------------|-------------------------------------------------|\n", - "| on_llm_start | [model name] | | {'input': 'hello'} | |\n", - "| on_llm_stream | [model name] | 'Hello' `or` AIMessageChunk(content=\"hello\") | | |\n", - "| on_llm_end | [model name] | | 'Hello human!' | {\"generations\": [...], \"llmOutput\": None, ...} |\n", - "| on_chain_start | format_docs | | | |\n", - "| on_chain_stream | format_docs | \"hello world!, goodbye world!\" | | |\n", - "| on_chain_end | format_docs | | [Document(...)] | \"hello world!, goodbye world!\" |\n", - "| on_tool_start | some_tool | | {\"x\": 1, \"y\": \"2\"} | |\n", - "| on_tool_stream | some_tool | {\"x\": 1, \"y\": \"2\"} | | |\n", - "| on_tool_end | some_tool | | | {\"x\": 1, \"y\": \"2\"} |\n", - "| on_retriever_start | [retriever name] | | {\"query\": \"hello\"} | |\n", - "| on_retriever_chunk | [retriever name] | {documents: [...]} | | |\n", - "| on_retriever_end | [retriever name] | | {\"query\": \"hello\"} | {documents: [...]} |\n", - "| on_prompt_start | [template_name] | | {\"question\": \"hello\"} | |\n", - "| on_prompt_end | [template_name] | | {\"question\": \"hello\"} | ChatPromptValue(messages: [SystemMessage, ...]) |\n", - "\n", - "`streamEvents` will also emit dispatched custom events in `v2`. Please see [this guide](/docs/how_to/callbacks_custom_events/) for more.\n", - "\n", - "### Chat Model\n", - "\n", - "Let's start off by looking at the events produced by a chat model." 
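To make the first requirement above concrete (custom functions and runnables must propagate callbacks), here is a hedged sketch of one way to forward the config that a wrapped function receives to an inner runnable, so that `streamEvents` can observe the inner model call. It reuses the `model` defined earlier; the assumption that the function's second argument can be forwarded as the config for inner `invoke` calls follows common LangChain.js usage and is not taken from this notebook.

```typescript
// Sketch only: forward the received config so callbacks reach the inner model call.
import { RunnableLambda } from "@langchain/core/runnables";

const summarize = new RunnableLambda({
  func: async (text: string, config) => {
    // Passing `config` through is what propagates callbacks (and streamEvents visibility).
    return model.invoke(`Summarize in one sentence: ${text}`, config);
  },
});

const eventStream = await summarize.streamEvents(
  "LangChain provides primitives for building LLM applications.",
  { version: "v2" }
);

for await (const event of eventStream) {
  if (event.event === "on_chat_model_stream") {
    console.log(event.data.chunk.content);
  }
}
```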
- ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We got back something called an `AIMessageChunk`. This chunk represents a part of an `AIMessage`.\n", + "\n", + "Message chunks are additive by design – one can simply add them up using the `.concat()` method to get the state of the response so far!" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "25\n" - ] - } - ], - "source": [ - "const events = [];\n", - "\n", - "const eventStream = await model.streamEvents(\"hello\", { version: \"v2\" });\n", - "\n", - "for await (const event of eventStream) {\n", - " events.push(event);\n", - "}\n", - "\n", - "console.log(events.length)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{.callout-note}\n", - "\n", - "Hey what's that funny version=\"v2\" parameter in the API?! 😾\n", - "\n", - "This is a **beta API**, and we're almost certainly going to make some changes to it.\n", - "\n", - "This version parameter will allow us to minimize such breaking changes to your code. \n", - "\n", - "In short, we are annoying you now, so we don't have to annoy you later.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's take a look at the few of the start event and a few of the end events." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"Hello! I'm a\",\n", + " additional_kwargs: {},\n", + " response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n", + " tool_call_chunks: [],\n", + " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: \"Hello! 
I'm a\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: { prompt: 0, completion: 0, finish_reason: null },\n", + " id: 'chatcmpl-9lO8YUEcX7rqaxxevelHBtl1GaWoo',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + "}\n" + ] + } + ], + "source": [ + "let finalChunk = chunks[0];\n", + "\n", + "for (const chunk of chunks.slice(1, 5)) {\n", + " finalChunk = finalChunk.concat(chunk);\n", + "}\n", + "\n", + "finalChunk" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " event: 'on_chat_model_start',\n", - " data: { input: 'hello' },\n", - " name: 'ChatOpenAI',\n", - " tags: [],\n", - " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - " },\n", - " {\n", - " event: 'on_chat_model_stream',\n", - " data: { chunk: [AIMessageChunk] },\n", - " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", - " name: 'ChatOpenAI',\n", - " tags: [],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - " },\n", - " {\n", - " event: 'on_chat_model_stream',\n", - " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", - " name: 'ChatOpenAI',\n", - " tags: [],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " },\n", - " data: { chunk: [AIMessageChunk] }\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "events.slice(0, 3);" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chains\n", + "\n", + "Virtually all LLM applications involve more steps than just a call to a language model.\n", + "\n", + "Let’s build a simple chain using `LangChain Expression Language` (`LCEL`) that combines a prompt, model and a parser and verify that streaming works.\n", + "\n", + "We will use `StringOutputParser` to parse the output from the model. This is a simple parser that extracts the content field from an `AIMessageChunk`, giving us the `token` returned by the model.\n", + "\n", + ":::{.callout-tip}\n", + "LCEL is a declarative way to specify a “program” by chainining together different LangChain primitives. Chains created using LCEL benefit from an automatic implementation of stream, allowing streaming of the final output. 
In fact, chains created with LCEL implement the entire standard Runnable interface.\n", + ":::" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " event: 'on_chat_model_stream',\n", - " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", - " name: 'ChatOpenAI',\n", - " tags: [],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " },\n", - " data: { chunk: [AIMessageChunk] }\n", - " },\n", - " {\n", - " event: 'on_chat_model_end',\n", - " data: { output: [AIMessageChunk] },\n", - " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", - " name: 'ChatOpenAI',\n", - " tags: [],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "events.slice(-2);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Chain\n", - "\n", - "Let's revisit the example chain that parsed streaming JSON to explore the streaming events API." - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "|\n", + "Sure|\n", + ",|\n", + " here's|\n", + " a|\n", + " joke|\n", + " for|\n", + " you|\n", + ":\n", + "\n", + "|\n", + "Why|\n", + " did|\n", + " the|\n", + " par|\n", + "rot|\n", + " sit|\n", + " on|\n", + " the|\n", + " stick|\n", + "?\n", + "\n", + "|\n", + "Because|\n", + " it|\n", + " wanted|\n", + " to|\n", + " be|\n", + " a|\n", + " \"|\n", + "pol|\n", + "ly|\n", + "-stick|\n", + "-al|\n", + "\"|\n", + " observer|\n", + "!|\n", + "|\n", + "|\n" + ] + } + ], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(\"Tell me a joke about {topic}\");\n", + "\n", + "const parser = new StringOutputParser();\n", + "\n", + "const chain = prompt.pipe(model).pipe(parser);\n", + "\n", + "const stream = await chain.stream({\n", + " topic: \"parrot\",\n", + "});\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(`${chunk}|`)\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "83\n" - ] - } - ], - "source": [ - "const chain = model.pipe(new JsonOutputParser());\n", - "const eventStream = await chain.streamEvents(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", - " { version: \"v2\" },\n", - ");\n", - "\n", - "\n", - "const events = [];\n", - "for await (const event of eventStream) {\n", - " events.push(event);\n", - "}\n", - "\n", - "console.log(events.length)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you examine at the first few events, you'll notice that there are **3** different start events rather than **2** start events.\n", - "\n", - "The three start events correspond to:\n", - "\n", - "1. The chain (model + parser)\n", - "2. The model\n", - "3. 
The parser" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{.callout-note}\n", + "You do not have to use the `LangChain Expression Language` to use LangChain and can instead rely on a standard **imperative** programming approach by\n", + "caling `invoke`, `batch` or `stream` on each component individually, assigning the results to variables and then using them downstream as you see fit.\n", + "\n", + "If that works for your needs, then that's fine by us 👌!\n", + ":::\n", + "\n", + "### Working with Input Streams\n", + "\n", + "What if you wanted to stream JSON from the output as it was being generated?\n", + "\n", + "If you were to rely on `JSON.parse` to parse the partial json, the parsing would fail as the partial json wouldn't be valid json.\n", + "\n", + "You'd likely be at a complete loss of what to do and claim that it wasn't possible to stream JSON.\n", + "\n", + "Well, turns out there is a way to do it - the parser needs to operate on the **input stream**, and attempt to \"auto-complete\" the partial json into a valid state.\n", + "\n", + "Let's see such a parser in action to understand what this means." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " event: 'on_chain_start',\n", - " data: {\n", - " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"'\n", - " },\n", - " name: 'RunnableSequence',\n", - " tags: [],\n", - " run_id: '5dd960b8-4341-4401-8993-7d04d49fcc08',\n", - " metadata: {}\n", - " },\n", - " {\n", - " event: 'on_chat_model_start',\n", - " data: { input: [Object] },\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1' ],\n", - " run_id: '5d2917b1-886a-47a1-807d-8a0ba4cb4f65',\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - " },\n", - " {\n", - " event: 'on_parser_start',\n", - " data: {},\n", - " name: 'JsonOutputParser',\n", - " tags: [ 'seq:step:2' ],\n", - " run_id: '756c57d6-d455-484f-a556-79a82c4e1d40',\n", - " metadata: {}\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "events.slice(0, 3);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "What do you think you'd see if you looked at the last 3 events? what about the middle?\n", - "\n", - "Let's use this API to take output the stream events from the model and the parser. We're ignoring start events, end events and events from the chain." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " countries: [\n", + " { name: 'France', population: 67390000 },\n", + " { name: 'Spain', population: 47350000 },\n", + " { name: 'Japan', population: 125800000 }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n", + "\n", + "const chain = model.pipe(new JsonOutputParser());\n", + "const stream = await chain.stream(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. 
Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`\n", + ");\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(chunk);\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Chat model chunk: \n", - "Chat model chunk: ```\n", - "Chat model chunk: json\n", - "Chat model chunk: \n", - "\n", - "Chat model chunk: {\n", - "\n", - "Chat model chunk: \n", - "Chat model chunk: \"\n", - "Chat model chunk: countries\n", - "Chat model chunk: \":\n", - "Chat model chunk: [\n", - "\n", - "Chat model chunk: \n", - "Chat model chunk: {\n", - "\n", - "Chat model chunk: \n", - "Chat model chunk: \"\n", - "Chat model chunk: name\n", - "Chat model chunk: \":\n", - "Chat model chunk: \"\n", - "Chat model chunk: France\n", - "Chat model chunk: \",\n", - "\n", - "Chat model chunk: \n", - "Chat model chunk: \"\n", - "Chat model chunk: population\n", - "Chat model chunk: \":\n", - "Chat model chunk: \n", - "Chat model chunk: 652\n", - "Chat model chunk: 735\n", - "Chat model chunk: 11\n", - "Chat model chunk: \n", - "\n" - ] - } - ], - "source": [ - "let eventCount = 0;\n", - "\n", - "const eventStream = await chain.streamEvents(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", - " { version: \"v1\" },\n", - ");\n", - "\n", - "for await (const event of eventStream) {\n", - " // Truncate the output\n", - " if (eventCount > 30) {\n", - " continue;\n", - " }\n", - " const eventType = event.event;\n", - " if (eventType === \"on_llm_stream\") {\n", - " console.log(`Chat model chunk: ${event.data.chunk.message.content}`);\n", - " } else if (eventType === \"on_parser_stream\") {\n", - " console.log(`Parser chunk: ${JSON.stringify(event.data.chunk)}`);\n", - " }\n", - " eventCount += 1;\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Because both the model and the parser support streaming, we see streaming events from both components in real time! Neat! 🦜\n", - "\n", - "### Filtering Events\n", - "\n", - "Because this API produces so many events, it is useful to be able to filter on events.\n", - "\n", - "You can filter by either component `name`, component `tags` or component `type`.\n", - "\n", - "#### By Name\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's **break** streaming. We'll use the previous example and append an extraction function at the end that extracts the country names from the finalized JSON. Since this new last step is just a function call with no defined streaming behavior, the streaming output from previous steps is aggregated, then passed as a single input to the function.\n", + "\n", + ":::{.callout-warning}\n", + "Any steps in the chain that operate on **finalized inputs** rather than on **input streams** can break streaming functionality via `stream`.\n", + ":::\n", + "\n", + ":::{.callout-tip}\n", + "Later, we will discuss the `streamEvents` API which streams results from intermediate steps. 
This API will stream results from intermediate steps even if the chain contains steps that only operate on **finalized inputs**.\n", + ":::" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " event: 'on_parser_start',\n", - " data: {\n", - " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"'\n", - " },\n", - " name: 'my_parser',\n", - " tags: [ 'seq:step:2' ],\n", - " run_id: '0a605976-a8f8-4259-8ef6-b3d7e52b3d4e',\n", - " metadata: {}\n", - "}\n", - "{\n", - " event: 'on_parser_stream',\n", - " run_id: '0a605976-a8f8-4259-8ef6-b3d7e52b3d4e',\n", - " name: 'my_parser',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {},\n", - " data: { chunk: { countries: [Array] } }\n", - "}\n", - "{\n", - " event: 'on_parser_end',\n", - " data: { output: { countries: [Array] } },\n", - " run_id: '0a605976-a8f8-4259-8ef6-b3d7e52b3d4e',\n", - " name: 'my_parser',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {}\n", - "}\n" - ] - } - ], - "source": [ - "const chain = model.withConfig({ runName: \"model\" })\n", - " .pipe(\n", - " new JsonOutputParser().withConfig({ runName: \"my_parser\" })\n", - " );\n", - "\n", - "\n", - "const eventStream = await chain.streamEvents(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", - " { version: \"v2\" },\n", - " { includeNames: [\"my_parser\"] },\n", - ");\n", - "\n", - "let eventCount = 0;\n", - "\n", - "for await (const event of eventStream) {\n", - " // Truncate the output\n", - " if (eventCount > 10) {\n", - " continue;\n", - " }\n", - " console.log(event);\n", - " eventCount += 1;\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### By type" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\"France\",\"Spain\",\"Japan\"]\n" + ] + } + ], + "source": [ + "// A function that operates on finalized inputs\n", + "// rather than on an input_stream\n", + "\n", + "// A function that does not operates on input streams and breaks streaming.\n", + "const extractCountryNames = (inputs: Record) => {\n", + " if (!Array.isArray(inputs.countries)) {\n", + " return \"\";\n", + " }\n", + " return JSON.stringify(inputs.countries.map((country) => country.name));\n", + "}\n", + "\n", + "const chain = model.pipe(new JsonOutputParser()).pipe(extractCountryNames);\n", + "\n", + "const stream = await chain.stream(\n", + " `output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`\n", + ");\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(chunk);\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " event: 'on_chat_model_start',\n", - " data: {\n", - " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. 
Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"'\n", - " },\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '```',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: 'json',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '\\n',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 
'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '{\\n',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' ',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' \"',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: 'countries',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", 
- " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '\":',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' [\\n',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", - " name: 'model',\n", - " tags: [ 'seq:step:1' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const chain = model.withConfig({ runName: \"model\" })\n", - " .pipe(\n", - " new JsonOutputParser().withConfig({ runName: \"my_parser\" })\n", - " );\n", - "\n", - "\n", - "const eventStream = await chain.streamEvents(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", - " { version: \"v2\" },\n", - " { includeTypes: [\"chat_model\"] },\n", - ");\n", - "\n", - "let eventCount = 0;\n", - "\n", - "for await (const event of eventStream) {\n", - " // Truncate the output\n", - " if (eventCount > 10) {\n", - " continue;\n", - " }\n", - " console.log(event);\n", - " eventCount += 1;\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### By Tags\n", - "\n", - ":::{.callout-caution}\n", - "\n", - "Tags are inherited by child components of a given runnable. \n", - "\n", - "If you're using tags to filter, make sure that this is what you want.\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Non-streaming components\n", + "\n", + "Like the above example, some built-in components like Retrievers do not offer any streaming. What happens if we try to `stream` them?" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " event: 'on_chain_start',\n", - " data: {\n", - " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. 
Each country should have the key \"name\" and \"population\"'\n", - " },\n", - " name: 'RunnableSequence',\n", - " tags: [ 'my_chain' ],\n", - " run_id: '1fed60d6-e0b7-4d5e-8ec7-cd7d3ee5c69f',\n", - " metadata: {}\n", - "}\n", - "{\n", - " event: 'on_chat_model_start',\n", - " data: { input: { messages: [Array] } },\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_parser_start',\n", - " data: {},\n", - " name: 'my_parser',\n", - " tags: [ 'seq:step:2', 'my_chain' ],\n", - " run_id: 'caf24a1e-255c-4937-9f38-6e46275d854a',\n", - " metadata: {}\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: 'Certainly',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '!',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: 
true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: \" Here's\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' the',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' JSON',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' format',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' output',\n", - " name: undefined,\n", - " additional_kwargs: 
{},\n", - " response_metadata: [Object],\n", - " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", - " name: 'ChatOpenAI',\n", - " tags: [ 'seq:step:1', 'my_chain' ],\n", - " metadata: {\n", - " ls_provider: 'openai',\n", - " ls_model_name: 'gpt-4o',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 1,\n", - " ls_max_tokens: undefined,\n", - " ls_stop: undefined\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const chain = model\n", - " .pipe(new JsonOutputParser().withConfig({ runName: \"my_parser\" }))\n", - " .withConfig({ tags: [\"my_chain\"] });\n", - "\n", - "\n", - "const eventStream = await chain.streamEvents(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", - " { version: \"v2\" },\n", - " { includeTags: [\"my_chain\"] },\n", - ");\n", - "\n", - "let eventCount = 0;\n", - "\n", - "for await (const event of eventStream) {\n", - " // Truncate the output\n", - " if (eventCount > 10) {\n", - " continue;\n", - " }\n", - " console.log(event);\n", - " eventCount += 1;\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Streaming events over HTTP\n", - "\n", - "For convenience, `streamEvents` supports encoding streamed intermediate events as HTTP [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events), encoded as bytes. Here's what that looks like (using a [`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder) to reconvert the binary data back into a human readable string):" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " [\n", + " Document {\n", + " pageContent: 'mitochondria is the powerhouse of the cell',\n", + " metadata: {},\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'buildings are made of brick',\n", + " metadata: {},\n", + " id: undefined\n", + " }\n", + " ]\n", + "]\n" + ] + } + ], + "source": [ + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const template = `Answer the question based only on the following context:\n", + "{context}\n", + "\n", + "Question: {question}\n", + "`;\n", + "const prompt = ChatPromptTemplate.fromTemplate(template);\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromTexts(\n", + " [\"mitochondria is the powerhouse of the cell\", \"buildings are made of brick\"],\n", + " [{}, {}],\n", + " new OpenAIEmbeddings(),\n", + ");\n", + "\n", + "const retriever = vectorstore.asRetriever();\n", + "\n", + "const chunks = [];\n", + "\n", + "for await (const chunk of await retriever.stream(\"What is the powerhouse of the cell?\")) {\n", + " chunks.push(chunk);\n", + "}\n", + "\n", + "console.log(chunks);\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "event: data\n", - "data: {\"event\":\"on_chain_start\",\"data\":{\"input\":\"Output a 
list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \\\"countries\\\" which contains a list of countries. Each country should have the key \\\"name\\\" and \\\"population\\\"\"},\"name\":\"RunnableSequence\",\"tags\":[\"my_chain\"],\"run_id\":\"41cd92f8-9b8c-4365-8aa0-fda3abdae03d\",\"metadata\":{}}\n", - "\n", - "\n", - "event: data\n", - "data: {\"event\":\"on_chat_model_start\",\"data\":{\"input\":{\"messages\":[[{\"lc\":1,\"type\":\"constructor\",\"id\":[\"langchain_core\",\"messages\",\"HumanMessage\"],\"kwargs\":{\"content\":\"Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \\\"countries\\\" which contains a list of countries. Each country should have the key \\\"name\\\" and \\\"population\\\"\",\"additional_kwargs\":{},\"response_metadata\":{}}}]]}},\"name\":\"ChatOpenAI\",\"tags\":[\"seq:step:1\",\"my_chain\"],\"run_id\":\"a6c2bc61-c868-4570-a143-164e64529ee0\",\"metadata\":{\"ls_provider\":\"openai\",\"ls_model_name\":\"gpt-4o\",\"ls_model_type\":\"chat\",\"ls_temperature\":1}}\n", - "\n", - "\n", - "event: data\n", - "data: {\"event\":\"on_parser_start\",\"data\":{},\"name\":\"my_parser\",\"tags\":[\"seq:step:2\",\"my_chain\"],\"run_id\":\"402533c5-0e4e-425d-a556-c30a350972d0\",\"metadata\":{}}\n", - "\n", - "\n", - "event: data\n", - "data: {\"event\":\"on_chat_model_stream\",\"data\":{\"chunk\":{\"lc\":1,\"type\":\"constructor\",\"id\":[\"langchain_core\",\"messages\",\"AIMessageChunk\"],\"kwargs\":{\"content\":\"\",\"tool_call_chunks\":[],\"additional_kwargs\":{},\"id\":\"chatcmpl-9lO9BAQwbKDy2Ou2RNFUVi0VunAsL\",\"tool_calls\":[],\"invalid_tool_calls\":[],\"response_metadata\":{\"prompt\":0,\"completion\":0,\"finish_reason\":null}}}},\"run_id\":\"a6c2bc61-c868-4570-a143-164e64529ee0\",\"name\":\"ChatOpenAI\",\"tags\":[\"seq:step:1\",\"my_chain\"],\"metadata\":{\"ls_provider\":\"openai\",\"ls_model_name\":\"gpt-4o\",\"ls_model_type\":\"chat\",\"ls_temperature\":1}}\n", - "\n", - "\n" - ] - } - ], - "source": [ - "const chain = model\n", - " .pipe(new JsonOutputParser().withConfig({ runName: \"my_parser\" }))\n", - " .withConfig({ tags: [\"my_chain\"] });\n", - "\n", - "\n", - "const eventStream = await chain.streamEvents(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", - " {\n", - " version: \"v2\",\n", - " encoding: \"text/event-stream\",\n", - " },\n", - ");\n", - "\n", - "let eventCount = 0;\n", - "\n", - "const textDecoder = new TextDecoder();\n", - "\n", - "for await (const event of eventStream) {\n", - " // Truncate the output\n", - " if (eventCount > 3) {\n", - " continue;\n", - " }\n", - " console.log(textDecoder.decode(event));\n", - " eventCount += 1;\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A nice feature of this format is that you can pass the resulting stream directly into a native [HTTP response object](https://developer.mozilla.org/en-US/docs/Web/API/Response) with the correct headers (commonly used by frameworks like [Hono](https://hono.dev/) and [Next.js](https://nextjs.org/)), then parse that stream on the frontend. 
Your server-side handler would look something like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [], - "source": [ - "const handler = async () => {\n", - " const eventStream = await chain.streamEvents(\n", - " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", - " {\n", - " version: \"v2\",\n", - " encoding: \"text/event-stream\",\n", - " },\n", - " );\n", - " return new Response(eventStream, {\n", - " headers: {\n", - " \"content-type\": \"text/event-stream\",\n", - " }\n", - " });\n", - "};" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And your frontend could look like this (using the [`@microsoft/fetch-event-source`](https://www.npmjs.com/package/@microsoft/fetch-event-source) pacakge to fetch and parse the event source):" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "import { fetchEventSource } from \"@microsoft/fetch-event-source\";\n", - "\n", - "const makeChainRequest = async () => {\n", - " await fetchEventSource(\"https://your_url_here\", {\n", - " method: \"POST\",\n", - " body: JSON.stringify({\n", - " foo: 'bar'\n", - " }),\n", - " onmessage: (message) => {\n", - " if (message.event === \"data\") {\n", - " console.log(message.data);\n", - " }\n", - " },\n", - " onerror: (err) => {\n", - " console.log(err);\n", - " }\n", - " });\n", - "};" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Non-streaming components\n", - "\n", - "Remember how some components don't stream well because they don't operate on **input streams**?\n", - "\n", - "While such components can break streaming of the final output when using `stream`, `streamEvents` will still yield streaming events from intermediate steps that support streaming!" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Stream just yielded the final result from that component. \n", + "\n", + "This is OK! 
Not all components have to implement streaming -- in some cases streaming is either unnecessary, difficult or just doesn't make sense.\n", + "\n", + ":::{.callout-tip}\n", + "An LCEL chain constructed using some non-streaming components will still be able to stream in a lot of cases, with streaming of partial output starting after the last non-streaming step in the chain.\n", + ":::\n", + "\n", + "Here's an example of this:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "|\n", + "M|\n", + "ito|\n", + "ch|\n", + "ond|\n", + "ria|\n", + " is|\n", + " the|\n", + " powerhouse|\n", + " of|\n", + " the|\n", + " cell|\n", + ".|\n", + "|\n", + "|\n" + ] + } + ], + "source": [ + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import type { Document } from \"@langchain/core/documents\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "const formatDocs = (docs: Document[]) => {\n", + " return docs.map((doc) => doc.pageContent).join(\"\\n-----\\n\")\n", + "}\n", + "\n", + "const retrievalChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocs),\n", + " question: new RunnablePassthrough()\n", + " },\n", + " prompt,\n", + " model,\n", + " new StringOutputParser(),\n", + "]);\n", + "\n", + "const stream = await retrievalChain.stream(\"What is the powerhouse of the cell?\");\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(`${chunk}|`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we've seen how the `stream` method works, let's venture into the world of streaming events!\n", + "\n", + "## Using Stream Events\n", + "\n", + "Event Streaming is a **beta** API. This API may change a bit based on feedback.\n", + "\n", + ":::{.callout-note}\n", + "Introduced in @langchain/core **0.1.27**.\n", + ":::\n", + "\n", + "For the `streamEvents` method to work properly:\n", + "\n", + "* Any custom functions / runnables must propagate callbacks \n", + "* Set proper parameters on models to force the LLM to stream tokens.\n", + "* Let us know if anything doesn't work as expected!\n", + "\n", + "### Event Reference\n", + "\n", + "Below is a reference table that shows some events that might be emitted by the various Runnable objects.\n", + "\n", + ":::{.callout-note}\n", + "When streaming is implemented properly, the inputs to a runnable will not be known until after the input stream has been entirely consumed. This means that `inputs` will often be included only for `end` events rather than for `start` events.\n", + ":::\n", + "\n", + "| event | name | chunk | input | output |\n", + "|----------------------|------------------|---------------------------------|-----------------------------------------------|-------------------------------------------------|\n", + "| on_llm_start | [model name] | | {'input': 'hello'} | |\n", + "| on_llm_stream | [model name] | 'Hello' `or` AIMessageChunk(content=\"hello\") | | |\n", + "| on_llm_end | [model name] | | 'Hello human!' 
| {\"generations\": [...], \"llmOutput\": None, ...} |\n", + "| on_chain_start | format_docs | | | |\n", + "| on_chain_stream | format_docs | \"hello world!, goodbye world!\" | | |\n", + "| on_chain_end | format_docs | | [Document(...)] | \"hello world!, goodbye world!\" |\n", + "| on_tool_start | some_tool | | {\"x\": 1, \"y\": \"2\"} | |\n", + "| on_tool_stream | some_tool | {\"x\": 1, \"y\": \"2\"} | | |\n", + "| on_tool_end | some_tool | | | {\"x\": 1, \"y\": \"2\"} |\n", + "| on_retriever_start | [retriever name] | | {\"query\": \"hello\"} | |\n", + "| on_retriever_chunk | [retriever name] | {documents: [...]} | | |\n", + "| on_retriever_end | [retriever name] | | {\"query\": \"hello\"} | {documents: [...]} |\n", + "| on_prompt_start | [template_name] | | {\"question\": \"hello\"} | |\n", + "| on_prompt_end | [template_name] | | {\"question\": \"hello\"} | ChatPromptValue(messages: [SystemMessage, ...]) |\n", + "\n", + "`streamEvents` will also emit dispatched custom events in `v2`. Please see [this guide](/docs/how_to/callbacks_custom_events/) for more.\n", + "\n", + "### Chat Model\n", + "\n", + "Let's start off by looking at the events produced by a chat model." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "25\n" + ] + } + ], + "source": [ + "const events = [];\n", + "\n", + "const eventStream = await model.streamEvents(\"hello\", { version: \"v2\" });\n", + "\n", + "for await (const event of eventStream) {\n", + " events.push(event);\n", + "}\n", + "\n", + "console.log(events.length)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{.callout-note}\n", + "\n", + "Hey what's that funny version=\"v2\" parameter in the API?! 😾\n", + "\n", + "This is a **beta API**, and we're almost certainly going to make some changes to it.\n", + "\n", + "This version parameter will allow us to minimize such breaking changes to your code. \n", + "\n", + "In short, we are annoying you now, so we don't have to annoy you later.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at a few of the start events and a few of the end events."
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " event: 'on_chat_model_start',\n", + " data: { input: 'hello' },\n", + " name: 'ChatOpenAI',\n", + " tags: [],\n", + " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + " },\n", + " {\n", + " event: 'on_chat_model_stream',\n", + " data: { chunk: [AIMessageChunk] },\n", + " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", + " name: 'ChatOpenAI',\n", + " tags: [],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + " },\n", + " {\n", + " event: 'on_chat_model_stream',\n", + " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", + " name: 'ChatOpenAI',\n", + " tags: [],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " },\n", + " data: { chunk: [AIMessageChunk] }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "events.slice(0, 3);" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " event: 'on_chat_model_stream',\n", + " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", + " name: 'ChatOpenAI',\n", + " tags: [],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " },\n", + " data: { chunk: [AIMessageChunk] }\n", + " },\n", + " {\n", + " event: 'on_chat_model_end',\n", + " data: { output: [AIMessageChunk] },\n", + " run_id: 'c983e634-9f1d-4916-97d8-63c3a86102c2',\n", + " name: 'ChatOpenAI',\n", + " tags: [],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "events.slice(-2);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Chain\n", + "\n", + "Let's revisit the example chain that parsed streaming JSON to explore the streaming events API." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "83\n" + ] + } + ], + "source": [ + "const chain = model.pipe(new JsonOutputParser());\n", + "const eventStream = await chain.streamEvents(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. 
Each country should have the key \"name\" and \"population\"`,\n", + " { version: \"v2\" },\n", + ");\n", + "\n", + "\n", + "const events = [];\n", + "for await (const event of eventStream) {\n", + " events.push(event);\n", + "}\n", + "\n", + "console.log(events.length)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you examine the first few events, you'll notice that there are **3** different start events rather than **2** start events.\n", + "\n", + "The three start events correspond to:\n", + "\n", + "1. The chain (model + parser)\n", + "2. The model\n", + "3. The parser" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " event: 'on_chain_start',\n", + " data: {\n", + " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"'\n", + " },\n", + " name: 'RunnableSequence',\n", + " tags: [],\n", + " run_id: '5dd960b8-4341-4401-8993-7d04d49fcc08',\n", + " metadata: {}\n", + " },\n", + " {\n", + " event: 'on_chat_model_start',\n", + " data: { input: [Object] },\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1' ],\n", + " run_id: '5d2917b1-886a-47a1-807d-8a0ba4cb4f65',\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + " },\n", + " {\n", + " event: 'on_parser_start',\n", + " data: {},\n", + " name: 'JsonOutputParser',\n", + " tags: [ 'seq:step:2' ],\n", + " run_id: '756c57d6-d455-484f-a556-79a82c4e1d40',\n", + " metadata: {}\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "events.slice(0, 3);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "What do you think you'd see if you looked at the last 3 events? What about the middle?\n", + "\n", + "Let's use this API to output the stream events from the model and the parser. We're ignoring start events, end events and events from the chain."
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chat model chunk: \n", + "Chat model chunk: ```\n", + "Chat model chunk: json\n", + "Chat model chunk: \n", + "\n", + "Chat model chunk: {\n", + "\n", + "Chat model chunk: \n", + "Chat model chunk: \"\n", + "Chat model chunk: countries\n", + "Chat model chunk: \":\n", + "Chat model chunk: [\n", + "\n", + "Chat model chunk: \n", + "Chat model chunk: {\n", + "\n", + "Chat model chunk: \n", + "Chat model chunk: \"\n", + "Chat model chunk: name\n", + "Chat model chunk: \":\n", + "Chat model chunk: \"\n", + "Chat model chunk: France\n", + "Chat model chunk: \",\n", + "\n", + "Chat model chunk: \n", + "Chat model chunk: \"\n", + "Chat model chunk: population\n", + "Chat model chunk: \":\n", + "Chat model chunk: \n", + "Chat model chunk: 652\n", + "Chat model chunk: 735\n", + "Chat model chunk: 11\n", + "Chat model chunk: \n", + "\n" + ] + } + ], + "source": [ + "let eventCount = 0;\n", + "\n", + "const eventStream = await chain.streamEvents(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", + " { version: \"v1\" },\n", + ");\n", + "\n", + "for await (const event of eventStream) {\n", + " // Truncate the output\n", + " if (eventCount > 30) {\n", + " continue;\n", + " }\n", + " const eventType = event.event;\n", + " if (eventType === \"on_llm_stream\") {\n", + " console.log(`Chat model chunk: ${event.data.chunk.message.content}`);\n", + " } else if (eventType === \"on_parser_stream\") {\n", + " console.log(`Parser chunk: ${JSON.stringify(event.data.chunk)}`);\n", + " }\n", + " eventCount += 1;\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Because both the model and the parser support streaming, we see streaming events from both components in real time! Neat! 🦜\n", + "\n", + "### Filtering Events\n", + "\n", + "Because this API produces so many events, it is useful to be able to filter on events.\n", + "\n", + "You can filter by either component `name`, component `tags` or component `type`.\n", + "\n", + "#### By Name\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " event: 'on_parser_start',\n", + " data: {\n", + " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. 
Each country should have the key \"name\" and \"population\"'\n", + " },\n", + " name: 'my_parser',\n", + " tags: [ 'seq:step:2' ],\n", + " run_id: '0a605976-a8f8-4259-8ef6-b3d7e52b3d4e',\n", + " metadata: {}\n", + "}\n", + "{\n", + " event: 'on_parser_stream',\n", + " run_id: '0a605976-a8f8-4259-8ef6-b3d7e52b3d4e',\n", + " name: 'my_parser',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {},\n", + " data: { chunk: { countries: [Array] } }\n", + "}\n", + "{\n", + " event: 'on_parser_end',\n", + " data: { output: { countries: [Array] } },\n", + " run_id: '0a605976-a8f8-4259-8ef6-b3d7e52b3d4e',\n", + " name: 'my_parser',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {}\n", + "}\n" + ] + } + ], + "source": [ + "const chain = model.withConfig({ runName: \"model\" })\n", + " .pipe(\n", + " new JsonOutputParser().withConfig({ runName: \"my_parser\" })\n", + " );\n", + "\n", + "\n", + "const eventStream = await chain.streamEvents(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", + " { version: \"v2\" },\n", + " { includeNames: [\"my_parser\"] },\n", + ");\n", + "\n", + "let eventCount = 0;\n", + "\n", + "for await (const event of eventStream) {\n", + " // Truncate the output\n", + " if (eventCount > 10) {\n", + " continue;\n", + " }\n", + " console.log(event);\n", + " eventCount += 1;\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### By type" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " event: 'on_chat_model_start',\n", + " data: {\n", + " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. 
Each country should have the key \"name\" and \"population\"'\n", + " },\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '```',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: 'json',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '\\n',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 
'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '{\\n',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' ',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' \"',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: 'countries',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 
'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '\":',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' [\\n',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO98p55iuqUNwx4GZ6j2BkDak6Rr',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'fb6351eb-9537-445d-a1bd-24c2e11efd8e',\n", + " name: 'model',\n", + " tags: [ 'seq:step:1' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const chain = model.withConfig({ runName: \"model\" })\n", + " .pipe(\n", + " new JsonOutputParser().withConfig({ runName: \"my_parser\" })\n", + " );\n", + "\n", + "\n", + "const eventStream = await chain.streamEvents(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", + " { version: \"v2\" },\n", + " { includeTypes: [\"chat_model\"] },\n", + ");\n", + "\n", + "let eventCount = 0;\n", + "\n", + "for await (const event of eventStream) {\n", + " // Truncate the output\n", + " if (eventCount > 10) {\n", + " continue;\n", + " }\n", + " console.log(event);\n", + " eventCount += 1;\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### By Tags\n", + "\n", + ":::{.callout-caution}\n", + "\n", + "Tags are inherited by child components of a given runnable. \n", + "\n", + "If you're using tags to filter, make sure that this is what you want.\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " event: 'on_chain_start',\n", + " data: {\n", + " input: 'Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. 
Each country should have the key \"name\" and \"population\"'\n", + " },\n", + " name: 'RunnableSequence',\n", + " tags: [ 'my_chain' ],\n", + " run_id: '1fed60d6-e0b7-4d5e-8ec7-cd7d3ee5c69f',\n", + " metadata: {}\n", + "}\n", + "{\n", + " event: 'on_chat_model_start',\n", + " data: { input: { messages: [Array] } },\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_parser_start',\n", + " data: {},\n", + " name: 'my_parser',\n", + " tags: [ 'seq:step:2', 'my_chain' ],\n", + " run_id: 'caf24a1e-255c-4937-9f38-6e46275d854a',\n", + " metadata: {}\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: 'Certainly',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '!',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: 
true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: \" Here's\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' the',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' JSON',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' format',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' output',\n", + " name: undefined,\n", + " additional_kwargs: 
{},\n", + " response_metadata: [Object],\n", + " id: 'chatcmpl-9lO99nzUvCsZWCiq6vNtS1Soa1qNp',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: 'ecb99d6e-ce03-445f-aadf-73e6cbbc52fe',\n", + " name: 'ChatOpenAI',\n", + " tags: [ 'seq:step:1', 'my_chain' ],\n", + " metadata: {\n", + " ls_provider: 'openai',\n", + " ls_model_name: 'gpt-4o',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 1,\n", + " ls_max_tokens: undefined,\n", + " ls_stop: undefined\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const chain = model\n", + " .pipe(new JsonOutputParser().withConfig({ runName: \"my_parser\" }))\n", + " .withConfig({ tags: [\"my_chain\"] });\n", + "\n", + "\n", + "const eventStream = await chain.streamEvents(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", + " { version: \"v2\" },\n", + " { includeTags: [\"my_chain\"] },\n", + ");\n", + "\n", + "let eventCount = 0;\n", + "\n", + "for await (const event of eventStream) {\n", + " // Truncate the output\n", + " if (eventCount > 10) {\n", + " continue;\n", + " }\n", + " console.log(event);\n", + " eventCount += 1;\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Streaming events over HTTP\n", + "\n", + "For convenience, `streamEvents` supports encoding streamed intermediate events as HTTP [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events), encoded as bytes. Here's what that looks like (using a [`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder) to reconvert the binary data back into a human readable string):" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "event: data\n", + "data: {\"event\":\"on_chain_start\",\"data\":{\"input\":\"Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \\\"countries\\\" which contains a list of countries. Each country should have the key \\\"name\\\" and \\\"population\\\"\"},\"name\":\"RunnableSequence\",\"tags\":[\"my_chain\"],\"run_id\":\"41cd92f8-9b8c-4365-8aa0-fda3abdae03d\",\"metadata\":{}}\n", + "\n", + "\n", + "event: data\n", + "data: {\"event\":\"on_chat_model_start\",\"data\":{\"input\":{\"messages\":[[{\"lc\":1,\"type\":\"constructor\",\"id\":[\"langchain_core\",\"messages\",\"HumanMessage\"],\"kwargs\":{\"content\":\"Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \\\"countries\\\" which contains a list of countries. 
Each country should have the key \\\"name\\\" and \\\"population\\\"\",\"additional_kwargs\":{},\"response_metadata\":{}}}]]}},\"name\":\"ChatOpenAI\",\"tags\":[\"seq:step:1\",\"my_chain\"],\"run_id\":\"a6c2bc61-c868-4570-a143-164e64529ee0\",\"metadata\":{\"ls_provider\":\"openai\",\"ls_model_name\":\"gpt-4o\",\"ls_model_type\":\"chat\",\"ls_temperature\":1}}\n", + "\n", + "\n", + "event: data\n", + "data: {\"event\":\"on_parser_start\",\"data\":{},\"name\":\"my_parser\",\"tags\":[\"seq:step:2\",\"my_chain\"],\"run_id\":\"402533c5-0e4e-425d-a556-c30a350972d0\",\"metadata\":{}}\n", + "\n", + "\n", + "event: data\n", + "data: {\"event\":\"on_chat_model_stream\",\"data\":{\"chunk\":{\"lc\":1,\"type\":\"constructor\",\"id\":[\"langchain_core\",\"messages\",\"AIMessageChunk\"],\"kwargs\":{\"content\":\"\",\"tool_call_chunks\":[],\"additional_kwargs\":{},\"id\":\"chatcmpl-9lO9BAQwbKDy2Ou2RNFUVi0VunAsL\",\"tool_calls\":[],\"invalid_tool_calls\":[],\"response_metadata\":{\"prompt\":0,\"completion\":0,\"finish_reason\":null}}}},\"run_id\":\"a6c2bc61-c868-4570-a143-164e64529ee0\",\"name\":\"ChatOpenAI\",\"tags\":[\"seq:step:1\",\"my_chain\"],\"metadata\":{\"ls_provider\":\"openai\",\"ls_model_name\":\"gpt-4o\",\"ls_model_type\":\"chat\",\"ls_temperature\":1}}\n", + "\n", + "\n" + ] + } + ], + "source": [ + "const chain = model\n", + " .pipe(new JsonOutputParser().withConfig({ runName: \"my_parser\" }))\n", + " .withConfig({ tags: [\"my_chain\"] });\n", + "\n", + "\n", + "const eventStream = await chain.streamEvents(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`,\n", + " {\n", + " version: \"v2\",\n", + " encoding: \"text/event-stream\",\n", + " },\n", + ");\n", + "\n", + "let eventCount = 0;\n", + "\n", + "const textDecoder = new TextDecoder();\n", + "\n", + "for await (const event of eventStream) {\n", + " // Truncate the output\n", + " if (eventCount > 3) {\n", + " continue;\n", + " }\n", + " console.log(textDecoder.decode(event));\n", + " eventCount += 1;\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A nice feature of this format is that you can pass the resulting stream directly into a native [HTTP response object](https://developer.mozilla.org/en-US/docs/Web/API/Response) with the correct headers (commonly used by frameworks like [Hono](https://hono.dev/) and [Next.js](https://nextjs.org/)), then parse that stream on the frontend. Your server-side handler would look something like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "const handler = async () => {\n", + " const eventStream = await chain.streamEvents(\n", + " `Output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. 
Each country should have the key \"name\" and \"population\"`,\n", + " {\n", + " version: \"v2\",\n", + " encoding: \"text/event-stream\",\n", + " },\n", + " );\n", + " return new Response(eventStream, {\n", + " headers: {\n", + " \"content-type\": \"text/event-stream\",\n", + " }\n", + " });\n", + "};" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And your frontend could look like this (using the [`@microsoft/fetch-event-source`](https://www.npmjs.com/package/@microsoft/fetch-event-source) pacakge to fetch and parse the event source):" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "import { fetchEventSource } from \"@microsoft/fetch-event-source\";\n", + "\n", + "const makeChainRequest = async () => {\n", + " await fetchEventSource(\"https://your_url_here\", {\n", + " method: \"POST\",\n", + " body: JSON.stringify({\n", + " foo: 'bar'\n", + " }),\n", + " onmessage: (message) => {\n", + " if (message.event === \"data\") {\n", + " console.log(message.data);\n", + " }\n", + " },\n", + " onerror: (err) => {\n", + " console.log(err);\n", + " }\n", + " });\n", + "};" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Non-streaming components\n", + "\n", + "Remember how some components don't stream well because they don't operate on **input streams**?\n", + "\n", + "While such components can break streaming of the final output when using `stream`, `streamEvents` will still yield streaming events from intermediate steps that support streaming!" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\"France\",\"Spain\",\"Japan\"]\n" + ] + } + ], + "source": [ + "// A function that operates on finalized inputs\n", + "// rather than on an input_stream\n", + "import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n", + "import { RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "\n", + "// A function that does not operates on input streams and breaks streaming.\n", + "const extractCountryNames = (inputs: Record) => {\n", + " if (!Array.isArray(inputs.countries)) {\n", + " return \"\";\n", + " }\n", + " return JSON.stringify(inputs.countries.map((country) => country.name));\n", + "}\n", + "\n", + "const chain = model.pipe(new JsonOutputParser()).pipe(extractCountryNames);\n", + "\n", + "const stream = await chain.stream(\n", + " `output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key \"name\" and \"population\"`\n", + ");\n", + "\n", + "for await (const chunk of stream) {\n", + " console.log(chunk);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As expected, the `stream` API doesn't work correctly because `extractCountryNames` doesn't operate on streams.\n", + "\n", + "Now, let's confirm that with `streamEvents` we're still seeing streaming output from the model and the parser." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "const eventStream = await chain.streamEvents(\n", + " `output a list of the countries france, spain and japan and their populations in JSON format.\n", + "Use a dict with an outer key of \"countries\" which contains a list of countries.\n", + "Each country should have the key \"name\" and \"population\"\n", + "Your output should ONLY contain valid JSON data. Do not include any other text or content in your output.`,\n", + " { version: \"v2\" },\n", + ");\n", + "\n", + "let eventCount = 0;\n", + "\n", + "for await (const event of eventStream) {\n", + " // Truncate the output\n", + " if (eventCount > 30) {\n", + " continue;\n", + " }\n", + " const eventType = event.event;\n", + " if (eventType === \"on_chat_model_stream\") {\n", + " console.log(`Chat model chunk: ${event.data.chunk.message.content}`);\n", + " } else if (eventType === \"on_parser_stream\") {\n", + " console.log(`Parser chunk: ${JSON.stringify(event.data.chunk)}`);\n", + " } else {\n", + " console.log(eventType)\n", + " }\n", + " eventCount += 1;\n", + "}" + ] + }, + { + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "Chat model chunk:\n", + "Chat model chunk: Here's\n", + "Chat model chunk: how\n", + "Chat model chunk: you\n", + "Chat model chunk: can\n", + "Chat model chunk: represent\n", + "Chat model chunk: the\n", + "Chat model chunk: countries\n", + "Chat model chunk: France\n", + "Chat model chunk: ,\n", + "Chat model chunk: Spain\n", + "Chat model chunk: ,\n", + "Chat model chunk: and\n", + "Chat model chunk: Japan\n", + "Chat model chunk: ,\n", + "Chat model chunk: along\n", + "Chat model chunk: with\n", + "Chat model chunk: their\n", + "Chat model chunk: populations\n", + "Chat model chunk: ,\n", + "Chat model chunk: in\n", + "Chat model chunk: JSON\n", + "Chat model chunk: format\n", + "Chat model chunk: :\n", + "\n", + "\n", + "Chat model chunk: ```\n", + "Chat model chunk: json\n", + "Chat model chunk:\n", + "\n", + "Chat model chunk: {" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "- [Dispatching custom events](/docs/how_to/callbacks_custom_events)" + ] } - ], - "source": [ - "// A function that operates on finalized inputs\n", - "// rather than on an input_stream\n", - "import { JsonOutputParser } from \"@langchain/core/output_parsers\"\n", - "import { RunnablePassthrough } from \"@langchain/core/runnables\";\n", - "\n", - "// A function that does not operates on input streams and breaks streaming.\n", - "const extractCountryNames = (inputs: Record) => {\n", - " if (!Array.isArray(inputs.countries)) {\n", - " return \"\";\n", - " }\n", - " return JSON.stringify(inputs.countries.map((country) => country.name));\n", - "}\n", - "\n", - "const chain = model.pipe(new JsonOutputParser()).pipe(extractCountryNames);\n", - "\n", - "const stream = await chain.stream(\n", - " `output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. 
Each country should have the key \"name\" and \"population\"`\n", - ");\n", - "\n", - "for await (const chunk of stream) {\n", - " console.log(chunk);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As expected, the `stream` API doesn't work correctly because `extractCountryNames` doesn't operate on streams.\n", - "\n", - "Now, let's confirm that with `streamEvents` we're still seeing streaming output from the model and the parser." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "const eventStream = await chain.streamEvents(\n", - " `output a list of the countries france, spain and japan and their populations in JSON format.\n", - "Use a dict with an outer key of \"countries\" which contains a list of countries.\n", - "Each country should have the key \"name\" and \"population\"\n", - "Your output should ONLY contain valid JSON data. Do not include any other text or content in your output.`,\n", - " { version: \"v2\" },\n", - ");\n", - "\n", - "let eventCount = 0;\n", - "\n", - "for await (const event of eventStream) {\n", - " // Truncate the output\n", - " if (eventCount > 30) {\n", - " continue;\n", - " }\n", - " const eventType = event.event;\n", - " if (eventType === \"on_chat_model_stream\") {\n", - " console.log(`Chat model chunk: ${event.data.chunk.message.content}`);\n", - " } else if (eventType === \"on_parser_stream\") {\n", - " console.log(`Parser chunk: ${JSON.stringify(event.data.chunk)}`);\n", - " } else {\n", - " console.log(eventType)\n", - " }\n", - " eventCount += 1;\n", - "}" - ] - }, - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - }, - "source": [ - "Chat model chunk:\n", - "Chat model chunk: Here's\n", - "Chat model chunk: how\n", - "Chat model chunk: you\n", - "Chat model chunk: can\n", - "Chat model chunk: represent\n", - "Chat model chunk: the\n", - "Chat model chunk: countries\n", - "Chat model chunk: France\n", - "Chat model chunk: ,\n", - "Chat model chunk: Spain\n", - "Chat model chunk: ,\n", - "Chat model chunk: and\n", - "Chat model chunk: Japan\n", - "Chat model chunk: ,\n", - "Chat model chunk: along\n", - "Chat model chunk: with\n", - "Chat model chunk: their\n", - "Chat model chunk: populations\n", - "Chat model chunk: ,\n", - "Chat model chunk: in\n", - "Chat model chunk: JSON\n", - "Chat model chunk: format\n", - "Chat model chunk: :\n", - "\n", - "\n", - "Chat model chunk: ```\n", - "Chat model chunk: json\n", - "Chat model chunk:\n", - "\n", - "Chat model chunk: {" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "- [Dispatching custom events](/docs/how_to/callbacks_custom_events)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of 
file diff --git a/docs/core_docs/docs/how_to/structured_output.ipynb b/docs/core_docs/docs/how_to/structured_output.ipynb index b9a538adf53e..8adb09da1750 100644 --- a/docs/core_docs/docs/how_to/structured_output.ipynb +++ b/docs/core_docs/docs/how_to/structured_output.ipynb @@ -1,620 +1,620 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "27598444", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 3\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "6e3f0f72", - "metadata": {}, - "source": [ - "# How to return structured data from a model\n", - "```{=mdx}\n", - "\n", - "```\n", - "\n", - "It is often useful to have a model return output that matches some specific schema. One common use-case is extracting data from arbitrary text to insert into a traditional database or use with some other downstrem system. This guide will show you a few different strategies you can use to do this.\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "\n", - ":::\n", - "\n", - "## The `.withStructuredOutput()` method\n", - "\n", - "There are several strategies that models can use under the hood. For some of the most popular model providers, including [Anthropic](/docs/integrations/platforms/anthropic/), [Google VertexAI](/docs/integrations/platforms/google/), [Mistral](/docs/integrations/chat/mistral/), and [OpenAI](/docs/integrations/platforms/openai/) LangChain implements a common interface that abstracts away these strategies called `.withStructuredOutput`.\n", - "\n", - "By invoking this method (and passing in [JSON schema](https://json-schema.org/) or a [Zod schema](https://zod.dev/)) the model will add whatever model parameters + output parsers are necessary to get back structured output matching the requested schema. If the model supports more than one way to do this (e.g., function calling vs JSON mode) - you can configure which method to use by passing into that method.\n", - "\n", - "Let's look at some examples of this in action! 
We'll use Zod to create a simple response schema.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "070bf702", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "{\n", - " setup: \u001b[32m\"Why don't cats play poker in the wild?\"\u001b[39m,\n", - " punchline: \u001b[32m\"Too many cheetahs.\"\u001b[39m,\n", - " rating: \u001b[33m7\u001b[39m\n", - "}" + "cell_type": "raw", + "id": "27598444", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 3\n", + "---" ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { z } from \"zod\";\n", - "\n", - "const joke = z.object({\n", - " setup: z.string().describe(\"The setup of the joke\"),\n", - " punchline: z.string().describe(\"The punchline to the joke\"),\n", - " rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n", - "});\n", - "\n", - "const structuredLlm = model.withStructuredOutput(joke);\n", - "\n", - "await structuredLlm.invoke(\"Tell me a joke about cats\")" - ] - }, - { - "cell_type": "markdown", - "id": "fe6efeab", - "metadata": {}, - "source": [ - "One key point is that though we set our Zod schema as a variable named `joke`, Zod is not able to access that variable name, and therefore cannot pass it to the model. Though it is not required, we can pass a name for our schema in order to give the model additional context as to what our schema represents, improving performance:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "f3d01a1d", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "{\n", - " setup: \u001b[32m\"Why don't cats play poker in the wild?\"\u001b[39m,\n", - " punchline: \u001b[32m\"Too many cheetahs!\"\u001b[39m,\n", - " rating: \u001b[33m7\u001b[39m\n", - "}" + "cell_type": "markdown", + "id": "6e3f0f72", + "metadata": {}, + "source": [ + "# How to return structured data from a model\n", + "```{=mdx}\n", + "\n", + "```\n", + "\n", + "It is often useful to have a model return output that matches some specific schema. One common use-case is extracting data from arbitrary text to insert into a traditional database or use with some other downstrem system. This guide will show you a few different strategies you can use to do this.\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "\n", + ":::\n", + "\n", + "## The `.withStructuredOutput()` method\n", + "\n", + "There are several strategies that models can use under the hood. For some of the most popular model providers, including [Anthropic](/docs/integrations/platforms/anthropic/), [Google VertexAI](/docs/integrations/platforms/google/), [Mistral](/docs/integrations/chat/mistral/), and [OpenAI](/docs/integrations/platforms/openai/) LangChain implements a common interface that abstracts away these strategies called `.withStructuredOutput`.\n", + "\n", + "By invoking this method (and passing in [JSON schema](https://json-schema.org/) or a [Zod schema](https://zod.dev/)) the model will add whatever model parameters + output parsers are necessary to get back structured output matching the requested schema. 
If the model supports more than one way to do this (e.g., function calling vs JSON mode) - you can configure which method to use by passing into that method.\n", + "\n", + "Let's look at some examples of this in action! We'll use Zod to create a simple response schema.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const structuredLlm = model.withStructuredOutput(joke, { name: \"joke\" });\n", - "\n", - "await structuredLlm.invoke(\"Tell me a joke about cats\")" - ] - }, - { - "cell_type": "markdown", - "id": "deddb6d3", - "metadata": {}, - "source": [ - "The result is a JSON object.\n", - "\n", - "We can also pass in an OpenAI-style JSON schema dict if you prefer not to use Zod. This object should contain three properties:\n", - "\n", - "- `name`: The name of the schema to output.\n", - "- `description`: A high level description of the schema to output.\n", - "- `parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n", - "\n", - "In this case, the response is also a dict:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "6700994a", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "{\n", - " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", - " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m\n", - "}" + "cell_type": "code", + "execution_count": 1, + "id": "070bf702", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " setup: \u001b[32m\"Why don't cats play poker in the wild?\"\u001b[39m,\n", + " punchline: \u001b[32m\"Too many cheetahs.\"\u001b[39m,\n", + " rating: \u001b[33m7\u001b[39m\n", + "}" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { z } from \"zod\";\n", + "\n", + "const joke = z.object({\n", + " setup: z.string().describe(\"The setup of the joke\"),\n", + " punchline: z.string().describe(\"The punchline to the joke\"),\n", + " rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n", + "});\n", + "\n", + "const structuredLlm = model.withStructuredOutput(joke);\n", + "\n", + "await structuredLlm.invoke(\"Tell me a joke about cats\")" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const structuredLlm = model.withStructuredOutput(\n", - " {\n", - " \"name\": \"joke\",\n", - " \"description\": \"Joke to tell user.\",\n", - " \"parameters\": {\n", - " \"title\": \"Joke\",\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"setup\": {\"type\": \"string\", \"description\": \"The setup for the joke\"},\n", - " \"punchline\": {\"type\": \"string\", \"description\": \"The joke's punchline\"},\n", - " },\n", - " \"required\": [\"setup\", \"punchline\"],\n", - " },\n", - " }\n", - ")\n", - "\n", - "await structuredLlm.invoke(\"Tell me a joke about cats\", { name: \"joke\" })" - ] - }, - { - "cell_type": "markdown", - "id": "e28c14d3", - "metadata": {}, - "source": [ - "If you are using JSON Schema, you can take advantage of other more complex schema descriptions to create a similar effect.\n", - "\n", - "You can also use tool calling directly to allow the model to choose between options, if your chosen model supports it. 
This involves a bit more parsing and setup. See [this how-to guide](/docs/how_to/tool_calling/) for more details." - ] - }, - { - "cell_type": "markdown", - "id": "39d7a555", - "metadata": {}, - "source": [ - "### Specifying the output method (Advanced)\n", - "\n", - "For models that support more than one means of outputting data, you can specify the preferred one like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "df0370e3", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "{\n", - " setup: \u001b[32m\"Why don't cats play poker in the jungle?\"\u001b[39m,\n", - " punchline: \u001b[32m\"Too many cheetahs!\"\u001b[39m\n", - "}" + "cell_type": "markdown", + "id": "fe6efeab", + "metadata": {}, + "source": [ + "One key point is that though we set our Zod schema as a variable named `joke`, Zod is not able to access that variable name, and therefore cannot pass it to the model. Though it is not required, we can pass a name for our schema in order to give the model additional context as to what our schema represents, improving performance:" ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const structuredLlm = model.withStructuredOutput(joke, {\n", - " method: \"json_mode\",\n", - " name: \"joke\",\n", - "})\n", - "\n", - "await structuredLlm.invoke(\n", - " \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "56278a82", - "metadata": {}, - "source": [ - "In the above example, we use OpenAI's alternate JSON mode capability along with a more specific prompt.\n", - "\n", - "For specifics about the model you choose, peruse its entry in the [API reference pages](https://api.js.langchain.com/).\n", - "\n", - "### (Advanced) Raw outputs\n", - "\n", - "LLMs aren't perfect at generating structured output, especially as schemas become complex. You can avoid raising exceptions and handle the raw output yourself by passing `includeRaw: true`. 
This changes the output format to contain the raw message output and the `parsed` value (if successful):" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "46b616a4", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "{\n", - " raw: AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"\"\u001b[39m,\n", - " tool_calls: [\n", - " {\n", - " name: \u001b[32m\"joke\"\u001b[39m,\n", - " args: \u001b[36m[Object]\u001b[39m,\n", - " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n", - " }\n", - " ],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: [ \u001b[36m[Object]\u001b[39m ] },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {\n", - " function_call: \u001b[90mundefined\u001b[39m,\n", - " tool_calls: [\n", - " {\n", - " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m,\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: \u001b[36m[Object]\u001b[39m\n", - " }\n", - " ]\n", - " },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m33\u001b[39m, promptTokens: \u001b[33m88\u001b[39m, totalTokens: \u001b[33m121\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [\n", - " {\n", - " name: \u001b[32m\"joke\"\u001b[39m,\n", - " args: {\n", - " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", - " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n", - " rating: \u001b[33m7\u001b[39m\n", - " },\n", - " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n", - " }\n", - " ],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: { input_tokens: \u001b[33m88\u001b[39m, output_tokens: \u001b[33m33\u001b[39m, total_tokens: \u001b[33m121\u001b[39m }\n", - " },\n", - " parsed: {\n", - " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", - " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n", - " rating: \u001b[33m7\u001b[39m\n", - " }\n", - "}" + "cell_type": "code", + "execution_count": 2, + "id": "f3d01a1d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " setup: \u001b[32m\"Why don't cats play poker in the wild?\"\u001b[39m,\n", + " punchline: \u001b[32m\"Too many cheetahs!\"\u001b[39m,\n", + " rating: \u001b[33m7\u001b[39m\n", + "}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const structuredLlm = model.withStructuredOutput(joke, { name: \"joke\" });\n", + "\n", + "await structuredLlm.invoke(\"Tell me a joke about cats\")" ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const joke = z.object({\n", - " setup: z.string().describe(\"The setup of the joke\"),\n", - " punchline: z.string().describe(\"The punchline to the joke\"),\n", - " rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n", - "});\n", - "\n", - "const structuredLlm = model.withStructuredOutput(joke, { includeRaw: true, name: \"joke\" });\n", - "\n", - "await structuredLlm.invoke(\"Tell me a joke about cats\");" - ] - }, - { - "cell_type": "markdown", - 
"id": "5e92a98a", - "metadata": {}, - "source": [ - "## Prompting techniques\n", - "\n", - "You can also prompt models to outputting information in a given format. This approach relies on designing good prompts and then parsing the output of the models. This is the only option for models that don't support `.with_structured_output()` or other built-in approaches.\n", - "\n", - "### Using `JsonOutputParser`\n", - "\n", - "The following example uses the built-in [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) to parse the output of a chat model prompted to match a the given JSON schema. Note that we are adding `format_instructions` directly to the prompt from a method on the parser:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "6e514455", - "metadata": {}, - "outputs": [], - "source": [ - "import { JsonOutputParser } from \"@langchain/core/output_parsers\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "type Person = {\n", - " name: string;\n", - " height_in_meters: number;\n", - "};\n", - "\n", - "type People = {\n", - " people: Person[];\n", - "};\n", - "\n", - "const formatInstructions = `Respond only in valid JSON. The JSON object you return should match the following schema:\n", - "{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n", - "\n", - "Where people is an array of objects, each with a name and height_in_meters field.\n", - "`\n", - "\n", - "// Set up a parser\n", - "const parser = new JsonOutputParser();\n", - "\n", - "// Prompt\n", - "const prompt = await ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"Answer the user query. Wrap the output in `json` tags\\n{format_instructions}\",\n", - " ],\n", - " [\n", - " \"human\",\n", - " \"{query}\",\n", - " ]\n", - " ]\n", - ").partial({\n", - " format_instructions: formatInstructions,\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "082fa166", - "metadata": {}, - "source": [ - "Let’s take a look at what information is sent to the model:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "3d73d33d", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "System: Answer the user query. Wrap the output in `json` tags\n", - "Respond only in valid JSON. The JSON object you return should match the following schema:\n", - "{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n", - "\n", - "Where people is an array of objects, each with a name and height_in_meters field.\n", - "\n", - "Human: Anna is 23 years old and she is 6 feet tall\n" - ] - } - ], - "source": [ - "const query = \"Anna is 23 years old and she is 6 feet tall\"\n", - "\n", - "console.log((await prompt.format({ query })).toString())" - ] - }, - { - "cell_type": "markdown", - "id": "081956b9", - "metadata": {}, - "source": [ - "And now let's invoke it:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "8d6b3d17", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "deddb6d3", + "metadata": {}, + "source": [ + "The result is a JSON object.\n", + "\n", + "We can also pass in an OpenAI-style JSON schema dict if you prefer not to use Zod. 
This object should contain three properties:\n", + "\n", + "- `name`: The name of the schema to output.\n", + "- `description`: A high level description of the schema to output.\n", + "- `parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n", + "\n", + "In this case, the response is also a dict:" + ] + }, { - "data": { - "text/plain": [ - "{ people: [ { name: \u001b[32m\"Anna\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m } ] }" + "cell_type": "code", + "execution_count": 3, + "id": "6700994a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", + " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const structuredLlm = model.withStructuredOutput(\n", + " {\n", + " \"name\": \"joke\",\n", + " \"description\": \"Joke to tell user.\",\n", + " \"parameters\": {\n", + " \"title\": \"Joke\",\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"setup\": {\"type\": \"string\", \"description\": \"The setup for the joke\"},\n", + " \"punchline\": {\"type\": \"string\", \"description\": \"The joke's punchline\"},\n", + " },\n", + " \"required\": [\"setup\", \"punchline\"],\n", + " },\n", + " }\n", + ")\n", + "\n", + "await structuredLlm.invoke(\"Tell me a joke about cats\", { name: \"joke\" })" ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const chain = prompt.pipe(model).pipe(parser);\n", - "\n", - "await chain.invoke({ query })" - ] - }, - { - "cell_type": "markdown", - "id": "6732dd87", - "metadata": {}, - "source": [ - "For a deeper dive into using output parsers with prompting techniques for structured output, see [this guide](/docs/how_to/output_parser_structured).\n", - "\n", - "### Custom Parsing\n", - "\n", - "You can also create a custom prompt and parser with [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language), using a plain function to parse the output from the model:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "525721b3", - "metadata": {}, - "outputs": [], - "source": [ - "import { AIMessage } from \"@langchain/core/messages\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "type Person = {\n", - " name: string;\n", - " height_in_meters: number;\n", - "};\n", - "\n", - "type People = {\n", - " people: Person[];\n", - "};\n", - "\n", - "const schema = `{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}`\n", - "\n", - "// Prompt\n", - "const prompt = await ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " `Answer the user query. 
Output your answer as JSON that\n", - "matches the given schema: \\`\\`\\`json\\n{schema}\\n\\`\\`\\`.\n", - "Make sure to wrap the answer in \\`\\`\\`json and \\`\\`\\` tags`\n", - " ],\n", - " [\n", - " \"human\",\n", - " \"{query}\",\n", - " ]\n", - " ]\n", - ").partial({\n", - " schema\n", - "});\n", - "\n", - "/**\n", - " * Custom extractor\n", - " * \n", - " * Extracts JSON content from a string where\n", - " * JSON is embedded between ```json and ``` tags.\n", - " */\n", - "const extractJson = (output: AIMessage): Array => {\n", - " const text = output.content as string;\n", - " // Define the regular expression pattern to match JSON blocks\n", - " const pattern = /```json(.*?)```/gs;\n", - "\n", - " // Find all non-overlapping matches of the pattern in the string\n", - " const matches = text.match(pattern);\n", - "\n", - " // Process each match, attempting to parse it as JSON\n", - " try {\n", - " return matches?.map(match => {\n", - " // Remove the markdown code block syntax to isolate the JSON string\n", - " const jsonStr = match.replace(/```json|```/g, '').trim();\n", - " return JSON.parse(jsonStr);\n", - " }) ?? [];\n", - " } catch (error) {\n", - " throw new Error(`Failed to parse: ${output}`);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "9f1bc8f7", - "metadata": {}, - "source": [ - "Here is the prompt sent to the model:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "c8a30d0e", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "System: Answer the user query. Output your answer as JSON that\n", - "matches the given schema: ```json\n", - "{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n", - "```.\n", - "Make sure to wrap the answer in ```json and ``` tags\n", - "Human: Anna is 23 years old and she is 6 feet tall\n" - ] - } - ], - "source": [ - "const query = \"Anna is 23 years old and she is 6 feet tall\"\n", - "\n", - "console.log((await prompt.format({ query })).toString())" - ] - }, - { - "cell_type": "markdown", - "id": "ec018893", - "metadata": {}, - "source": [ - "And here's what it looks like when we invoke it:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "e1e7baf6", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e28c14d3", + "metadata": {}, + "source": [ + "If you are using JSON Schema, you can take advantage of other more complex schema descriptions to create a similar effect.\n", + "\n", + "You can also use tool calling directly to allow the model to choose between options, if your chosen model supports it. This involves a bit more parsing and setup. See [this how-to guide](/docs/how_to/tool_calling/) for more details." 
+ ] + }, { - "data": { - "text/plain": [ - "[\n", - " { people: [ { name: \u001b[32m\"Anna\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m } ] }\n", - "]" + "cell_type": "markdown", + "id": "39d7a555", + "metadata": {}, + "source": [ + "### Specifying the output method (Advanced)\n", + "\n", + "For models that support more than one means of outputting data, you can specify the preferred one like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "df0370e3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " setup: \u001b[32m\"Why don't cats play poker in the jungle?\"\u001b[39m,\n", + " punchline: \u001b[32m\"Too many cheetahs!\"\u001b[39m\n", + "}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const structuredLlm = model.withStructuredOutput(joke, {\n", + " method: \"json_mode\",\n", + " name: \"joke\",\n", + "})\n", + "\n", + "await structuredLlm.invoke(\n", + " \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "56278a82", + "metadata": {}, + "source": [ + "In the above example, we use OpenAI's alternate JSON mode capability along with a more specific prompt.\n", + "\n", + "For specifics about the model you choose, peruse its entry in the [API reference pages](https://api.js.langchain.com/).\n", + "\n", + "### (Advanced) Raw outputs\n", + "\n", + "LLMs aren't perfect at generating structured output, especially as schemas become complex. You can avoid raising exceptions and handle the raw output yourself by passing `includeRaw: true`. This changes the output format to contain the raw message output and the `parsed` value (if successful):" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "46b616a4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " raw: AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"\"\u001b[39m,\n", + " tool_calls: [\n", + " {\n", + " name: \u001b[32m\"joke\"\u001b[39m,\n", + " args: \u001b[36m[Object]\u001b[39m,\n", + " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n", + " }\n", + " ],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: [ \u001b[36m[Object]\u001b[39m ] },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {\n", + " function_call: \u001b[90mundefined\u001b[39m,\n", + " tool_calls: [\n", + " {\n", + " id: \u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: \u001b[36m[Object]\u001b[39m\n", + " }\n", + " ]\n", + " },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: \u001b[33m33\u001b[39m, promptTokens: \u001b[33m88\u001b[39m, totalTokens: \u001b[33m121\u001b[39m },\n", + " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", + " },\n", + " tool_calls: [\n", + " {\n", + " name: \u001b[32m\"joke\"\u001b[39m,\n", + " args: {\n", + " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", + " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n", + " rating: \u001b[33m7\u001b[39m\n", + " },\n", + " id: 
\u001b[32m\"call_0pEdltlfSXjq20RaBFKSQOeF\"\u001b[39m\n", + " }\n", + " ],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: { input_tokens: \u001b[33m88\u001b[39m, output_tokens: \u001b[33m33\u001b[39m, total_tokens: \u001b[33m121\u001b[39m }\n", + " },\n", + " parsed: {\n", + " setup: \u001b[32m\"Why was the cat sitting on the computer?\"\u001b[39m,\n", + " punchline: \u001b[32m\"Because it wanted to keep an eye on the mouse!\"\u001b[39m,\n", + " rating: \u001b[33m7\u001b[39m\n", + " }\n", + "}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const joke = z.object({\n", + " setup: z.string().describe(\"The setup of the joke\"),\n", + " punchline: z.string().describe(\"The punchline to the joke\"),\n", + " rating: z.number().optional().describe(\"How funny the joke is, from 1 to 10\"),\n", + "});\n", + "\n", + "const structuredLlm = model.withStructuredOutput(joke, { includeRaw: true, name: \"joke\" });\n", + "\n", + "await structuredLlm.invoke(\"Tell me a joke about cats\");" + ] + }, + { + "cell_type": "markdown", + "id": "5e92a98a", + "metadata": {}, + "source": [ + "## Prompting techniques\n", + "\n", + "You can also prompt models to outputting information in a given format. This approach relies on designing good prompts and then parsing the output of the models. This is the only option for models that don't support `.with_structured_output()` or other built-in approaches.\n", + "\n", + "### Using `JsonOutputParser`\n", + "\n", + "The following example uses the built-in [`JsonOutputParser`](https://api.js.langchain.com/classes/langchain_core.output_parsers.JsonOutputParser.html) to parse the output of a chat model prompted to match a the given JSON schema. Note that we are adding `format_instructions` directly to the prompt from a method on the parser:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "6e514455", + "metadata": {}, + "outputs": [], + "source": [ + "import { JsonOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "type Person = {\n", + " name: string;\n", + " height_in_meters: number;\n", + "};\n", + "\n", + "type People = {\n", + " people: Person[];\n", + "};\n", + "\n", + "const formatInstructions = `Respond only in valid JSON. The JSON object you return should match the following schema:\n", + "{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n", + "\n", + "Where people is an array of objects, each with a name and height_in_meters field.\n", + "`\n", + "\n", + "// Set up a parser\n", + "const parser = new JsonOutputParser();\n", + "\n", + "// Prompt\n", + "const prompt = await ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"Answer the user query. Wrap the output in `json` tags\\n{format_instructions}\",\n", + " ],\n", + " [\n", + " \"human\",\n", + " \"{query}\",\n", + " ]\n", + " ]\n", + ").partial({\n", + " format_instructions: formatInstructions,\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "082fa166", + "metadata": {}, + "source": [ + "Let’s take a look at what information is sent to the model:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "3d73d33d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "System: Answer the user query. Wrap the output in `json` tags\n", + "Respond only in valid JSON. 
The JSON object you return should match the following schema:\n", + "{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n", + "\n", + "Where people is an array of objects, each with a name and height_in_meters field.\n", + "\n", + "Human: Anna is 23 years old and she is 6 feet tall\n" + ] + } + ], + "source": [ + "const query = \"Anna is 23 years old and she is 6 feet tall\"\n", + "\n", + "console.log((await prompt.format({ query })).toString())" + ] + }, + { + "cell_type": "markdown", + "id": "081956b9", + "metadata": {}, + "source": [ + "And now let's invoke it:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "8d6b3d17", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ people: [ { name: \u001b[32m\"Anna\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m } ] }" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const chain = prompt.pipe(model).pipe(parser);\n", + "\n", + "await chain.invoke({ query })" + ] + }, + { + "cell_type": "markdown", + "id": "6732dd87", + "metadata": {}, + "source": [ + "For a deeper dive into using output parsers with prompting techniques for structured output, see [this guide](/docs/how_to/output_parser_structured).\n", + "\n", + "### Custom Parsing\n", + "\n", + "You can also create a custom prompt and parser with [LangChain Expression Language (LCEL)](/docs/concepts/lcel), using a plain function to parse the output from the model:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "525721b3", + "metadata": {}, + "outputs": [], + "source": [ + "import { AIMessage } from \"@langchain/core/messages\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "type Person = {\n", + " name: string;\n", + " height_in_meters: number;\n", + "};\n", + "\n", + "type People = {\n", + " people: Person[];\n", + "};\n", + "\n", + "const schema = `{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}`\n", + "\n", + "// Prompt\n", + "const prompt = await ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " `Answer the user query. Output your answer as JSON that\n", + "matches the given schema: \\`\\`\\`json\\n{schema}\\n\\`\\`\\`.\n", + "Make sure to wrap the answer in \\`\\`\\`json and \\`\\`\\` tags`\n", + " ],\n", + " [\n", + " \"human\",\n", + " \"{query}\",\n", + " ]\n", + " ]\n", + ").partial({\n", + " schema\n", + "});\n", + "\n", + "/**\n", + " * Custom extractor\n", + " * \n", + " * Extracts JSON content from a string where\n", + " * JSON is embedded between ```json and ``` tags.\n", + " */\n", + "const extractJson = (output: AIMessage): Array => {\n", + " const text = output.content as string;\n", + " // Define the regular expression pattern to match JSON blocks\n", + " const pattern = /```json(.*?)```/gs;\n", + "\n", + " // Find all non-overlapping matches of the pattern in the string\n", + " const matches = text.match(pattern);\n", + "\n", + " // Process each match, attempting to parse it as JSON\n", + " try {\n", + " return matches?.map(match => {\n", + " // Remove the markdown code block syntax to isolate the JSON string\n", + " const jsonStr = match.replace(/```json|```/g, '').trim();\n", + " return JSON.parse(jsonStr);\n", + " }) ?? 
[];\n", + " } catch (error) {\n", + " throw new Error(`Failed to parse: ${output}`);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "9f1bc8f7", + "metadata": {}, + "source": [ + "Here is the prompt sent to the model:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c8a30d0e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "System: Answer the user query. Output your answer as JSON that\n", + "matches the given schema: ```json\n", + "{{ people: [{{ name: \"string\", height_in_meters: \"number\" }}] }}\n", + "```.\n", + "Make sure to wrap the answer in ```json and ``` tags\n", + "Human: Anna is 23 years old and she is 6 feet tall\n" + ] + } + ], + "source": [ + "const query = \"Anna is 23 years old and she is 6 feet tall\"\n", + "\n", + "console.log((await prompt.format({ query })).toString())" + ] + }, + { + "cell_type": "markdown", + "id": "ec018893", + "metadata": {}, + "source": [ + "And here's what it looks like when we invoke it:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "e1e7baf6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " { people: [ { name: \u001b[32m\"Anna\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m } ] }\n", + "]" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "\n", + "const chain = prompt.pipe(model).pipe(new RunnableLambda({ func: extractJson }));\n", + "\n", + "await chain.invoke({ query })" + ] + }, + { + "cell_type": "markdown", + "id": "7a39221a", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "Now you've learned a few methods to make a model output structured data.\n", + "\n", + "To learn more, check out the other how-to guides in this section, or the conceptual guide on tool calling." ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "\n", - "const chain = prompt.pipe(model).pipe(new RunnableLambda({ func: extractJson }));\n", - "\n", - "await chain.invoke({ query })" - ] - }, - { - "cell_type": "markdown", - "id": "7a39221a", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "Now you've learned a few methods to make a model output structured data.\n", - "\n", - "To learn more, check out the other how-to guides in this section, or the conceptual guide on tool calling." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx b/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx index e40038a52e96..659501280dd1 100644 --- a/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx +++ b/docs/core_docs/docs/how_to/time_weighted_vectorstore.mdx @@ -4,7 +4,7 @@ This guide assumes familiarity with the following concepts: -- [Retrievers](/docs/concepts/#retrievers) +- [Retrievers](/docs/concepts/retrievers) - [Vector stores](/docs/concepts/#vectorstores) - [Retrieval-augmented generation (RAG)](/docs/tutorials/rag) diff --git a/docs/core_docs/docs/how_to/tool_artifacts.ipynb b/docs/core_docs/docs/how_to/tool_artifacts.ipynb index 44cf610980de..14c9b1cd6ac1 100644 --- a/docs/core_docs/docs/how_to/tool_artifacts.ipynb +++ b/docs/core_docs/docs/how_to/tool_artifacts.ipynb @@ -1,352 +1,352 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "503e36ae-ca62-4f8a-880c-4fe78ff5df93", - "metadata": {}, - "source": [ - "# How to return artifacts from a tool\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [ToolMessage](/docs/concepts/#toolmessage)\n", - "- [Tools](/docs/concepts/#tools)\n", - "- [Tool calling](/docs/concepts/#functiontool-calling)\n", - "\n", - ":::\n", - "```\n", - "\n", - "Tools are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself.\n", - "\n", - "For example if a tool returns something like a custom object or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n", - "\n", - "The Tool and [ToolMessage](https://api.js.langchain.com/classes/langchain_core.messages_tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the `ToolMessage.content`) and those parts which are meant for use outside the model (`ToolMessage.artifact`).\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "\n", - "This functionality requires `@langchain/core>=0.2.16`. 
Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", - "\n", - ":::\n", - "```\n", - "\n", - "## Defining the tool\n", - "\n", - "If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format: \"content_and_artifact\"` when defining our tool and make sure that we return a tuple of [`content`, `artifact`]:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "b9eb179d-1f41-4748-9866-b3d3e8c73cd0", - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const randomIntToolSchema = z.object({\n", - " min: z.number(),\n", - " max: z.number(),\n", - " size: z.number(),\n", - "});\n", - "\n", - "const generateRandomInts = tool(async ({ min, max, size }) => {\n", - " const array: number[] = [];\n", - " for (let i = 0; i < size; i++) {\n", - " array.push(Math.floor(Math.random() * (max - min + 1)) + min);\n", - " }\n", - " return [\n", - " `Successfully generated array of ${size} random ints in [${min}, ${max}].`,\n", - " array,\n", - " ];\n", - "}, {\n", - " name: \"generateRandomInts\",\n", - " description: \"Generate size random ints in the range [min, max].\",\n", - " schema: randomIntToolSchema,\n", - " responseFormat: \"content_and_artifact\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "0ab05d25-af4a-4e5a-afe2-f090416d7ee7", - "metadata": {}, - "source": [ - "## Invoking the tool with ToolCall\n", - "\n", - "If we directly invoke our tool with just the tool arguments, you'll notice that we only get back the content part of the `Tool` output:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "5e7d5e77-3102-4a59-8ade-e4e699dd1817", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Successfully generated array of 10 random ints in [0, 9].\n" - ] - } - ], - "source": [ - "await generateRandomInts.invoke({min: 0, max: 9, size: 10});" - ] - }, - { - "cell_type": "markdown", - "id": "30db7228-f04c-489e-afda-9a572eaa90a1", - "metadata": {}, - "source": [ - "In order to get back both the content and the artifact, we need to invoke our model with a `ToolCall` (which is just a dictionary with `\"name\"`, `\"args\"`, `\"id\"` and `\"type\"` keys), which has additional info needed to generate a ToolMessage like the tool call ID:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "da1d939d-a900-4b01-92aa-d19011a6b034", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "503e36ae-ca62-4f8a-880c-4fe78ff5df93", + "metadata": {}, + "source": [ + "# How to return artifacts from a tool\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [ToolMessage](/docs/concepts/messages/#toolmessage)\n", + "- [Tools](/docs/concepts/tools)\n", + "- [Tool calling](/docs/concepts/tool_calling)\n", + "\n", + ":::\n", + "```\n", + "\n", + "Tools are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. 
Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself.\n", + "\n", + "For example if a tool returns something like a custom object or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n", + "\n", + "The Tool and [ToolMessage](https://api.js.langchain.com/classes/langchain_core.messages_tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the `ToolMessage.content`) and those parts which are meant for use outside the model (`ToolMessage.artifact`).\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "\n", + "This functionality requires `@langchain/core>=0.2.16`. Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", + "\n", + ":::\n", + "```\n", + "\n", + "## Defining the tool\n", + "\n", + "If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format: \"content_and_artifact\"` when defining our tool and make sure that we return a tuple of [`content`, `artifact`]:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Successfully generated array of 10 random ints in [0, 9].',\n", - " artifact: [\n", - " 0, 6, 5, 5, 7,\n", - " 0, 6, 3, 7, 5\n", - " ],\n", - " tool_call_id: '123',\n", - " name: 'generateRandomInts',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Successfully generated array of 10 random ints in [0, 9].',\n", - " name: 'generateRandomInts',\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_call_id: '123',\n", - " artifact: [\n", - " 0, 6, 5, 5, 7,\n", - " 0, 6, 3, 7, 5\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "await generateRandomInts.invoke(\n", - " {\n", - " name: \"generate_random_ints\",\n", - " args: {min: 0, max: 9, size: 10},\n", - " id: \"123\", // Required\n", - " type: \"tool_call\", // Required\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "a3cfc03d-020b-42c7-b0f8-c824af19e45e", - "metadata": {}, - "source": [ - "## Using with a model\n", - "\n", - "With a [tool-calling model](/docs/how_to/tool_calling/), we can easily use a model to call our Tool and generate ToolMessages:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "8a67424b-d19c-43df-ac7b-690bca42146c", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "b9eb179d-1f41-4748-9866-b3d3e8c73cd0", + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const randomIntToolSchema = z.object({\n", + " min: z.number(),\n", + " max: z.number(),\n", + " size: z.number(),\n", + "});\n", + "\n", + "const generateRandomInts = tool(async ({ min, max, size }) => {\n", + " const array: number[] = [];\n", + " for (let i = 0; i < size; i++) {\n", + " 
array.push(Math.floor(Math.random() * (max - min + 1)) + min);\n", + " }\n", + " return [\n", + " `Successfully generated array of ${size} random ints in [${min}, ${max}].`,\n", + " array,\n", + " ];\n", + "}, {\n", + " name: \"generateRandomInts\",\n", + " description: \"Generate size random ints in the range [min, max].\",\n", + " schema: randomIntToolSchema,\n", + " responseFormat: \"content_and_artifact\",\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'generateRandomInts',\n", - " args: { min: 1, max: 24, size: 6 },\n", - " id: 'toolu_019ygj3YuoU6qFzR66juXALp',\n", - " type: 'tool_call'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const llmWithTools = llm.bindTools([generateRandomInts])\n", - "\n", - "const aiMessage = await llmWithTools.invoke(\"generate 6 positive ints less than 25\")\n", - "aiMessage.tool_calls" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "00c4e906-3ca8-41e8-a0be-65cb0db7d574", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "0ab05d25-af4a-4e5a-afe2-f090416d7ee7", + "metadata": {}, + "source": [ + "## Invoking the tool with ToolCall\n", + "\n", + "If we directly invoke our tool with just the tool arguments, you'll notice that we only get back the content part of the `Tool` output:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Successfully generated array of 6 random ints in [1, 24].',\n", - " artifact: [ 18, 20, 16, 15, 17, 19 ],\n", - " tool_call_id: 'toolu_019ygj3YuoU6qFzR66juXALp',\n", - " name: 'generateRandomInts',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Successfully generated array of 6 random ints in [1, 24].',\n", - " name: 'generateRandomInts',\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_call_id: 'toolu_019ygj3YuoU6qFzR66juXALp',\n", - " artifact: [ 18, 20, 16, 15, 17, 19 ]\n", - "}\n" - ] - } - ], - "source": [ - "await generateRandomInts.invoke(aiMessage.tool_calls[0])" - ] - }, - { - "cell_type": "markdown", - "id": "ddef2690-70de-4542-ab20-2337f77f3e46", - "metadata": {}, - "source": [ - "If we just pass in the tool call args, we'll only get back the content:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "f4a6c9a6-0ffc-4b0e-a59f-f3c3d69d824d", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "id": "5e7d5e77-3102-4a59-8ade-e4e699dd1817", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Successfully generated array of 10 random ints in [0, 9].\n" + ] + } + ], + "source": [ + "await generateRandomInts.invoke({min: 0, max: 9, size: 10});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Successfully generated array of 6 random ints in [1, 24].\n" - ] - } - ], - "source": [ - "await generateRandomInts.invoke(aiMessage.tool_calls[0][\"args\"])" - ] - }, - { - "cell_type": "markdown", - "id": "98d6443b-ff41-4d91-8523-b6274fc74ee5", - "metadata": {}, - "source": [ - "If we wanted to declaratively create a chain, we could do this:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "eb55ec23-95a4-464e-b886-d9679bf3aaa2", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "30db7228-f04c-489e-afda-9a572eaa90a1", + 
"metadata": {}, + "source": [ + "In order to get back both the content and the artifact, we need to invoke our model with a `ToolCall` (which is just a dictionary with `\"name\"`, `\"args\"`, `\"id\"` and `\"type\"` keys), which has additional info needed to generate a ToolMessage like the tool call ID:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "da1d939d-a900-4b01-92aa-d19011a6b034", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Successfully generated array of 10 random ints in [0, 9].',\n", + " artifact: [\n", + " 0, 6, 5, 5, 7,\n", + " 0, 6, 3, 7, 5\n", + " ],\n", + " tool_call_id: '123',\n", + " name: 'generateRandomInts',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Successfully generated array of 10 random ints in [0, 9].',\n", + " name: 'generateRandomInts',\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_call_id: '123',\n", + " artifact: [\n", + " 0, 6, 5, 5, 7,\n", + " 0, 6, 3, 7, 5\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "await generateRandomInts.invoke(\n", + " {\n", + " name: \"generate_random_ints\",\n", + " args: {min: 0, max: 9, size: 10},\n", + " id: \"123\", // Required\n", + " type: \"tool_call\", // Required\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "a3cfc03d-020b-42c7-b0f8-c824af19e45e", + "metadata": {}, + "source": [ + "## Using with a model\n", + "\n", + "With a [tool-calling model](/docs/how_to/tool_calling/), we can easily use a model to call our Tool and generate ToolMessages:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "8a67424b-d19c-43df-ac7b-690bca42146c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'generateRandomInts',\n", + " args: { min: 1, max: 24, size: 6 },\n", + " id: 'toolu_019ygj3YuoU6qFzR66juXALp',\n", + " type: 'tool_call'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const llmWithTools = llm.bindTools([generateRandomInts])\n", + "\n", + "const aiMessage = await llmWithTools.invoke(\"generate 6 positive ints less than 25\")\n", + "aiMessage.tool_calls" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "00c4e906-3ca8-41e8-a0be-65cb0db7d574", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Successfully generated array of 6 random ints in [1, 24].',\n", + " artifact: [ 18, 20, 16, 15, 17, 19 ],\n", + " tool_call_id: 'toolu_019ygj3YuoU6qFzR66juXALp',\n", + " name: 'generateRandomInts',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Successfully generated array of 6 random ints in [1, 24].',\n", + " name: 'generateRandomInts',\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_call_id: 'toolu_019ygj3YuoU6qFzR66juXALp',\n", + " artifact: [ 18, 20, 16, 15, 17, 19 ]\n", + "}\n" + ] + } + ], + "source": [ + "await generateRandomInts.invoke(aiMessage.tool_calls[0])" + ] + }, { - "name": "stdout", - 
"output_type": "stream", - "text": [ - "[\n", - " ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Successfully generated array of 1 random ints in [1, 5].',\n", - " artifact: [Array],\n", - " tool_call_id: 'toolu_01CskofJCQW8chkUzmVR1APU',\n", - " name: 'generateRandomInts',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Successfully generated array of 1 random ints in [1, 5].',\n", - " name: 'generateRandomInts',\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_call_id: 'toolu_01CskofJCQW8chkUzmVR1APU',\n", - " artifact: [ 1 ]\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "ddef2690-70de-4542-ab20-2337f77f3e46", + "metadata": {}, + "source": [ + "If we just pass in the tool call args, we'll only get back the content:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f4a6c9a6-0ffc-4b0e-a59f-f3c3d69d824d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Successfully generated array of 6 random ints in [1, 24].\n" + ] + } + ], + "source": [ + "await generateRandomInts.invoke(aiMessage.tool_calls[0][\"args\"])" + ] + }, + { + "cell_type": "markdown", + "id": "98d6443b-ff41-4d91-8523-b6274fc74ee5", + "metadata": {}, + "source": [ + "If we wanted to declaratively create a chain, we could do this:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "eb55ec23-95a4-464e-b886-d9679bf3aaa2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " ToolMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Successfully generated array of 1 random ints in [1, 5].',\n", + " artifact: [Array],\n", + " tool_call_id: 'toolu_01CskofJCQW8chkUzmVR1APU',\n", + " name: 'generateRandomInts',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Successfully generated array of 1 random ints in [1, 5].',\n", + " name: 'generateRandomInts',\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_call_id: 'toolu_01CskofJCQW8chkUzmVR1APU',\n", + " artifact: [ 1 ]\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const extractToolCalls = (aiMessage) => aiMessage.tool_calls;\n", + "\n", + "const chain = llmWithTools.pipe(extractToolCalls).pipe(generateRandomInts.map());\n", + "\n", + "await chain.invoke(\"give me a random number between 1 and 5\");" + ] + }, + { + "cell_type": "markdown", + "id": "54f74020", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "You've now seen how to return additional artifacts from a tool call.\n", + "\n", + "These guides may interest you next:\n", + "\n", + "- [Creating custom tools](/docs/how_to/custom_tools)\n", + "- [Building agents with LangGraph](https://langchain-ai.github.io/langgraphjs/)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const extractToolCalls = (aiMessage) => aiMessage.tool_calls;\n", - "\n", - "const chain = 
llmWithTools.pipe(extractToolCalls).pipe(generateRandomInts.map());\n", - "\n", - "await chain.invoke(\"give me a random number between 1 and 5\");" - ] - }, - { - "cell_type": "markdown", - "id": "54f74020", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "You've now seen how to return additional artifacts from a tool call.\n", - "\n", - "These guides may interest you next:\n", - "\n", - "- [Creating custom tools](/docs/how_to/custom_tools)\n", - "- [Building agents with LangGraph](https://langchain-ai.github.io/langgraphjs/)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_calling.ipynb b/docs/core_docs/docs/how_to/tool_calling.ipynb index 4560e2d7ddbb..219deedad25c 100644 --- a/docs/core_docs/docs/how_to/tool_calling.ipynb +++ b/docs/core_docs/docs/how_to/tool_calling.ipynb @@ -24,14 +24,14 @@ "\n", "This guide assumes familiarity with the following concepts:\n", "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [Tool calling](/docs/concepts/#functiontool-calling)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [Tool calling](/docs/concepts/tool_calling)\n", "\n", ":::\n", "```\n", "\n", - "[Tool calling](/docs/concepts/#functiontool-calling) allows a chat model to respond to a given prompt by \"calling a tool\".\n", + "[Tool calling](/docs/concepts/tool_calling) allows a chat model to respond to a given prompt by \"calling a tool\".\n", "\n", "Remember, while the name \"tool calling\" implies that the model is directly performing some action, this is actually not the case! The model only generates the arguments to a tool, and actually running the tool (or not) is up to the user.\n", "\n", @@ -441,4 +441,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_calling_parallel.ipynb b/docs/core_docs/docs/how_to/tool_calling_parallel.ipynb index bbe895997c7d..97f188d00549 100644 --- a/docs/core_docs/docs/how_to/tool_calling_parallel.ipynb +++ b/docs/core_docs/docs/how_to/tool_calling_parallel.ipynb @@ -1,223 +1,223 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to disable parallel tool calling\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [Tool calling](/docs/concepts/#functiontool-calling)\n", - "- [Custom tools](/docs/how_to/custom_tools)\n", - "\n", - ":::\n", - "```\n", - "\n", - ":::info OpenAI-specific\n", - "\n", - "This API is currently only supported by OpenAI.\n", - "\n", - ":::\n", - "\n", - "OpenAI models perform tool calling in parallel by default. That means that if we ask a question like `\"What is the weather in Tokyo, New York, and Chicago?\"` and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the `parallel_tool_call` call option." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First let's set up our tools and model:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const adderTool = tool(async ({ a, b }) => {\n", - " return a + b;\n", - "}, {\n", - " name: \"add\",\n", - " description: \"Adds a and b\",\n", - " schema: z.object({\n", - " a: z.number(),\n", - " b: z.number(),\n", - " })\n", - "});\n", - "\n", - "const multiplyTool = tool(async ({ a, b }) => {\n", - " return a + b;\n", - "}, {\n", - " name: \"multiply\",\n", - " description: \"Multiplies a and b\",\n", - " schema: z.object({\n", - " a: z.number(),\n", - " b: z.number(),\n", - " })\n", - "});\n", - "\n", - "const tools = [adderTool, multiplyTool];\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's show a quick example of how disabling parallel tool calls work:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'add',\n", - " args: { a: 5, b: 3 },\n", - " type: 'tool_call',\n", - " id: 'call_5bKOYerdQU6J5ERJJYnzYsGn'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const llmWithTools = llm.bindTools(tools, { parallel_tool_calls: false });\n", - "\n", - "const result = await llmWithTools.invoke(\"Please call the first tool two times\");\n", - "\n", - "result.tool_calls;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, even though we explicitly told the model to call a tool twice, by disabling parallel tool calls the model was constrained to only calling one.\n", - "\n", - "Compare this to calling the model without passing `parallel_tool_calls` as false:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to disable parallel tool calling\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [Tool calling](/docs/concepts/tool_calling)\n", + "- [Custom tools](/docs/how_to/custom_tools)\n", + "\n", + ":::\n", + "```\n", + "\n", + ":::info OpenAI-specific\n", + "\n", + "This API is currently only supported by OpenAI.\n", + "\n", + ":::\n", + "\n", + "OpenAI models perform tool calling in parallel by default. That means that if we ask a question like `\"What is the weather in Tokyo, New York, and Chicago?\"` and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the `parallel_tool_call` call option." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'add',\n", - " args: { a: 1, b: 2 },\n", - " type: 'tool_call',\n", - " id: 'call_Ni0tF0nNtY66BBwB5vEP6oI4'\n", - " },\n", - " {\n", - " name: 'add',\n", - " args: { a: 3, b: 4 },\n", - " type: 'tool_call',\n", - " id: 'call_XucnTCfFqP1JBs3LtbOq5w3d'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const llmWithNoBoundParam = llm.bindTools(tools);\n", - "\n", - "const result2 = await llmWithNoBoundParam.invoke(\"Please call the first tool two times\");\n", - "\n", - "result2.tool_calls;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see that you get two tool calls.\n", - "\n", - "You can also pass the parameter in at runtime like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First let's set up our tools and model:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'add',\n", - " args: { a: 1, b: 2 },\n", - " type: 'tool_call',\n", - " id: 'call_TWo6auul71NUg1p0suzBKARt'\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const adderTool = tool(async ({ a, b }) => {\n", + " return a + b;\n", + "}, {\n", + " name: \"add\",\n", + " description: \"Adds a and b\",\n", + " schema: z.object({\n", + " a: z.number(),\n", + " b: z.number(),\n", + " })\n", + "});\n", + "\n", + "const multiplyTool = tool(async ({ a, b }) => {\n", + " return a + b;\n", + "}, {\n", + " name: \"multiply\",\n", + " description: \"Multiplies a and b\",\n", + " schema: z.object({\n", + " a: z.number(),\n", + " b: z.number(),\n", + " })\n", + "});\n", + "\n", + "const tools = [adderTool, multiplyTool];\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's show a quick example of how disabling parallel tool calls work:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'add',\n", + " args: { a: 5, b: 3 },\n", + " type: 'tool_call',\n", + " id: 'call_5bKOYerdQU6J5ERJJYnzYsGn'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const llmWithTools = llm.bindTools(tools, { parallel_tool_calls: false });\n", + "\n", + "const result = await llmWithTools.invoke(\"Please call the first tool two times\");\n", + "\n", + "result.tool_calls;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, even though we explicitly told the model to call a tool twice, by disabling parallel tool calls the model was constrained to only calling one.\n", + "\n", + "Compare this to calling the model without passing `parallel_tool_calls` as false:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'add',\n", + " args: { a: 1, b: 2 },\n", + " type: 'tool_call',\n", + " id: 'call_Ni0tF0nNtY66BBwB5vEP6oI4'\n", + " },\n", + " {\n", + " name: 'add',\n", + " args: { a: 3, b: 4 },\n", + " type: 
'tool_call',\n", + " id: 'call_XucnTCfFqP1JBs3LtbOq5w3d'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const llmWithNoBoundParam = llm.bindTools(tools);\n", + "\n", + "const result2 = await llmWithNoBoundParam.invoke(\"Please call the first tool two times\");\n", + "\n", + "result2.tool_calls;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that you get two tool calls.\n", + "\n", + "You can also pass the parameter in at runtime like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'add',\n", + " args: { a: 1, b: 2 },\n", + " type: 'tool_call',\n", + " id: 'call_TWo6auul71NUg1p0suzBKARt'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const result3 = await llmWithNoBoundParam.invoke(\"Please call the first tool two times\", {\n", + " parallel_tool_calls: false,\n", + "});\n", + "\n", + "result3.tool_calls;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "- [How to: create custom tools](/docs/how_to/custom_tools)\n", + "- [How to: pass run time values to tools](/docs/how_to/tool_runtime)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const result3 = await llmWithNoBoundParam.invoke(\"Please call the first tool two times\", {\n", - " parallel_tool_calls: false,\n", - "});\n", - "\n", - "result3.tool_calls;" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "- [How to: create custom tools](/docs/how_to/custom_tools)\n", - "- [How to: pass run time values to tools](/docs/how_to/tool_runtime)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb b/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb index 6569208d97ec..f762ce774a2a 100644 --- a/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb +++ b/docs/core_docs/docs/how_to/tool_calls_multimodal.ipynb @@ -1,332 +1,332 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", - "metadata": {}, - "source": [ - "# How to call tools with multimodal data\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "\n", - ":::\n", - "\n", - "Here we demonstrate how to call tools with multimodal data, such as images.\n", - "\n", - "Some multimodal models, such as those that can reason over images or audio, support [tool calling](/docs/concepts/#tool-calling) features as well.\n", - "\n", - "To call tools using such models, simply bind tools to 
them in the [usual way](/docs/how_to/tool_calling), and invoke the model using content blocks of the desired type (e.g., containing image data).\n", - "\n", - "Below, we demonstrate examples using [OpenAI](/docs/integrations/platforms/openai) and [Anthropic](/docs/integrations/platforms/anthropic). We will use the same image and tool in all cases. Let's first select an image, and build a placeholder tool that expects as input the string \"sunny\", \"cloudy\", or \"rainy\". We will ask the models to describe the weather in the image.\n", - "\n", - ":::note\n", - "The `tool` function is available in `@langchain/core` version 0.2.7 and above.\n", - "\n", - "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) instead.\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", - "metadata": {}, - "outputs": [], - "source": [ - "import { tool } from \"@langchain/core/tools\";\n", - "import { z } from \"zod\";\n", - "\n", - "const imageUrl = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\";\n", - "\n", - "const weatherTool = tool(async ({ weather }) => {\n", - " console.log(weather);\n", - " return weather;\n", - "}, {\n", - " name: \"multiply\",\n", - " description: \"Describe the weather\",\n", - " schema: z.object({\n", - " weather: z.enum([\"sunny\", \"cloudy\", \"rainy\"])\n", - " }),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "8656018e-c56d-47d2-b2be-71e87827f90a", - "metadata": {}, - "source": [ - "## OpenAI\n", - "\n", - "For OpenAI, we can feed the image URL directly in a content block of type \"image_url\":" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: \"multiply\",\n", - " args: { weather: \"sunny\" },\n", - " id: \"call_ZaBYUggmrTSuDjcuZpMVKpMR\"\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - "}).bindTools([weatherTool]);\n", - "\n", - "const message = new HumanMessage({\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: \"describe the weather in this image\"\n", - " },\n", - " {\n", - " type: \"image_url\",\n", - " image_url: {\n", - " url: imageUrl\n", - " }\n", - " }\n", - " ],\n", - "});\n", - "\n", - "const response = await model.invoke([message]);\n", - "\n", - "console.log(response.tool_calls);" - ] - }, - { - "cell_type": "markdown", - "id": "e5738224-1109-4bf8-8976-ff1570dd1d46", - "metadata": {}, - "source": [ - "Note that we recover tool calls with parsed arguments in LangChain's [standard format](/docs/how_to/tool_calling) in the model response." 
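Because the recovered tool calls follow LangChain's standard `ToolCall` shape, they can be executed directly. The following is a minimal sketch that assumes the `weatherTool` and `response` variables from the preceding OpenAI cells, and that the model actually emitted a tool call:

```typescript
// Minimal sketch — assumes `weatherTool` and `response` from the cells above.
// Invoking a LangChain tool with a complete ToolCall returns a ToolMessage whose
// tool_call_id matches the originating call, so it can be appended to the chat history.
const toolCall = response.tool_calls?.[0];

if (toolCall !== undefined) {
  const toolMessage = await weatherTool.invoke(toolCall);
  console.log(toolMessage.content); // e.g. "sunny"
}
```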
- ] - }, - { - "cell_type": "markdown", - "id": "0cee63ff-e09f-4dd8-8323-912edbde94f6", - "metadata": {}, - "source": [ - "## Anthropic\n", - "\n", - "For Anthropic, we can format a base64-encoded image into a content block of type \"image\", as below:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d90c4590-71c8-42b1-99ff-03a9eca8082e", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", + "metadata": {}, + "source": [ + "# How to call tools with multimodal data\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "\n", + ":::\n", + "\n", + "Here we demonstrate how to call tools with multimodal data, such as images.\n", + "\n", + "Some multimodal models, such as those that can reason over images or audio, support [tool calling](/docs/concepts/#tool-calling) features as well.\n", + "\n", + "To call tools using such models, simply bind tools to them in the [usual way](/docs/how_to/tool_calling), and invoke the model using content blocks of the desired type (e.g., containing image data).\n", + "\n", + "Below, we demonstrate examples using [OpenAI](/docs/integrations/platforms/openai) and [Anthropic](/docs/integrations/platforms/anthropic). We will use the same image and tool in all cases. Let's first select an image, and build a placeholder tool that expects as input the string \"sunny\", \"cloudy\", or \"rainy\". We will ask the models to describe the weather in the image.\n", + "\n", + ":::note\n", + "The `tool` function is available in `@langchain/core` version 0.2.7 and above.\n", + "\n", + "If you are on an older version of core, you should use instantiate and use [`DynamicStructuredTool`](https://api.js.langchain.com/classes/langchain_core.tools.DynamicStructuredTool.html) instead.\n", + ":::" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: \"multiply\",\n", - " args: { weather: \"sunny\" },\n", - " id: \"toolu_01HLY1KmXZkKMn7Ar4ZtFuAM\"\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import * as fs from \"node:fs/promises\";\n", - "\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const imageData = await fs.readFile(\"../../data/sunny_day.jpeg\");\n", - "\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - "}).bindTools([weatherTool]);\n", - "\n", - "const message = new HumanMessage({\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: \"describe the weather in this image\",\n", - " },\n", - " {\n", - " type: \"image_url\",\n", - " image_url: {\n", - " url: `data:image/jpeg;base64,${imageData.toString(\"base64\")}`,\n", - " },\n", - " },\n", - " ],\n", - "});\n", - "\n", - "const response = await model.invoke([message]);\n", - "\n", - "console.log(response.tool_calls);" - ] - }, - { - "cell_type": "markdown", - "id": "a66b7d2f", - "metadata": {}, - "source": [ - "## Google Generative AI\n", - "\n", - "For Google GenAI, we can format a base64-encoded image into a content block of type \"image\", as below:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "f8184909", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", + "metadata": {}, + 
"outputs": [], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "\n", + "const imageUrl = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\";\n", + "\n", + "const weatherTool = tool(async ({ weather }) => {\n", + " console.log(weather);\n", + " return weather;\n", + "}, {\n", + " name: \"multiply\",\n", + " description: \"Describe the weather\",\n", + " schema: z.object({\n", + " weather: z.enum([\"sunny\", \"cloudy\", \"rainy\"])\n", + " }),\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ { name: 'multiply', args: { weather: 'sunny' } } ]\n" - ] - } - ], - "source": [ - "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", - "import axios from \"axios\";\n", - "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const axiosRes = await axios.get(imageUrl, { responseType: \"arraybuffer\" });\n", - "const base64 = btoa(\n", - " new Uint8Array(axiosRes.data).reduce(\n", - " (data, byte) => data + String.fromCharCode(byte),\n", - " ''\n", - " )\n", - ");\n", - "\n", - "const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-pro-latest\" }).bindTools([weatherTool]);\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"describe the weather in this image\"],\n", - " new MessagesPlaceholder(\"message\")\n", - "]);\n", - "\n", - "const response = await prompt.pipe(model).invoke({\n", - " message: new HumanMessage({\n", - " content: [{\n", - " type: \"media\",\n", - " mimeType: \"image/jpeg\",\n", - " data: base64,\n", - " }]\n", - " })\n", - "});\n", - "console.log(response.tool_calls);" - ] - }, - { - "cell_type": "markdown", - "id": "c5dd4ef4", - "metadata": {}, - "source": [ - "### Audio input\n", - "\n", - "Google's Gemini also supports audio inputs. In this next example we'll see how we can pass an audio file to the model, and get back a summary in structured format." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "c04c883e", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "8656018e-c56d-47d2-b2be-71e87827f90a", + "metadata": {}, + "source": [ + "## OpenAI\n", + "\n", + "For OpenAI, we can feed the image URL directly in a content block of type \"image_url\":" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: \"multiply\",\n", + " args: { weather: \"sunny\" },\n", + " id: \"call_ZaBYUggmrTSuDjcuZpMVKpMR\"\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + "}).bindTools([weatherTool]);\n", + "\n", + "const message = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"describe the weather in this image\"\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: imageUrl\n", + " }\n", + " }\n", + " ],\n", + "});\n", + "\n", + "const response = await model.invoke([message]);\n", + "\n", + "console.log(response.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "e5738224-1109-4bf8-8976-ff1570dd1d46", + "metadata": {}, + "source": [ + "Note that we recover tool calls with parsed arguments in LangChain's [standard format](/docs/how_to/tool_calling) in the model response." + ] + }, + { + "cell_type": "markdown", + "id": "0cee63ff-e09f-4dd8-8323-912edbde94f6", + "metadata": {}, + "source": [ + "## Anthropic\n", + "\n", + "For Anthropic, we can format a base64-encoded image into a content block of type \"image\", as below:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'summary_tool',\n", - " args: { summary: 'The video shows a person clapping their hands.' 
}\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "d90c4590-71c8-42b1-99ff-03a9eca8082e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: \"multiply\",\n", + " args: { weather: \"sunny\" },\n", + " id: \"toolu_01HLY1KmXZkKMn7Ar4ZtFuAM\"\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import * as fs from \"node:fs/promises\";\n", + "\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const imageData = await fs.readFile(\"../../data/sunny_day.jpeg\");\n", + "\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + "}).bindTools([weatherTool]);\n", + "\n", + "const message = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"describe the weather in this image\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: `data:image/jpeg;base64,${imageData.toString(\"base64\")}`,\n", + " },\n", + " },\n", + " ],\n", + "});\n", + "\n", + "const response = await model.invoke([message]);\n", + "\n", + "console.log(response.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "a66b7d2f", + "metadata": {}, + "source": [ + "## Google Generative AI\n", + "\n", + "For Google GenAI, we can format a base64-encoded image into a content block of type \"image\", as below:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "f8184909", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ { name: 'multiply', args: { weather: 'sunny' } } ]\n" + ] + } + ], + "source": [ + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", + "import axios from \"axios\";\n", + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const axiosRes = await axios.get(imageUrl, { responseType: \"arraybuffer\" });\n", + "const base64 = btoa(\n", + " new Uint8Array(axiosRes.data).reduce(\n", + " (data, byte) => data + String.fromCharCode(byte),\n", + " ''\n", + " )\n", + ");\n", + "\n", + "const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-pro-latest\" }).bindTools([weatherTool]);\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"describe the weather in this image\"],\n", + " new MessagesPlaceholder(\"message\")\n", + "]);\n", + "\n", + "const response = await prompt.pipe(model).invoke({\n", + " message: new HumanMessage({\n", + " content: [{\n", + " type: \"media\",\n", + " mimeType: \"image/jpeg\",\n", + " data: base64,\n", + " }]\n", + " })\n", + "});\n", + "console.log(response.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "c5dd4ef4", + "metadata": {}, + "source": [ + "### Audio input\n", + "\n", + "Google's Gemini also supports audio inputs. In this next example we'll see how we can pass an audio file to the model, and get back a summary in structured format." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "c04c883e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'summary_tool',\n", + " args: { summary: 'The video shows a person clapping their hands.' 
}\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { SystemMessage } from \"@langchain/core/messages\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const summaryTool = tool((input) => {\n", + " return input.summary;\n", + "}, {\n", + " name: \"summary_tool\",\n", + " description: \"Log the summary of the content\",\n", + " schema: z.object({\n", + " summary: z.string().describe(\"The summary of the content to log\")\n", + " }),\n", + "});\n", + "\n", + "const audioUrl = \"https://www.pacdv.com/sounds/people_sound_effects/applause-1.wav\";\n", + "\n", + "const axiosRes = await axios.get(audioUrl, { responseType: \"arraybuffer\" });\n", + "const base64 = btoa(\n", + " new Uint8Array(axiosRes.data).reduce(\n", + " (data, byte) => data + String.fromCharCode(byte),\n", + " ''\n", + " )\n", + ");\n", + "\n", + "const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-pro-latest\" }).bindTools([summaryTool]);\n", + "\n", + "const response = await model.invoke([\n", + " new SystemMessage(\"Summarize this content. always use the summary_tool in your response\"),\n", + " new HumanMessage({\n", + " content: [{\n", + " type: \"media\",\n", + " mimeType: \"audio/wav\",\n", + " data: base64,\n", + " }]\n", + "})]);\n", + "\n", + "console.log(response.tool_calls);" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { SystemMessage } from \"@langchain/core/messages\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const summaryTool = tool((input) => {\n", - " return input.summary;\n", - "}, {\n", - " name: \"summary_tool\",\n", - " description: \"Log the summary of the content\",\n", - " schema: z.object({\n", - " summary: z.string().describe(\"The summary of the content to log\")\n", - " }),\n", - "});\n", - "\n", - "const audioUrl = \"https://www.pacdv.com/sounds/people_sound_effects/applause-1.wav\";\n", - "\n", - "const axiosRes = await axios.get(audioUrl, { responseType: \"arraybuffer\" });\n", - "const base64 = btoa(\n", - " new Uint8Array(axiosRes.data).reduce(\n", - " (data, byte) => data + String.fromCharCode(byte),\n", - " ''\n", - " )\n", - ");\n", - "\n", - "const model = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-pro-latest\" }).bindTools([summaryTool]);\n", - "\n", - "const response = await model.invoke([\n", - " new SystemMessage(\"Summarize this content. 
always use the summary_tool in your response\"),\n", - " new HumanMessage({\n", - " content: [{\n", - " type: \"media\",\n", - " mimeType: \"audio/wav\",\n", - " data: base64,\n", - " }]\n", - "})]);\n", - "\n", - "console.log(response.tool_calls);" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_choice.ipynb b/docs/core_docs/docs/how_to/tool_choice.ipynb index a60491045a2c..b16652be5e61 100644 --- a/docs/core_docs/docs/how_to/tool_choice.ipynb +++ b/docs/core_docs/docs/how_to/tool_choice.ipynb @@ -1,176 +1,176 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to force tool calling behavior\n", - "\n", - "```{=mdx}\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [How to use a model to call tools](/docs/how_to/tool_calling)\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "In order to force our LLM to select a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { tool } from '@langchain/core/tools';\n", - "import { z } from 'zod';\n", - "\n", - "const add = tool((input) => {\n", - " return `${input.a + input.b}`\n", - "}, {\n", - " name: \"add\",\n", - " description: \"Adds a and b.\",\n", - " schema: z.object({\n", - " a: z.number(),\n", - " b: z.number(),\n", - " })\n", - "})\n", - "\n", - "const multiply = tool((input) => {\n", - " return `${input.a * input.b}`\n", - "}, {\n", - " name: \"Multiply\",\n", - " description: \"Multiplies a and b.\",\n", - " schema: z.object({\n", - " a: z.number(),\n", - " b: z.number(),\n", - " })\n", - "})\n", - "\n", - "const tools = [add, multiply]" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from '@langchain/openai';\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-3.5-turbo\",\n", - "})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For example, we can force our tool to call the multiply tool by using the following code:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " \"name\": \"Multiply\",\n", - " \"args\": {\n", - " \"a\": 2,\n", - " \"b\": 4\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_d5isFbUkn17Wjr6yEtNz7dDF\"\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const llmForcedToMultiply = llm.bindTools(tools, {\n", - " tool_choice: \"Multiply\",\n", - "})\n", - "const multiplyResult = await llmForcedToMultiply.invoke(\"what is 2 + 4\");\n", - "console.log(JSON.stringify(multiplyResult.tool_calls, null, 2));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Even if 
we pass it something that doesn't require multiplcation - it will still call the tool!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also just force our tool to select at least one of our tools by passing `\"any\"` (or for OpenAI models, the equivalent, `\"required\"`) to the `tool_choice` parameter." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to force tool calling behavior\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [How to use a model to call tools](/docs/how_to/tool_calling)\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "In order to force our LLM to select a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " \"name\": \"add\",\n", - " \"args\": {\n", - " \"a\": 2,\n", - " \"b\": 3\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_La72g7Aj0XHG0pfPX6Dwg2vT\"\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { tool } from '@langchain/core/tools';\n", + "import { z } from 'zod';\n", + "\n", + "const add = tool((input) => {\n", + " return `${input.a + input.b}`\n", + "}, {\n", + " name: \"add\",\n", + " description: \"Adds a and b.\",\n", + " schema: z.object({\n", + " a: z.number(),\n", + " b: z.number(),\n", + " })\n", + "})\n", + "\n", + "const multiply = tool((input) => {\n", + " return `${input.a * input.b}`\n", + "}, {\n", + " name: \"Multiply\",\n", + " description: \"Multiplies a and b.\",\n", + " schema: z.object({\n", + " a: z.number(),\n", + " b: z.number(),\n", + " })\n", + "})\n", + "\n", + "const tools = [add, multiply]" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from '@langchain/openai';\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-3.5-turbo\",\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For example, we can force our tool to call the multiply tool by using the following code:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " \"name\": \"Multiply\",\n", + " \"args\": {\n", + " \"a\": 2,\n", + " \"b\": 4\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_d5isFbUkn17Wjr6yEtNz7dDF\"\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const llmForcedToMultiply = llm.bindTools(tools, {\n", + " tool_choice: \"Multiply\",\n", + "})\n", + "const multiplyResult = await llmForcedToMultiply.invoke(\"what is 2 + 4\");\n", + "console.log(JSON.stringify(multiplyResult.tool_calls, null, 2));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Even if we pass it something that doesn't require multiplcation - it will still call the tool!" 
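For instance, a prompt with no arithmetic in it at all will still come back as a `Multiply` call. Here is a minimal sketch reusing the `llmForcedToMultiply` binding from above; the arguments the model invents for an unrelated prompt will vary:

```typescript
// Minimal sketch — reuses `llmForcedToMultiply` from the cell above.
// Because tool_choice pins the "Multiply" tool, the model must call it even though
// the prompt has nothing to multiply; the args it fills in are essentially arbitrary.
const unrelatedResult = await llmForcedToMultiply.invoke("What color is the sky?");

console.log(JSON.stringify(unrelatedResult.tool_calls, null, 2));
```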
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also just force our tool to select at least one of our tools by passing `\"any\"` (or for OpenAI models, the equivalent, `\"required\"`) to the `tool_choice` parameter." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " \"name\": \"add\",\n", + " \"args\": {\n", + " \"a\": 2,\n", + " \"b\": 3\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_La72g7Aj0XHG0pfPX6Dwg2vT\"\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const llmForcedToUseTool = llm.bindTools(tools, {\n", + " tool_choice: \"any\",\n", + "})\n", + "const anyToolResult = await llmForcedToUseTool.invoke(\"What day is today?\");\n", + "console.log(JSON.stringify(anyToolResult.tool_calls, null, 2));" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const llmForcedToUseTool = llm.bindTools(tools, {\n", - " tool_choice: \"any\",\n", - "})\n", - "const anyToolResult = await llmForcedToUseTool.invoke(\"What day is today?\");\n", - "console.log(JSON.stringify(anyToolResult.tool_calls, null, 2));" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_configure.ipynb b/docs/core_docs/docs/how_to/tool_configure.ipynb index 83e7c2944252..11911f921730 100644 --- a/docs/core_docs/docs/how_to/tool_configure.ipynb +++ b/docs/core_docs/docs/how_to/tool_configure.ipynb @@ -1,114 +1,114 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to access the RunnableConfig from a tool\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [Custom tools](/docs/how_to/custom_tools)\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", - "\n", - ":::\n", - "```\n", - "\n", - "Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. 
In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://api.js.langchain.com/interfaces/langchain_core.runnables.RunnableConfig.html) object.\n", - "\n", - "This guide covers how to do this for custom tools created in different ways.\n", - "\n", - "## From the `tool` method\n", - "\n", - "Accessing the `RunnableConfig` object for a custom tool created with the [`tool`](https://api.js.langchain.com/functions/langchain_core.tools.tool-1.html) helper method is simple - it's always the second parameter passed into your custom function. Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "import type { RunnableConfig } from \"@langchain/core/runnables\";\n", - "\n", - "const reverseTool = tool(\n", - " async (input: { text: string }, config?: RunnableConfig) => {\n", - " const originalString = input.text + (config?.configurable?.additional_field ?? \"\");\n", - " return originalString.split(\"\").reverse().join(\"\");\n", - " }, {\n", - " name: \"reverse\",\n", - " description: \"A test tool that combines input text with a configurable parameter.\",\n", - " schema: z.object({\n", - " text: z.string()\n", - " }),\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then, if we invoke the tool with a `config` containing a `configurable` field, we can see that `additional_field` is passed through correctly:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to access the RunnableConfig from a tool\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [Custom tools](/docs/how_to/custom_tools)\n", + "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n", + "\n", + ":::\n", + "```\n", + "\n", + "Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://api.js.langchain.com/interfaces/langchain_core.runnables.RunnableConfig.html) object.\n", + "\n", + "This guide covers how to do this for custom tools created in different ways.\n", + "\n", + "## From the `tool` method\n", + "\n", + "Accessing the `RunnableConfig` object for a custom tool created with the [`tool`](https://api.js.langchain.com/functions/langchain_core.tools.tool-1.html) helper method is simple - it's always the second parameter passed into your custom function. 
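When the tool itself needs to invoke another runnable — a chat model, a retriever, and so on — pass that same `config` object through to the nested `invoke()` call so the sub-run is traced as a child run and inherits callbacks, tags, and metadata. Below is a rough sketch of that pattern; the `ChatOpenAI` model and the `summarize` tool name here are just illustrative placeholders:

```typescript
// Rough sketch — the ChatOpenAI model and tool name are illustrative placeholders.
import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { ChatOpenAI } from "@langchain/openai";
import type { RunnableConfig } from "@langchain/core/runnables";

const summarizeTool = tool(
  async (input: { text: string }, config?: RunnableConfig) => {
    const model = new ChatOpenAI({ model: "gpt-4o-mini" });
    // Forwarding the tool's own config keeps the nested model call wired into the
    // same trace and callback handlers as the tool invocation itself.
    const result = await model.invoke(
      `Summarize this in one sentence: ${input.text}`,
      config
    );
    return result.content;
  },
  {
    name: "summarize",
    description: "Summarizes the provided text using a nested chat model call.",
    schema: z.object({
      text: z.string(),
    }),
  }
);
```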
Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import type { RunnableConfig } from \"@langchain/core/runnables\";\n", + "\n", + "const reverseTool = tool(\n", + " async (input: { text: string }, config?: RunnableConfig) => {\n", + " const originalString = input.text + (config?.configurable?.additional_field ?? \"\");\n", + " return originalString.split(\"\").reverse().join(\"\");\n", + " }, {\n", + " name: \"reverse\",\n", + " description: \"A test tool that combines input text with a configurable parameter.\",\n", + " schema: z.object({\n", + " text: z.string()\n", + " }),\n", + " }\n", + ");" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "321cba\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, if we invoke the tool with a `config` containing a `configurable` field, we can see that `additional_field` is passed through correctly:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "321cba\n" + ] + } + ], + "source": [ + "await reverseTool.invoke(\n", + " {text: \"abc\"}, {configurable: {additional_field: \"123\"}}\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now seen how to configure and stream events from within a tool. Next, check out the following guides for more on using tools:\n", + "\n", + "- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n", + "- Building [tool-using chains and agents](/docs/how_to#tools)\n", + "- Getting [structured outputs](/docs/how_to/structured_output/) from models" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "await reverseTool.invoke(\n", - " {text: \"abc\"}, {configurable: {additional_field: \"123\"}}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now seen how to configure and stream events from within a tool. 
Next, check out the following guides for more on using tools:\n", - "\n", - "- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n", - "- Building [tool-using chains and agents](/docs/how_to#tools)\n", - "- Getting [structured outputs](/docs/how_to/structured_output/) from models" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_results_pass_to_model.ipynb b/docs/core_docs/docs/how_to/tool_results_pass_to_model.ipynb index b1573bcf2779..5f9720f11474 100644 --- a/docs/core_docs/docs/how_to/tool_results_pass_to_model.ipynb +++ b/docs/core_docs/docs/how_to/tool_results_pass_to_model.ipynb @@ -1,370 +1,370 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to pass tool outputs to chat models\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [Tool calling](/docs/concepts/#functiontool-calling)\n", - "- [Using chat models to call tools](/docs/how_to/tool_calling)\n", - "- [Defining custom tools](/docs/how_to/custom_tools/)\n", - "\n", - ":::\n", - "```\n", - "\n", - "Some models are capable of [**tool calling**](/docs/concepts/#functiontool-calling) - generating arguments that conform to a specific user-provided schema. This guide will demonstrate how to use those tool cals to actually call a function and properly pass the results back to the model.\n", - "\n", - "![](../../static/img/tool_invocation.png)\n", - "\n", - "![](../../static/img/tool_results.png)\n", - "\n", - "First, let's define our tools and our model:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const addTool = tool(async ({ a, b }) => {\n", - " return a + b;\n", - "}, {\n", - " name: \"add\",\n", - " schema: z.object({\n", - " a: z.number(),\n", - " b: z.number(),\n", - " }),\n", - " description: \"Adds a and b.\",\n", - "});\n", - "\n", - "const multiplyTool = tool(async ({ a, b }) => {\n", - " return a * b;\n", - "}, {\n", - " name: \"multiply\",\n", - " schema: z.object({\n", - " a: z.number(),\n", - " b: z.number(),\n", - " }),\n", - " description: \"Multiplies a and b.\",\n", - "});\n", - "\n", - "const tools = [addTool, multiplyTool];" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, let's get the model to call a tool. 
We'll add it to a list of messages that we'll treat as conversation history:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9p1NbC7sfZP0FE0bNfFiVYbPuWivg\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " },\n", - " {\n", - " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 50,\n", - " \"promptTokens\": 87,\n", - " \"totalTokens\": 137\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_400f27fa1f\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"multiply\",\n", - " \"args\": {\n", - " \"a\": 3,\n", - " \"b\": 12\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\"\n", - " },\n", - " {\n", - " \"name\": \"add\",\n", - " \"args\": {\n", - " \"a\": 11,\n", - " \"b\": 49\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 87,\n", - " \"output_tokens\": 50,\n", - " \"total_tokens\": 137\n", - " }\n", - "}\n", - "2\n" - ] - } - ], - "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const llmWithTools = llm.bindTools(tools);\n", - "\n", - "const messages = [\n", - " new HumanMessage(\"What is 3 * 12? Also, what is 11 + 49?\"),\n", - "];\n", - "\n", - "const aiMessage = await llmWithTools.invoke(messages);\n", - "\n", - "console.log(aiMessage);\n", - "\n", - "messages.push(aiMessage);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next let's invoke the tool functions using the args the model populated!\n", - "\n", - "Conveniently, if we invoke a LangChain `Tool` with a `ToolCall`, we'll automatically get back a `ToolMessage` that can be fed back to the model:\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "\n", - "This functionality requires `@langchain/core>=0.2.16`. Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", - "\n", - "If you are on earlier versions of `@langchain/core`, you will need to access construct a `ToolMessage` manually using fields from the tool call.\n", - "\n", - ":::\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to pass tool outputs to chat models\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [Tool calling](/docs/concepts/tool_calling)\n", + "- [Using chat models to call tools](/docs/how_to/tool_calling)\n", + "- [Defining custom tools](/docs/how_to/custom_tools/)\n", + "\n", + ":::\n", + "```\n", + "\n", + "Some models are capable of [**tool calling**](/docs/concepts/tool_calling) - generating arguments that conform to a specific user-provided schema. 
This guide will demonstrate how to use those tool cals to actually call a function and properly pass the results back to the model.\n", + "\n", + "![](../../static/img/tool_invocation.png)\n", + "\n", + "![](../../static/img/tool_results.png)\n", + "\n", + "First, let's define our tools and our model:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " HumanMessage {\n", - " \"content\": \"What is 3 * 12? Also, what is 11 + 49?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-9p1NbC7sfZP0FE0bNfFiVYbPuWivg\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " },\n", - " {\n", - " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 50,\n", - " \"promptTokens\": 87,\n", - " \"totalTokens\": 137\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_400f27fa1f\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"multiply\",\n", - " \"args\": {\n", - " \"a\": 3,\n", - " \"b\": 12\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\"\n", - " },\n", - " {\n", - " \"name\": \"add\",\n", - " \"args\": {\n", - " \"a\": 11,\n", - " \"b\": 49\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 87,\n", - " \"output_tokens\": 50,\n", - " \"total_tokens\": 137\n", - " }\n", - " },\n", - " ToolMessage {\n", - " \"content\": \"36\",\n", - " \"name\": \"multiply\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\"\n", - " },\n", - " ToolMessage {\n", - " \"content\": \"60\",\n", - " \"name\": \"add\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\"\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const toolsByName = {\n", - " add: addTool,\n", - " multiply: multiplyTool,\n", - "}\n", - "\n", - "for (const toolCall of aiMessage.tool_calls) {\n", - " const selectedTool = toolsByName[toolCall.name];\n", - " const toolMessage = await selectedTool.invoke(toolCall);\n", - " messages.push(toolMessage);\n", - "}\n", - "\n", - "console.log(messages);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And finally, we'll invoke the model with the tool results. 
The model will use this information to generate a final answer to our original query:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const addTool = tool(async ({ a, b }) => {\n", + " return a + b;\n", + "}, {\n", + " name: \"add\",\n", + " schema: z.object({\n", + " a: z.number(),\n", + " b: z.number(),\n", + " }),\n", + " description: \"Adds a and b.\",\n", + "});\n", + "\n", + "const multiplyTool = tool(async ({ a, b }) => {\n", + " return a * b;\n", + "}, {\n", + " name: \"multiply\",\n", + " schema: z.object({\n", + " a: z.number(),\n", + " b: z.number(),\n", + " }),\n", + " description: \"Multiplies a and b.\",\n", + "});\n", + "\n", + "const tools = [addTool, multiplyTool];" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9p1NttGpWjx1cQoVIDlMhumYq12Pe\",\n", - " \"content\": \"3 * 12 is 36, and 11 + 49 is 60.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 19,\n", - " \"promptTokens\": 153,\n", - " \"totalTokens\": 172\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_18cc0f1fa0\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 153,\n", - " \"output_tokens\": 19,\n", - " \"total_tokens\": 172\n", - " }\n", - "}\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's get the model to call a tool. 
We'll add it to a list of messages that we'll treat as conversation history:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9p1NbC7sfZP0FE0bNfFiVYbPuWivg\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " },\n", + " {\n", + " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 50,\n", + " \"promptTokens\": 87,\n", + " \"totalTokens\": 137\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_400f27fa1f\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"multiply\",\n", + " \"args\": {\n", + " \"a\": 3,\n", + " \"b\": 12\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\"\n", + " },\n", + " {\n", + " \"name\": \"add\",\n", + " \"args\": {\n", + " \"a\": 11,\n", + " \"b\": 49\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 87,\n", + " \"output_tokens\": 50,\n", + " \"total_tokens\": 137\n", + " }\n", + "}\n", + "2\n" + ] + } + ], + "source": [ + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const llmWithTools = llm.bindTools(tools);\n", + "\n", + "const messages = [\n", + " new HumanMessage(\"What is 3 * 12? Also, what is 11 + 49?\"),\n", + "];\n", + "\n", + "const aiMessage = await llmWithTools.invoke(messages);\n", + "\n", + "console.log(aiMessage);\n", + "\n", + "messages.push(aiMessage);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next let's invoke the tool functions using the args the model populated!\n", + "\n", + "Conveniently, if we invoke a LangChain `Tool` with a `ToolCall`, we'll automatically get back a `ToolMessage` that can be fed back to the model:\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "\n", + "This functionality requires `@langchain/core>=0.2.16`. Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", + "\n", + "If you are on earlier versions of `@langchain/core`, you will need to access construct a `ToolMessage` manually using fields from the tool call.\n", + "\n", + ":::\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " HumanMessage {\n", + " \"content\": \"What is 3 * 12? 
Also, what is 11 + 49?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-9p1NbC7sfZP0FE0bNfFiVYbPuWivg\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " },\n", + " {\n", + " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 50,\n", + " \"promptTokens\": 87,\n", + " \"totalTokens\": 137\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_400f27fa1f\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"multiply\",\n", + " \"args\": {\n", + " \"a\": 3,\n", + " \"b\": 12\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\"\n", + " },\n", + " {\n", + " \"name\": \"add\",\n", + " \"args\": {\n", + " \"a\": 11,\n", + " \"b\": 49\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 87,\n", + " \"output_tokens\": 50,\n", + " \"total_tokens\": 137\n", + " }\n", + " },\n", + " ToolMessage {\n", + " \"content\": \"36\",\n", + " \"name\": \"multiply\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_RbUuLMYf3vgcdSQ8bhy1D5Ty\"\n", + " },\n", + " ToolMessage {\n", + " \"content\": \"60\",\n", + " \"name\": \"add\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_Bzz1qgQjTlQIHMcEaDAdoH8X\"\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const toolsByName = {\n", + " add: addTool,\n", + " multiply: multiplyTool,\n", + "}\n", + "\n", + "for (const toolCall of aiMessage.tool_calls) {\n", + " const selectedTool = toolsByName[toolCall.name];\n", + " const toolMessage = await selectedTool.invoke(toolCall);\n", + " messages.push(toolMessage);\n", + "}\n", + "\n", + "console.log(messages);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And finally, we'll invoke the model with the tool results. 
The model will use this information to generate a final answer to our original query:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9p1NttGpWjx1cQoVIDlMhumYq12Pe\",\n", + " \"content\": \"3 * 12 is 36, and 11 + 49 is 60.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 153,\n", + " \"totalTokens\": 172\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_18cc0f1fa0\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 153,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 172\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "await llmWithTools.invoke(messages);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that each `ToolMessage` must include a `tool_call_id` that matches an `id` in the original tool calls that the model generates. This helps the model match tool responses with tool calls.\n", + "\n", + "Tool calling agents, like those in [LangGraph](https://langchain-ai.github.io/langgraphjs/tutorials/introduction/), use this basic flow to answer queries and solve tasks.\n", + "\n", + "## Related\n", + "\n", + "You've now seen how to pass tool calls back to a model.\n", + "\n", + "These guides may interest you next:\n", + "\n", + "- [LangGraph quickstart](https://langchain-ai.github.io/langgraphjs/tutorials/introduction/)\n", + "- Few shot prompting [with tools](/docs/how_to/tools_few_shot/)\n", + "- Stream [tool calls](/docs/how_to/tool_streaming/)\n", + "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", + "- Getting [structured outputs](/docs/how_to/structured_output/) from models" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "await llmWithTools.invoke(messages);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that each `ToolMessage` must include a `tool_call_id` that matches an `id` in the original tool calls that the model generates. 
This helps the model match tool responses with tool calls.\n", - "\n", - "Tool calling agents, like those in [LangGraph](https://langchain-ai.github.io/langgraphjs/tutorials/introduction/), use this basic flow to answer queries and solve tasks.\n", - "\n", - "## Related\n", - "\n", - "You've now seen how to pass tool calls back to a model.\n", - "\n", - "These guides may interest you next:\n", - "\n", - "- [LangGraph quickstart](https://langchain-ai.github.io/langgraphjs/tutorials/introduction/)\n", - "- Few shot prompting [with tools](/docs/how_to/tools_few_shot/)\n", - "- Stream [tool calls](/docs/how_to/tool_streaming/)\n", - "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", - "- Getting [structured outputs](/docs/how_to/structured_output/) from models" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_runtime.ipynb b/docs/core_docs/docs/how_to/tool_runtime.ipynb index 4f27f5ef13e4..76f3e85b27bb 100644 --- a/docs/core_docs/docs/how_to/tool_runtime.ipynb +++ b/docs/core_docs/docs/how_to/tool_runtime.ipynb @@ -1,398 +1,398 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to pass run time values to tools\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [How to create tools](/docs/how_to/custom_tools)\n", - "- [How to use a model to call tools](/docs/how_to/tool_calling/)\n", - ":::\n", - "\n", - ":::info Supported models\n", - "\n", - "This how-to guide uses models with native tool calling capability.\n", - "You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n", - "\n", - ":::\n", - "```\n", - "\n", - "You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n", - "\n", - "Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n", - "\n", - "Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({ model: \"gpt-4o-mini\" })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using context variables\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "This functionality was added in `@langchain/core>=0.3.10`. 
If you are using the LangSmith SDK separately in your project, we also recommend upgrading to `langsmith>=0.1.65`. Please make sure your packages are up to date.\n", - "\n", - "It also requires [`async_hooks`](https://nodejs.org/api/async_hooks.html) support, which is not supported in all environments.\n", - ":::\n", - "```\n", - "\n", - "One way to solve this problem is by using **context variables**. Context variables are a powerful feature that allows you to set values at a higher level of your application, then access them within child runnable (such as tools) called from that level.\n", - "\n", - "They work outside of traditional scoping rules, so you don't need to have a direct reference to the declared variable to access its value.\n", - "\n", - "Below, we declare a tool that updates a central `userToPets` state based on a context variable called `userId`. Note that this `userId` is not part of the tool schema or input:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "import { getContextVariable } from \"@langchain/core/context\";\n", - "\n", - "let userToPets: Record = {};\n", - "\n", - "const updateFavoritePets = tool(async (input) => {\n", - " const userId = getContextVariable(\"userId\");\n", - " if (userId === undefined) {\n", - " throw new Error(`No \"userId\" found in current context. Remember to call \"setContextVariable('userId', value)\";`);\n", - " }\n", - " userToPets[userId] = input.pets;\n", - " return \"update_favorite_pets called.\"\n", - "}, {\n", - " name: \"update_favorite_pets\",\n", - " description: \"add to the list of favorite pets.\",\n", - " schema: z.object({\n", - " pets: z.array(z.string())\n", - " }),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you were to invoke the above tool before setting a context variable at a higher level, `userId` would be `undefined`:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "Error: No \"userId\" found in current context. 
Remember to call \"setContextVariable('userId', value)\";\n", - " at updateFavoritePets.name (evalmachine.:14:15)\n", - " at /Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:329:33\n", - " at AsyncLocalStorage.run (node:async_hooks:346:14)\n", - " at AsyncLocalStorageProvider.runWithConfig (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/singletons/index.cjs:58:24)\n", - " at /Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:325:68\n", - " at new Promise ()\n", - " at DynamicStructuredTool.func (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:321:20)\n", - " at DynamicStructuredTool._call (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:283:21)\n", - " at DynamicStructuredTool.call (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:111:33)\n", - " at async evalmachine.:3:22\n" - ] - } - ], - "source": [ - "await updateFavoritePets.invoke({ pets: [\"cat\", \"dog\" ]})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Instead, set a context variable with a parent of where the tools are invoked:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "import { setContextVariable } from \"@langchain/core/context\";\n", - "import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\n", - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "\n", - "const handleRunTimeRequestRunnable = RunnableLambda.from(async (params: {\n", - " userId: string;\n", - " query: string;\n", - " llm: BaseChatModel;\n", - "}) => {\n", - " const { userId, query, llm } = params;\n", - " if (!llm.bindTools) {\n", - " throw new Error(\"Language model does not support tools.\");\n", - " }\n", - " // Set a context variable accessible to any child runnables called within this one.\n", - " // You can also set context variables at top level that act as globals.\n", - " setContextVariable(\"userId\", userId);\n", - " const tools = [updateFavoritePets];\n", - " const llmWithTools = llm.bindTools(tools);\n", - " const modelResponse = await llmWithTools.invoke(query);\n", - " // For simplicity, skip checking the tool call's name field and assume\n", - " // that the model is calling the \"updateFavoritePets\" tool\n", - " if (modelResponse.tool_calls.length > 0) {\n", - " return updateFavoritePets.invoke(modelResponse.tool_calls[0]);\n", - " } else {\n", - " return \"No tool invoked.\";\n", - " }\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And when our method invokes the tools, you will see that the tool properly access the previously set `userId` context variable and runs successfully:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to pass run time values to tools\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [How to create tools](/docs/how_to/custom_tools)\n", + "- [How to use a model to call tools](/docs/how_to/tool_calling/)\n", + ":::\n", + "\n", + ":::info Supported models\n", + "\n", + "This how-to guide uses models with native tool calling capability.\n", + "You can find a [list of all models that support tool 
calling](/docs/integrations/chat/).\n", + "\n", + ":::\n", + "```\n", + "\n", + "You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n", + "\n", + "Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n", + "\n", + "Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "ToolMessage {\n", - " \"content\": \"update_favorite_pets called.\",\n", - " \"name\": \"update_favorite_pets\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_vsD2DbSpDquOtmFlOtbUME6h\"\n", - "}\n" - ] - } - ], - "source": [ - "await handleRunTimeRequestRunnable.invoke({\n", - " userId: \"brace\",\n", - " query: \"my favorite animals are cats and parrots.\",\n", - " llm: llm\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And have additionally updated the `userToPets` object with a key matching the `userId` we passed, `\"brace\"`:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ brace: [ 'cats', 'parrots' ] }\n" - ] - } - ], - "source": [ - "console.log(userToPets);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Without context variables\n", - "\n", - "If you are on an earlier version of core or an environment that does not support `async_hooks`, you can use the following design pattern that creates the tool dynamically at run time and binds to them appropriate values.\n", - "\n", - "The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n", - "this information may be the user ID as resolved from the request itself." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "userToPets = {};\n", - "\n", - "function generateToolsForUser(userId: string) {\n", - " const updateFavoritePets = tool(async (input) => {\n", - " userToPets[userId] = input.pets;\n", - " return \"update_favorite_pets called.\"\n", - " }, {\n", - " name: \"update_favorite_pets\",\n", - " description: \"add to the list of favorite pets.\",\n", - " schema: z.object({\n", - " pets: z.array(z.string())\n", - " }),\n", - " });\n", - " // You can declare and return additional tools as well:\n", - " return [updateFavoritePets];\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Verify that the tool works correctly" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o-mini\" })" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ cobb: [ 'tiger', 'wolf' ] }\n" - ] - } - ], - "source": [ - "const [updatePets] = generateToolsForUser(\"cobb\");\n", - "\n", - "await updatePets.invoke({ pets: [\"tiger\", \"wolf\"] });\n", - "\n", - "console.log(userToPets);" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\n", - "\n", - "async function handleRunTimeRequest(userId: string, query: string, llm: BaseChatModel): Promise {\n", - " if (!llm.bindTools) {\n", - " throw new Error(\"Language model does not support tools.\");\n", - " }\n", - " const tools = generateToolsForUser(userId);\n", - " const llmWithTools = llm.bindTools(tools);\n", - " return llmWithTools.invoke(query);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists. You can see that `user_id` is not among the params the LLM generates:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using context variables\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "This functionality was added in `@langchain/core>=0.3.10`. If you are using the LangSmith SDK separately in your project, we also recommend upgrading to `langsmith>=0.1.65`. Please make sure your packages are up to date.\n", + "\n", + "It also requires [`async_hooks`](https://nodejs.org/api/async_hooks.html) support, which is not supported in all environments.\n", + ":::\n", + "```\n", + "\n", + "One way to solve this problem is by using **context variables**. Context variables are a powerful feature that allows you to set values at a higher level of your application, then access them within child runnable (such as tools) called from that level.\n", + "\n", + "They work outside of traditional scoping rules, so you don't need to have a direct reference to the declared variable to access its value.\n", + "\n", + "Below, we declare a tool that updates a central `userToPets` state based on a context variable called `userId`. 
Note that this `userId` is not part of the tool schema or input:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { getContextVariable } from \"@langchain/core/context\";\n", + "\n", + "let userToPets: Record = {};\n", + "\n", + "const updateFavoritePets = tool(async (input) => {\n", + " const userId = getContextVariable(\"userId\");\n", + " if (userId === undefined) {\n", + " throw new Error(`No \"userId\" found in current context. Remember to call \"setContextVariable('userId', value)\";`);\n", + " }\n", + " userToPets[userId] = input.pets;\n", + " return \"update_favorite_pets called.\"\n", + "}, {\n", + " name: \"update_favorite_pets\",\n", + " description: \"add to the list of favorite pets.\",\n", + " schema: z.object({\n", + " pets: z.array(z.string())\n", + " }),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you were to invoke the above tool before setting a context variable at a higher level, `userId` would be `undefined`:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error: No \"userId\" found in current context. Remember to call \"setContextVariable('userId', value)\";\n", + " at updateFavoritePets.name (evalmachine.:14:15)\n", + " at /Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:329:33\n", + " at AsyncLocalStorage.run (node:async_hooks:346:14)\n", + " at AsyncLocalStorageProvider.runWithConfig (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/singletons/index.cjs:58:24)\n", + " at /Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:325:68\n", + " at new Promise ()\n", + " at DynamicStructuredTool.func (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:321:20)\n", + " at DynamicStructuredTool._call (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:283:21)\n", + " at DynamicStructuredTool.call (/Users/jacoblee/langchain/langchainjs/langchain-core/dist/tools/index.cjs:111:33)\n", + " at async evalmachine.:3:22\n" + ] + } + ], + "source": [ + "await updateFavoritePets.invoke({ pets: [\"cat\", \"dog\" ]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Instead, set a context variable with a parent of where the tools are invoked:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import { setContextVariable } from \"@langchain/core/context\";\n", + "import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\n", + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "\n", + "const handleRunTimeRequestRunnable = RunnableLambda.from(async (params: {\n", + " userId: string;\n", + " query: string;\n", + " llm: BaseChatModel;\n", + "}) => {\n", + " const { userId, query, llm } = params;\n", + " if (!llm.bindTools) {\n", + " throw new Error(\"Language model does not support tools.\");\n", + " }\n", + " // Set a context variable accessible to any child runnables called within this one.\n", + " // You can also set context variables at top level that act as globals.\n", + " setContextVariable(\"userId\", userId);\n", + " const tools = [updateFavoritePets];\n", + " const llmWithTools = llm.bindTools(tools);\n", + " const modelResponse = await 
llmWithTools.invoke(query);\n", + " // For simplicity, skip checking the tool call's name field and assume\n", + " // that the model is calling the \"updateFavoritePets\" tool\n", + " if (modelResponse.tool_calls.length > 0) {\n", + " return updateFavoritePets.invoke(modelResponse.tool_calls[0]);\n", + " } else {\n", + " return \"No tool invoked.\";\n", + " }\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And when our method invokes the tools, you will see that the tool properly access the previously set `userId` context variable and runs successfully:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMessage {\n", + " \"content\": \"update_favorite_pets called.\",\n", + " \"name\": \"update_favorite_pets\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_vsD2DbSpDquOtmFlOtbUME6h\"\n", + "}\n" + ] + } + ], + "source": [ + "await handleRunTimeRequestRunnable.invoke({\n", + " userId: \"brace\",\n", + " query: \"my favorite animals are cats and parrots.\",\n", + " llm: llm\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And have additionally updated the `userToPets` object with a key matching the `userId` we passed, `\"brace\"`:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " name: 'update_favorite_pets',\n", - " args: { pets: [ 'tigers', 'wolves' ] },\n", - " type: 'tool_call',\n", - " id: 'call_FBF4D51SkVK2clsLOQHX6wTv'\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ brace: [ 'cats', 'parrots' ] }\n" + ] + } + ], + "source": [ + "console.log(userToPets);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Without context variables\n", + "\n", + "If you are on an earlier version of core or an environment that does not support `async_hooks`, you can use the following design pattern that creates the tool dynamically at run time and binds to them appropriate values.\n", + "\n", + "The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n", + "this information may be the user ID as resolved from the request itself." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "userToPets = {};\n", + "\n", + "function generateToolsForUser(userId: string) {\n", + " const updateFavoritePets = tool(async (input) => {\n", + " userToPets[userId] = input.pets;\n", + " return \"update_favorite_pets called.\"\n", + " }, {\n", + " name: \"update_favorite_pets\",\n", + " description: \"add to the list of favorite pets.\",\n", + " schema: z.object({\n", + " pets: z.array(z.string())\n", + " }),\n", + " });\n", + " // You can declare and return additional tools as well:\n", + " return [updateFavoritePets];\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Verify that the tool works correctly" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ cobb: [ 'tiger', 'wolf' ] }\n" + ] + } + ], + "source": [ + "const [updatePets] = generateToolsForUser(\"cobb\");\n", + "\n", + "await updatePets.invoke({ pets: [\"tiger\", \"wolf\"] });\n", + "\n", + "console.log(userToPets);" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\n", + "\n", + "async function handleRunTimeRequest(userId: string, query: string, llm: BaseChatModel): Promise {\n", + " if (!llm.bindTools) {\n", + " throw new Error(\"Language model does not support tools.\");\n", + " }\n", + " const tools = generateToolsForUser(userId);\n", + " const llmWithTools = llm.bindTools(tools);\n", + " return llmWithTools.invoke(query);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists. You can see that `user_id` is not among the params the LLM generates:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " name: 'update_favorite_pets',\n", + " args: { pets: [ 'tigers', 'wolves' ] },\n", + " type: 'tool_call',\n", + " id: 'call_FBF4D51SkVK2clsLOQHX6wTv'\n", + "}\n" + ] + } + ], + "source": [ + "const aiMessage = await handleRunTimeRequest(\n", + " \"cobb\", \"my favorite pets are tigers and wolves.\", llm,\n", + ");\n", + "console.log(aiMessage.tool_calls[0]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{=mdx}\n", + ":::tip\n", + "Click [here](https://smith.langchain.com/public/3d766ecc-8f28-400b-8636-632e6f1598c7/r) to see the LangSmith trace for the above run.\n", + ":::\n", + "\n", + ":::tip\n", + "Chat models only output requests to invoke tools. 
They don't actually invoke the underlying tools.\n", + "\n", + "To see how to invoke the tools, please refer to [how to use a model to call tools](/docs/how_to/tool_calling/).\n", + ":::\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const aiMessage = await handleRunTimeRequest(\n", - " \"cobb\", \"my favorite pets are tigers and wolves.\", llm,\n", - ");\n", - "console.log(aiMessage.tool_calls[0]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{=mdx}\n", - ":::tip\n", - "Click [here](https://smith.langchain.com/public/3d766ecc-8f28-400b-8636-632e6f1598c7/r) to see the LangSmith trace for the above run.\n", - ":::\n", - "\n", - ":::tip\n", - "Chat models only output requests to invoke tools. They don't actually invoke the underlying tools.\n", - "\n", - "To see how to invoke the tools, please refer to [how to use a model to call tools](/docs/how_to/tool_calling/).\n", - ":::\n", - "```" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tool_stream_events.ipynb b/docs/core_docs/docs/how_to/tool_stream_events.ipynb index 0b657861cbbf..555c2994f8da 100644 --- a/docs/core_docs/docs/how_to/tool_stream_events.ipynb +++ b/docs/core_docs/docs/how_to/tool_stream_events.ipynb @@ -1,682 +1,682 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# How to stream events from a tool\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [Custom tools](/docs/how_to/custom_tools)\n", - "- [Using stream events](/docs/how_to/streaming/#using-stream-events)\n", - "- [Accessing RunnableConfig within a custom tool](/docs/how_to/tool_configure/)\n", - "\n", - ":::\n", - "```\n", - "\n", - "If you have tools that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the [`.streamEvents()`](/docs/how_to/streaming/#using-stream-events) method.\n", - "\n", - "```{=mdx}\n", - ":::caution Compatibility\n", - "\n", - "In order to support a wider variety of JavaScript environments, the base LangChain package does not automatically propagate configuration to child runnables by default. This includes callbacks necessary for `.streamEvents()`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n", - "\n", - "You will need to manually propagate the `RunnableConfig` object to the child runnable. 
For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n", - "\n", - "This guide also requires `@langchain/core>=0.2.16`.\n", - ":::\n", - "```\n", - "\n", - "Say you have a custom tool that calls a chain that condenses its input by prompting a chat model to return only 10 words, then reversing the output. First, define it in a naive way:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-5-sonnet-20240620\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "const specialSummarizationTool = tool(async (input) => {\n", - " const prompt = ChatPromptTemplate.fromTemplate(\n", - " \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n", - " );\n", - " const reverse = (x: string) => {\n", - " return x.split(\"\").reverse().join(\"\");\n", - " };\n", - " const chain = prompt\n", - " .pipe(model)\n", - " .pipe(new StringOutputParser())\n", - " .pipe(reverse);\n", - " const summary = await chain.invoke({ long_text: input.long_text });\n", - " return summary;\n", - "}, {\n", - " name: \"special_summarization_tool\",\n", - " description: \"A tool that summarizes input text using advanced techniques.\",\n", - " schema: z.object({\n", - " long_text: z.string(),\n", - " }),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Invoking the tool directly works just fine:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - ".yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB\n" - ] - } - ], - "source": [ - "const LONG_TEXT = `\n", - "NARRATOR:\n", - "(Black screen with text; The sound of buzzing bees can be heard)\n", - "According to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\n", - "BARRY BENSON:\n", - "(Barry is picking out a shirt)\n", - "Yellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\n", - "JANET BENSON:\n", - "Barry! Breakfast is ready!\n", - "BARRY:\n", - "Coming! Hang on a second.`;\n", - "\n", - "await specialSummarizationTool.invoke({ long_text: LONG_TEXT });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "But if you wanted to access the raw output from the chat model rather than the full tool, you might try to use the [`.streamEvents()`](/docs/how_to/streaming/#using-stream-events) method and look for an `on_chat_model_end` event. 
Here's what happens:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "const stream = await specialSummarizationTool.streamEvents(\n", - " { long_text: LONG_TEXT },\n", - " { version: \"v2\" },\n", - ");\n", - "\n", - "for await (const event of stream) {\n", - " if (event.event === \"on_chat_model_end\") {\n", - " // Never triggers!\n", - " console.log(event);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You'll notice that there are no chat model events emitted from the child run!\n", - "\n", - "This is because the example above does not pass the tool's config object into the internal chain. To fix this, redefine your tool to take a special parameter typed as `RunnableConfig` (see [this guide](/docs/how_to/tool_configure) for more details). You'll also need to pass that parameter through into the internal chain when executing it:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "const specialSummarizationToolWithConfig = tool(async (input, config) => {\n", - " const prompt = ChatPromptTemplate.fromTemplate(\n", - " \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n", - " );\n", - " const reverse = (x: string) => {\n", - " return x.split(\"\").reverse().join(\"\");\n", - " };\n", - " const chain = prompt\n", - " .pipe(model)\n", - " .pipe(new StringOutputParser())\n", - " .pipe(reverse);\n", - " // Pass the \"config\" object as an argument to any executed runnables\n", - " const summary = await chain.invoke({ long_text: input.long_text }, config);\n", - " return summary;\n", - "}, {\n", - " name: \"special_summarization_tool\",\n", - " description: \"A tool that summarizes input text using advanced techniques.\",\n", - " schema: z.object({\n", - " long_text: z.string(),\n", - " }),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And now try the same `.streamEvents()` call as before with your new tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to stream events from a tool\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [Custom tools](/docs/how_to/custom_tools)\n", + "- [Using stream events](/docs/how_to/streaming/#using-stream-events)\n", + "- [Accessing RunnableConfig within a custom tool](/docs/how_to/tool_configure/)\n", + "\n", + ":::\n", + "```\n", + "\n", + "If you have tools that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the [`.streamEvents()`](/docs/how_to/streaming/#using-stream-events) method.\n", + "\n", + "```{=mdx}\n", + ":::caution Compatibility\n", + "\n", + "In order to support a wider variety of JavaScript environments, the base LangChain package does not automatically propagate configuration to child runnables by default. This includes callbacks necessary for `.streamEvents()`. 
This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n", + "\n", + "You will need to manually propagate the `RunnableConfig` object to the child runnable. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n", + "\n", + "This guide also requires `@langchain/core>=0.2.16`.\n", + ":::\n", + "```\n", + "\n", + "Say you have a custom tool that calls a chain that condenses its input by prompting a chat model to return only 10 words, then reversing the output. First, define it in a naive way:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " event: 'on_chat_model_end',\n", - " data: {\n", - " output: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: 'Bee defies physics; Barry chooses outfit for graduation day.',\n", - " name: undefined,\n", - " additional_kwargs: [Object],\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: [Object]\n", - " },\n", - " input: { messages: [Array] }\n", - " },\n", - " run_id: '27ac7b2e-591c-4adc-89ec-64d96e233ec8',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const stream = await specialSummarizationToolWithConfig.streamEvents(\n", - " { long_text: LONG_TEXT },\n", - " { version: \"v2\" },\n", - ");\n", - "\n", - "for await (const event of stream) {\n", - " if (event.event === \"on_chat_model_end\") {\n", - " // Never triggers!\n", - " console.log(event);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Awesome! This time there's an event emitted.\n", - "\n", - "For streaming, `.streamEvents()` automatically calls internal runnables in a chain with streaming enabled if possible, so if you wanted to a stream of tokens as they are generated from the chat model, you could simply filter to look for `on_chat_model_stream` events with no other changes:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "const model = new ChatAnthropic({\n", + " model: \"claude-3-5-sonnet-20240620\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "const specialSummarizationTool = tool(async (input) => {\n", + " const prompt = ChatPromptTemplate.fromTemplate(\n", + " \"You are an expert writer. 
Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n", + " );\n", + " const reverse = (x: string) => {\n", + " return x.split(\"\").reverse().join(\"\");\n", + " };\n", + " const chain = prompt\n", + " .pipe(model)\n", + " .pipe(new StringOutputParser())\n", + " .pipe(reverse);\n", + " const summary = await chain.invoke({ long_text: input.long_text });\n", + " return summary;\n", + "}, {\n", + " name: \"special_summarization_tool\",\n", + " description: \"A tool that summarizes input text using advanced techniques.\",\n", + " schema: z.object({\n", + " long_text: z.string(),\n", + " }),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Invoking the tool directly works just fine:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: 'Bee',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' def',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: 'ies physics',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ';',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " 
response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' Barry',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' cho',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: 'oses outfit',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' for',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 
'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' graduation',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: ' day',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n", - "{\n", - " event: 'on_chat_model_stream',\n", - " data: {\n", - " chunk: AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: '.',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: [],\n", - " usage_metadata: undefined\n", - " }\n", - " },\n", - " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", - " name: 'ChatAnthropic',\n", - " tags: [ 'seq:step:2' ],\n", - " metadata: {\n", - " ls_provider: 'anthropic',\n", - " ls_model_name: 'claude-3-5-sonnet-20240620',\n", - " ls_model_type: 'chat',\n", - " ls_temperature: 0,\n", - " ls_max_tokens: 2048,\n", - " ls_stop: undefined\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ".yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB\n" + ] + } + ], + "source": [ + "const LONG_TEXT = `\n", + "NARRATOR:\n", + "(Black screen with text; The sound of buzzing bees can be heard)\n", + "According to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\n", + "BARRY BENSON:\n", + "(Barry is picking out a shirt)\n", + "Yellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! 
Let's shake it up a little.\n", + "JANET BENSON:\n", + "Barry! Breakfast is ready!\n", + "BARRY:\n", + "Coming! Hang on a second.`;\n", + "\n", + "await specialSummarizationTool.invoke({ long_text: LONG_TEXT });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But if you wanted to access the raw output from the chat model rather than the full tool, you might try to use the [`.streamEvents()`](/docs/how_to/streaming/#using-stream-events) method and look for an `on_chat_model_end` event. Here's what happens:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "const stream = await specialSummarizationTool.streamEvents(\n", + " { long_text: LONG_TEXT },\n", + " { version: \"v2\" },\n", + ");\n", + "\n", + "for await (const event of stream) {\n", + " if (event.event === \"on_chat_model_end\") {\n", + " // Never triggers!\n", + " console.log(event);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You'll notice that there are no chat model events emitted from the child run!\n", + "\n", + "This is because the example above does not pass the tool's config object into the internal chain. To fix this, redefine your tool to take a special parameter typed as `RunnableConfig` (see [this guide](/docs/how_to/tool_configure) for more details). You'll also need to pass that parameter through into the internal chain when executing it:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "const specialSummarizationToolWithConfig = tool(async (input, config) => {\n", + " const prompt = ChatPromptTemplate.fromTemplate(\n", + " \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n", + " );\n", + " const reverse = (x: string) => {\n", + " return x.split(\"\").reverse().join(\"\");\n", + " };\n", + " const chain = prompt\n", + " .pipe(model)\n", + " .pipe(new StringOutputParser())\n", + " .pipe(reverse);\n", + " // Pass the \"config\" object as an argument to any executed runnables\n", + " const summary = await chain.invoke({ long_text: input.long_text }, config);\n", + " return summary;\n", + "}, {\n", + " name: \"special_summarization_tool\",\n", + " description: \"A tool that summarizes input text using advanced techniques.\",\n", + " schema: z.object({\n", + " long_text: z.string(),\n", + " }),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And now try the same `.streamEvents()` call as before with your new tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " event: 'on_chat_model_end',\n", + " data: {\n", + " output: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: 'Bee defies physics; Barry chooses outfit for graduation day.',\n", + " name: undefined,\n", + " additional_kwargs: [Object],\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: [Object]\n", + " },\n", + " input: { messages: [Array] }\n", + " },\n", + " run_id: '27ac7b2e-591c-4adc-89ec-64d96e233ec8',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 
'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const stream = await specialSummarizationToolWithConfig.streamEvents(\n", + " { long_text: LONG_TEXT },\n", + " { version: \"v2\" },\n", + ");\n", + "\n", + "for await (const event of stream) {\n", + " if (event.event === \"on_chat_model_end\") {\n", + " // Never triggers!\n", + " console.log(event);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Awesome! This time there's an event emitted.\n", + "\n", + "For streaming, `.streamEvents()` automatically calls internal runnables in a chain with streaming enabled if possible, so if you wanted to a stream of tokens as they are generated from the chat model, you could simply filter to look for `on_chat_model_stream` events with no other changes:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: 'Bee',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' def',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: 'ies physics',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", 
+ " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ';',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' Barry',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' cho',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: 'oses outfit',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' for',\n", + " name: undefined,\n", + " 
additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' graduation',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: ' day',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n", + "{\n", + " event: 'on_chat_model_stream',\n", + " data: {\n", + " chunk: AIMessageChunk {\n", + " lc_serializable: true,\n", + " lc_kwargs: [Object],\n", + " lc_namespace: [Array],\n", + " content: '.',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " tool_call_chunks: [],\n", + " usage_metadata: undefined\n", + " }\n", + " },\n", + " run_id: '938c0469-83c6-4dbd-862e-cd73381165de',\n", + " name: 'ChatAnthropic',\n", + " tags: [ 'seq:step:2' ],\n", + " metadata: {\n", + " ls_provider: 'anthropic',\n", + " ls_model_name: 'claude-3-5-sonnet-20240620',\n", + " ls_model_type: 'chat',\n", + " ls_temperature: 0,\n", + " ls_max_tokens: 2048,\n", + " ls_stop: undefined\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const stream = await specialSummarizationToolWithConfig.streamEvents(\n", + " { long_text: LONG_TEXT },\n", + " { version: \"v2\" },\n", + ");\n", + "\n", + "for await (const event of stream) {\n", + " if (event.event === \"on_chat_model_stream\") {\n", + " // Never triggers!\n", + " console.log(event);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Automatically passing config (Advanced)\n", + "\n", + "If you've used 
[LangGraph](https://langchain-ai.github.io/langgraphjs/), you may have noticed that you don't need to pass config in nested calls. This is because LangGraph takes advantage of an API called [`async_hooks`](https://nodejs.org/api/async_hooks.html), which is not supported in many, but not all environments.\n", + "\n", + "If you wish, you can enable automatic configuration passing by running the following code to import and enable `AsyncLocalStorage` globally:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "import { AsyncLocalStorageProviderSingleton } from \"@langchain/core/singletons\";\n", + "import { AsyncLocalStorage } from \"async_hooks\";\n", + "\n", + "AsyncLocalStorageProviderSingleton.initializeGlobalInstance(\n", + " new AsyncLocalStorage()\n", + ");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "You've now seen how to stream events from within a tool. Next, check out the following guides for more on using tools:\n", + "\n", + "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", + "- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n", + "- [Dispatch custom callback events](/docs/how_to/callbacks_custom_events)\n", + "\n", + "You can also check out some more specific uses of tool calling:\n", + "\n", + "- Building [tool-using chains and agents](/docs/how_to#tools)\n", + "- Getting [structured outputs](/docs/how_to/structured_output/) from models" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const stream = await specialSummarizationToolWithConfig.streamEvents(\n", - " { long_text: LONG_TEXT },\n", - " { version: \"v2\" },\n", - ");\n", - "\n", - "for await (const event of stream) {\n", - " if (event.event === \"on_chat_model_stream\") {\n", - " // Never triggers!\n", - " console.log(event);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Automatically passing config (Advanced)\n", - "\n", - "If you've used [LangGraph](https://langchain-ai.github.io/langgraphjs/), you may have noticed that you don't need to pass config in nested calls. This is because LangGraph takes advantage of an API called [`async_hooks`](https://nodejs.org/api/async_hooks.html), which is not supported in many, but not all environments.\n", - "\n", - "If you wish, you can enable automatic configuration passing by running the following code to import and enable `AsyncLocalStorage` globally:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "import { AsyncLocalStorageProviderSingleton } from \"@langchain/core/singletons\";\n", - "import { AsyncLocalStorage } from \"async_hooks\";\n", - "\n", - "AsyncLocalStorageProviderSingleton.initializeGlobalInstance(\n", - " new AsyncLocalStorage()\n", - ");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "You've now seen how to stream events from within a tool. 
Next, check out the following guides for more on using tools:\n", - "\n", - "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", - "- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n", - "- [Dispatch custom callback events](/docs/how_to/callbacks_custom_events)\n", - "\n", - "You can also check out some more specific uses of tool calling:\n", - "\n", - "- Building [tool-using chains and agents](/docs/how_to#tools)\n", - "- Getting [structured outputs](/docs/how_to/structured_output/) from models" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tools_error.ipynb b/docs/core_docs/docs/how_to/tools_error.ipynb index b79dd18d9c77..2a05b38377ad 100644 --- a/docs/core_docs/docs/how_to/tools_error.ipynb +++ b/docs/core_docs/docs/how_to/tools_error.ipynb @@ -1,244 +1,244 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "5d60cbb9-2a6a-43ea-a9e9-f67b16ddd2b2", - "metadata": {}, - "source": [ - "# How to handle tool errors\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [How to use a model to call tools](/docs/how_to/tool_calling)\n", - "\n", - ":::\n", - "```\n", - "\n", - "Calling tools with an LLM isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n", - "\n", - "This guide covers some ways to build error handling into your chains to mitigate these failure modes." - ] - }, - { - "cell_type": "markdown", - "id": "0a50f93a-5d6f-4691-8f98-27239a1c2f95", - "metadata": {}, - "source": [ - "## Chain\n", - "\n", - "Suppose we have the following (dummy) tool and tool-calling chain. We'll make our tool intentionally convoluted to try and trip up the model." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "1d20604e-c4d1-4d21-841b-23e4f61aec36", - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-3.5-turbo-0125\",\n", - " temperature: 0,\n", - "});\n", - "\n", - "const complexTool = tool(async (params) => {\n", - " return params.int_arg * params.float_arg;\n", - "}, {\n", - " name: \"complex_tool\",\n", - " description: \"Do something complex with a complex tool.\",\n", - " schema: z.object({\n", - " int_arg: z.number(),\n", - " float_arg: z.number(),\n", - " number_arg: z.object({}),\n", - " })\n", - "});\n", - "\n", - "const llmWithTools = llm.bindTools([complexTool]);\n", - "\n", - "const chain = llmWithTools\n", - " .pipe((message) => message.tool_calls?.[0].args)\n", - " .pipe(complexTool);" - ] - }, - { - "cell_type": "markdown", - "id": "c34f005e-63f0-4841-9461-ca36c36607fc", - "metadata": {}, - "source": [ - "We can see that when we try to invoke this chain the model fails to correctly call the tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d354664c-ac44-4967-a35f-8912b3ad9477", - "metadata": {}, - "outputs": [ + "cells": [ { - "ename": "Error", - "evalue": "Received tool input did not match expected schema", - "output_type": "error", - "traceback": [ - "Stack trace:", - "Error: Received tool input did not match expected schema", - " at DynamicStructuredTool.call (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.2.16/dist/tools/index.js:100:19)", - " at eventLoopTick (ext:core/01_core.js:63:7)", - " at async RunnableSequence.invoke (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.2.16_1/dist/runnables/base.js:1139:27)", - " at async :1:22" - ] - } - ], - "source": [ - "await chain.invoke(\n", - " \"use complex tool. the args are 5, 2.1, potato\"\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "890d989d-2d39-4571-9a55-d3496b9b5d27", - "metadata": {}, - "source": [ - "## Try/except tool call\n", - "\n", - "The simplest way to more gracefully handle errors is to try/except the tool-calling step and return a helpful message on errors:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "8fedb550-683d-45ae-8876-ae7acb332019", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "5d60cbb9-2a6a-43ea-a9e9-f67b16ddd2b2", + "metadata": {}, + "source": [ + "# How to handle tool errors\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [How to use a model to call tools](/docs/how_to/tool_calling)\n", + "\n", + ":::\n", + "```\n", + "\n", + "Calling tools with an LLM isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n", + "\n", + "This guide covers some ways to build error handling into your chains to mitigate these failure modes." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Calling tool with arguments:\n", - "\n", - "{\"int_arg\":5,\"float_arg\":2.1,\"number_arg\":\"potato\"}\n", - "\n", - "raised the following error:\n", - "\n", - "Error: Received tool input did not match expected schema\n" - ] - } - ], - "source": [ - "const tryExceptToolWrapper = async (input, config) => {\n", - " try {\n", - " const result = await complexTool.invoke(input);\n", - " return result;\n", - " } catch (e) {\n", - " return `Calling tool with arguments:\\n\\n${JSON.stringify(input)}\\n\\nraised the following error:\\n\\n${e}`\n", - " }\n", - "}\n", - "\n", - "const chainWithTools = llmWithTools\n", - " .pipe((message) => message.tool_calls?.[0].args)\n", - " .pipe(tryExceptToolWrapper);\n", - "\n", - "const res = await chainWithTools.invoke(\"use complex tool. the args are 5, 2.1, potato\");\n", - "\n", - "console.log(res);" - ] - }, - { - "cell_type": "markdown", - "id": "3b2f6393-cb47-49d0-921c-09550a049fe4", - "metadata": {}, - "source": [ - "## Fallbacks\n", - "\n", - "We can also try to fallback to a better model in the event of a tool invocation error. In this case we'll fall back to an identical chain that uses `gpt-4-1106-preview` instead of `gpt-3.5-turbo`." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "02cc4223-35fa-4240-976a-012299ca703c", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "0a50f93a-5d6f-4691-8f98-27239a1c2f95", + "metadata": {}, + "source": [ + "## Chain\n", + "\n", + "Suppose we have the following (dummy) tool and tool-calling chain. We'll make our tool intentionally convoluted to try and trip up the model." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "1d20604e-c4d1-4d21-841b-23e4f61aec36", + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-3.5-turbo-0125\",\n", + " temperature: 0,\n", + "});\n", + "\n", + "const complexTool = tool(async (params) => {\n", + " return params.int_arg * params.float_arg;\n", + "}, {\n", + " name: \"complex_tool\",\n", + " description: \"Do something complex with a complex tool.\",\n", + " schema: z.object({\n", + " int_arg: z.number(),\n", + " float_arg: z.number(),\n", + " number_arg: z.object({}),\n", + " })\n", + "});\n", + "\n", + "const llmWithTools = llm.bindTools([complexTool]);\n", + "\n", + "const chain = llmWithTools\n", + " .pipe((message) => message.tool_calls?.[0].args)\n", + " .pipe(complexTool);" + ] + }, + { + "cell_type": "markdown", + "id": "c34f005e-63f0-4841-9461-ca36c36607fc", + "metadata": {}, + "source": [ + "We can see that when we try to invoke this chain the model fails to correctly call the tool:" + ] + }, { - "data": { - "text/plain": [ - "\u001b[33m10.5\u001b[39m" + "cell_type": "code", + "execution_count": 2, + "id": "d354664c-ac44-4967-a35f-8912b3ad9477", + "metadata": {}, + "outputs": [ + { + "ename": "Error", + "evalue": "Received tool input did not match expected schema", + "output_type": "error", + "traceback": [ + "Stack trace:", + "Error: Received tool input did not match expected schema", + " at DynamicStructuredTool.call (file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.2.16/dist/tools/index.js:100:19)", + " at eventLoopTick (ext:core/01_core.js:63:7)", + " at async RunnableSequence.invoke 
(file:///Users/jacoblee/Library/Caches/deno/npm/registry.npmjs.org/@langchain/core/0.2.16_1/dist/runnables/base.js:1139:27)", + " at async :1:22" + ] + } + ], + "source": [ + "await chain.invoke(\n", + " \"use complex tool. the args are 5, 2.1, potato\"\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "890d989d-2d39-4571-9a55-d3496b9b5d27", + "metadata": {}, + "source": [ + "## Try/except tool call\n", + "\n", + "The simplest way to more gracefully handle errors is to try/except the tool-calling step and return a helpful message on errors:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "8fedb550-683d-45ae-8876-ae7acb332019", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Calling tool with arguments:\n", + "\n", + "{\"int_arg\":5,\"float_arg\":2.1,\"number_arg\":\"potato\"}\n", + "\n", + "raised the following error:\n", + "\n", + "Error: Received tool input did not match expected schema\n" + ] + } + ], + "source": [ + "const tryExceptToolWrapper = async (input, config) => {\n", + " try {\n", + " const result = await complexTool.invoke(input);\n", + " return result;\n", + " } catch (e) {\n", + " return `Calling tool with arguments:\\n\\n${JSON.stringify(input)}\\n\\nraised the following error:\\n\\n${e}`\n", + " }\n", + "}\n", + "\n", + "const chainWithTools = llmWithTools\n", + " .pipe((message) => message.tool_calls?.[0].args)\n", + " .pipe(tryExceptToolWrapper);\n", + "\n", + "const res = await chainWithTools.invoke(\"use complex tool. the args are 5, 2.1, potato\");\n", + "\n", + "console.log(res);" + ] + }, + { + "cell_type": "markdown", + "id": "3b2f6393-cb47-49d0-921c-09550a049fe4", + "metadata": {}, + "source": [ + "## Fallbacks\n", + "\n", + "We can also try to fallback to a better model in the event of a tool invocation error. In this case we'll fall back to an identical chain that uses `gpt-4-1106-preview` instead of `gpt-3.5-turbo`." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "02cc4223-35fa-4240-976a-012299ca703c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[33m10.5\u001b[39m" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const badChain = llmWithTools\n", + " .pipe((message) => message.tool_calls?.[0].args)\n", + " .pipe(complexTool);\n", + "\n", + "const betterModel = new ChatOpenAI({\n", + " model: \"gpt-4-1106-preview\",\n", + " temperature: 0,\n", + "}).bindTools([complexTool]);\n", + "\n", + "const betterChain = betterModel\n", + " .pipe((message) => message.tool_calls?.[0].args)\n", + " .pipe(complexTool);\n", + "\n", + "const chainWithFallback = badChain.withFallbacks([betterChain]);\n", + "\n", + "await chainWithFallback.invoke(\"use complex tool. the args are 5, 2.1, potato\");" + ] + }, + { + "cell_type": "markdown", + "id": "412f8c4e-cc83-4d87-84a1-5ba2f8edb1e9", + "metadata": {}, + "source": [ + "Looking at the [LangSmith trace](https://smith.langchain.com/public/ea31e7ca-4abc-48e3-9943-700100c86622/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds." + ] + }, + { + "cell_type": "markdown", + "id": "6b97af9f", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "Now you've seen some strategies how to handle tool calling errors. 
Next, you can learn more about how to use tools:\n", + "\n", + "- Few shot prompting [with tools](/docs/how_to/tool_calling#few-shotting-with-tools)\n", + "- Stream [tool calls](/docs/how_to/tool_streaming/)\n", + "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", + "\n", + "You can also check out some more specific uses of tool calling:\n", + "\n", + "- Getting [structured outputs](/docs/how_to/structured_output/) from models" ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "const badChain = llmWithTools\n", - " .pipe((message) => message.tool_calls?.[0].args)\n", - " .pipe(complexTool);\n", - "\n", - "const betterModel = new ChatOpenAI({\n", - " model: \"gpt-4-1106-preview\",\n", - " temperature: 0,\n", - "}).bindTools([complexTool]);\n", - "\n", - "const betterChain = betterModel\n", - " .pipe((message) => message.tool_calls?.[0].args)\n", - " .pipe(complexTool);\n", - "\n", - "const chainWithFallback = badChain.withFallbacks([betterChain]);\n", - "\n", - "await chainWithFallback.invoke(\"use complex tool. the args are 5, 2.1, potato\");" - ] - }, - { - "cell_type": "markdown", - "id": "412f8c4e-cc83-4d87-84a1-5ba2f8edb1e9", - "metadata": {}, - "source": [ - "Looking at the [LangSmith trace](https://smith.langchain.com/public/ea31e7ca-4abc-48e3-9943-700100c86622/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds." - ] - }, - { - "cell_type": "markdown", - "id": "6b97af9f", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "Now you've seen some strategies how to handle tool calling errors. Next, you can learn more about how to use tools:\n", - "\n", - "- Few shot prompting [with tools](/docs/how_to/tool_calling#few-shotting-with-tools)\n", - "- Stream [tool calls](/docs/how_to/tool_streaming/)\n", - "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", - "\n", - "You can also check out some more specific uses of tool calling:\n", - "\n", - "- Getting [structured outputs](/docs/how_to/structured_output/) from models" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tools_few_shot.ipynb b/docs/core_docs/docs/how_to/tools_few_shot.ipynb index 78107f52180d..02f50007e604 100644 --- a/docs/core_docs/docs/how_to/tools_few_shot.ipynb +++ b/docs/core_docs/docs/how_to/tools_few_shot.ipynb @@ -1,217 +1,217 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## How to use few-shot prompting with tool calling\n", - "\n", - "```{=mdx}\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [LangChain Tools](/docs/concepts/#tools)\n", - "- [Tool 
calling](/docs/concepts/#functiontool-calling)\n", - "- [Passing tool outputs to chat models](/docs/how_to/tool_results_pass_to_model/)\n", - "\n", - ":::\n", - "```\n", - "\n", - "For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessages` with `ToolCalls` and corresponding `ToolMessages` to our prompt.\n", - "\n", - "First define a model and a calculator tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { tool } from \"@langchain/core/tools\";\n", - "import { z } from \"zod\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({ model: \"gpt-4o\", temperature: 0, })\n", - "\n", - "/**\n", - " * Note that the descriptions here are crucial, as they will be passed along\n", - " * to the model along with the class name.\n", - " */\n", - "const calculatorSchema = z.object({\n", - " operation: z\n", - " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", - " .describe(\"The type of operation to execute.\"),\n", - " number1: z.number().describe(\"The first number to operate on.\"),\n", - " number2: z.number().describe(\"The second number to operate on.\"),\n", - "});\n", - "\n", - "const calculatorTool = tool(async ({ operation, number1, number2 }) => {\n", - " // Functions must return strings\n", - " if (operation === \"add\") {\n", - " return `${number1 + number2}`;\n", - " } else if (operation === \"subtract\") {\n", - " return `${number1 - number2}`;\n", - " } else if (operation === \"multiply\") {\n", - " return `${number1 * number2}`;\n", - " } else if (operation === \"divide\") {\n", - " return `${number1 / number2}`;\n", - " } else {\n", - " throw new Error(\"Invalid operation.\");\n", - " }\n", - "}, {\n", - " name: \"calculator\",\n", - " description: \"Can perform mathematical operations.\",\n", - " schema: calculatorSchema,\n", - "});\n", - "\n", - "const llmWithTools = llm.bindTools([calculatorTool]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our calculator can handle common addition, subtraction, multiplication, and division. But what happens if we ask about a new mathematical operator, `🦜`?\n", - "\n", - "Let's see what happens when we use it naively:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "[\n", - " {\n", - " name: 'calculator',\n", - " args: { operation: 'multiply', number1: 3, number2: 12 },\n", - " type: 'tool_call',\n", - " id: 'call_I0oQGmdESpIgcf91ej30p9aR'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const res = await llmWithTools.invoke(\"What is 3 🦜 12\");\n", - "\n", - "console.log(res.content);\n", - "console.log(res.tool_calls);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It doesn't quite know how to interpret `🦜` as an operation, and it defaults to `multiply`. 
Now, let's try giving it some examples in the form of a manufactured messages to steer it towards `divide`:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## How to use few-shot prompting with tool calling\n", + "\n", + "```{=mdx}\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [LangChain Tools](/docs/concepts/tools)\n", + "- [Tool calling](/docs/concepts/tool_calling)\n", + "- [Passing tool outputs to chat models](/docs/how_to/tool_results_pass_to_model/)\n", + "\n", + ":::\n", + "```\n", + "\n", + "For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessages` with `ToolCalls` and corresponding `ToolMessages` to our prompt.\n", + "\n", + "First define a model and a calculator tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o\", temperature: 0, })\n", + "\n", + "/**\n", + " * Note that the descriptions here are crucial, as they will be passed along\n", + " * to the model along with the class name.\n", + " */\n", + "const calculatorSchema = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + "});\n", + "\n", + "const calculatorTool = tool(async ({ operation, number1, number2 }) => {\n", + " // Functions must return strings\n", + " if (operation === \"add\") {\n", + " return `${number1 + number2}`;\n", + " } else if (operation === \"subtract\") {\n", + " return `${number1 - number2}`;\n", + " } else if (operation === \"multiply\") {\n", + " return `${number1 * number2}`;\n", + " } else if (operation === \"divide\") {\n", + " return `${number1 / number2}`;\n", + " } else {\n", + " throw new Error(\"Invalid operation.\");\n", + " }\n", + "}, {\n", + " name: \"calculator\",\n", + " description: \"Can perform mathematical operations.\",\n", + " schema: calculatorSchema,\n", + "});\n", + "\n", + "const llmWithTools = llm.bindTools([calculatorTool]);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'calculator',\n", - " args: { number1: 3, number2: 12, operation: 'divide' },\n", - " type: 'tool_call',\n", - " id: 'call_O6M4yDaA6s8oDqs2Zfl7TZAp'\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our calculator can handle common addition, subtraction, multiplication, and division. 
But what happens if we ask about a new mathematical operator, `🦜`?\n", + "\n", + "Let's see what happens when we use it naively:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "[\n", + " {\n", + " name: 'calculator',\n", + " args: { operation: 'multiply', number1: 3, number2: 12 },\n", + " type: 'tool_call',\n", + " id: 'call_I0oQGmdESpIgcf91ej30p9aR'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const res = await llmWithTools.invoke(\"What is 3 🦜 12\");\n", + "\n", + "console.log(res.content);\n", + "console.log(res.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It doesn't quite know how to interpret `🦜` as an operation, and it defaults to `multiply`. Now, let's try giving it some examples in the form of a manufactured messages to steer it towards `divide`:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'calculator',\n", + " args: { number1: 3, number2: 12, operation: 'divide' },\n", + " type: 'tool_call',\n", + " id: 'call_O6M4yDaA6s8oDqs2Zfl7TZAp'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { HumanMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\n", + "\n", + "const res = await llmWithTools.invoke([\n", + " new HumanMessage(\"What is 333382 🦜 1932?\"),\n", + " new AIMessage({\n", + " content: \"The 🦜 operator is shorthand for division, so we call the divide tool.\",\n", + " tool_calls: [{\n", + " id: \"12345\",\n", + " name: \"calculator\",\n", + " args: {\n", + " number1: 333382,\n", + " number2: 1932,\n", + " operation: \"divide\",\n", + " }\n", + " }]\n", + " }),\n", + " new ToolMessage({\n", + " tool_call_id: \"12345\",\n", + " content: \"The answer is 172.558.\"\n", + " }),\n", + " new AIMessage(\"The answer is 172.558.\"),\n", + " new HumanMessage(\"What is 6 🦜 2?\"),\n", + " new AIMessage({\n", + " content: \"The 🦜 operator is shorthand for division, so we call the divide tool.\",\n", + " tool_calls: [{\n", + " id: \"54321\",\n", + " name: \"calculator\",\n", + " args: {\n", + " number1: 6,\n", + " number2: 2,\n", + " operation: \"divide\",\n", + " }\n", + " }]\n", + " }),\n", + " new ToolMessage({\n", + " tool_call_id: \"54321\",\n", + " content: \"The answer is 3.\"\n", + " }),\n", + " new AIMessage(\"The answer is 3.\"),\n", + " new HumanMessage(\"What is 3 🦜 12?\")\n", + "]);\n", + "\n", + "console.log(res.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we can see that it now equates `🦜` with the `divide` operation in the correct way!\n", + "\n", + "## Related\n", + "\n", + "- Stream [tool calls](/docs/how_to/tool_streaming/)\n", + "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", + "- Getting [structured outputs](/docs/how_to/structured_output/) from models" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { HumanMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\n", - "\n", - "const res = await llmWithTools.invoke([\n", - " new 
HumanMessage(\"What is 333382 🦜 1932?\"),\n", - " new AIMessage({\n", - " content: \"The 🦜 operator is shorthand for division, so we call the divide tool.\",\n", - " tool_calls: [{\n", - " id: \"12345\",\n", - " name: \"calculator\",\n", - " args: {\n", - " number1: 333382,\n", - " number2: 1932,\n", - " operation: \"divide\",\n", - " }\n", - " }]\n", - " }),\n", - " new ToolMessage({\n", - " tool_call_id: \"12345\",\n", - " content: \"The answer is 172.558.\"\n", - " }),\n", - " new AIMessage(\"The answer is 172.558.\"),\n", - " new HumanMessage(\"What is 6 🦜 2?\"),\n", - " new AIMessage({\n", - " content: \"The 🦜 operator is shorthand for division, so we call the divide tool.\",\n", - " tool_calls: [{\n", - " id: \"54321\",\n", - " name: \"calculator\",\n", - " args: {\n", - " number1: 6,\n", - " number2: 2,\n", - " operation: \"divide\",\n", - " }\n", - " }]\n", - " }),\n", - " new ToolMessage({\n", - " tool_call_id: \"54321\",\n", - " content: \"The answer is 3.\"\n", - " }),\n", - " new AIMessage(\"The answer is 3.\"),\n", - " new HumanMessage(\"What is 3 🦜 12?\")\n", - "]);\n", - "\n", - "console.log(res.tool_calls);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And we can see that it now equates `🦜` with the `divide` operation in the correct way!\n", - "\n", - "## Related\n", - "\n", - "- Stream [tool calls](/docs/how_to/tool_streaming/)\n", - "- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n", - "- Getting [structured outputs](/docs/how_to/structured_output/) from models" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/tools_prompting.ipynb b/docs/core_docs/docs/how_to/tools_prompting.ipynb index 640511df27d7..49356ba2e570 100644 --- a/docs/core_docs/docs/how_to/tools_prompting.ipynb +++ b/docs/core_docs/docs/how_to/tools_prompting.ipynb @@ -1,421 +1,421 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "3243cb05-8243-421f-99fa-98201abb3094", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 3\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "14b94240", - "metadata": {}, - "source": [ - "# How to add ad-hoc tool calling capability to LLMs and Chat Models\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n", - "- [Chaining runnables](/docs/how_to/sequence/)\n", - "- [Tool calling](/docs/how_to/tool_calling/)\n", - "\n", - ":::\n", - "\n", - "In this guide we'll build a Chain that does not rely on any special model APIs (like tool calling, which we showed in the [Quickstart](/docs/how_to/tool_calling)) and instead just prompts the model directly to invoke tools." 
- ] - }, - { - "cell_type": "markdown", - "id": "a0a22cb8-19e7-450a-9d1b-6848d2c81cd1", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "We'll need to install the following packages:\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from '@theme/Npm2Yarn';\n", - "\n", - "\n", - " @langchain/core zod\n", - "\n", - "```\n", - "\n", - "#### Set environment variables\n", - "\n", - "```\n", - "# Optional, use LangSmith for best-in-class observability\n", - "LANGSMITH_API_KEY=your-api-key\n", - "LANGCHAIN_TRACING_V2=true\n", - "\n", - "# Reduce tracing latency if you are not in a serverless environment\n", - "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "68946881", - "metadata": {}, - "source": [ - "## Create a tool\n", - "\n", - "First, we need to create a tool to call. For this example, we will create a custom tool from a function. For more information on all details related to creating custom tools, please see [this guide](/docs/how_to/custom_tools)." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "90187d07", - "metadata": {}, - "outputs": [], - "source": [ - "import { tool } from \"@langchain/core/tools\";\n", - "import { z } from \"zod\";\n", - "\n", - "const multiplyTool = tool((input) => {\n", - " return (input.first_int * input.second_int).toString()\n", - "}, {\n", - " name: \"multiply\",\n", - " description: \"Multiply two integers together.\",\n", - " schema: z.object({\n", - " first_int: z.number(),\n", - " second_int: z.number(),\n", - " })\n", - "})\n" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "d7009e1a", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "multiply\n", - "Multiply two integers together.\n" - ] - } - ], - "source": [ - "console.log(multiplyTool.name)\n", - "console.log(multiplyTool.description)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "be77e780", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "3243cb05-8243-421f-99fa-98201abb3094", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 3\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "20\n" - ] - } - ], - "source": [ - "await multiplyTool.invoke({ first_int: 4, second_int: 5 })" - ] - }, - { - "cell_type": "markdown", - "id": "15dd690e-e54d-4209-91a4-181f69a452ac", - "metadata": {}, - "source": [ - "## Creating our prompt\n", - "\n", - "We'll want to write a prompt that specifies the tools the model has access to, the arguments to those tools, and the desired output format of the model. 
In this case we'll instruct it to output a JSON blob of the form `{\"name\": \"...\", \"arguments\": {...}}`.\n", - "\n", - "```{=mdx}\n", - ":::tip\n", - "As of `langchain` version `0.2.8`, the `renderTextDescription` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html).\n", - ":::\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "c64818f0-9364-423c-922e-bdfb8f01e726", - "metadata": {}, - "outputs": [], - "source": [ - "import { renderTextDescription } from \"langchain/tools/render\";\n", - "\n", - "const renderedTools = renderTextDescription([multiplyTool])" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "63552d4d-8bd6-4aca-8805-56e236f6552d", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const systemPrompt = `You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n", - "\n", - "{rendered_tools}\n", - "\n", - "Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.`;\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [[\"system\", systemPrompt], [\"user\", \"{input}\"]]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "14df2cd5-b6fa-4b10-892d-e8692c7931e5", - "metadata": {}, - "source": [ - "## Adding an output parser\n", - "\n", - "We'll use the `JsonOutputParser` for parsing our models output to JSON.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from '@theme/ChatModelTabs';\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "f129f5bd-127c-4c95-8f34-8f437da7ca8f", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "14b94240", + "metadata": {}, + "source": [ + "# How to add ad-hoc tool calling capability to LLMs and Chat Models\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [LangChain Expression Language (LCEL)](/docs/concepts/lcel)\n", + "- [Chaining runnables](/docs/how_to/sequence/)\n", + "- [Tool calling](/docs/how_to/tool_calling/)\n", + "\n", + ":::\n", + "\n", + "In this guide we'll build a Chain that does not rely on any special model APIs (like tool calling, which we showed in the [Quickstart](/docs/how_to/tool_calling)) and instead just prompts the model directly to invoke tools." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ name: 'multiply', arguments: [ 13, 4 ] }\n" - ] - } - ], - "source": [ - "import { JsonOutputParser } from \"@langchain/core/output_parsers\";\n", - "const chain = prompt.pipe(model).pipe(new JsonOutputParser())\n", - "await chain.invoke({ input: \"what's thirteen times 4\", rendered_tools: renderedTools })" - ] - }, - { - "cell_type": "markdown", - "id": "8e29dd4c-8eb5-457f-92d1-8add076404dc", - "metadata": {}, - "source": [ - "## Invoking the tool\n", - "\n", - "We can invoke the tool as part of the chain by passing along the model-generated \"arguments\" to it:" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "0555b384-fde6-4404-86e0-7ea199003d58", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a0a22cb8-19e7-450a-9d1b-6848d2c81cd1", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "We'll need to install the following packages:\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from '@theme/Npm2Yarn';\n", + "\n", + "\n", + " @langchain/core zod\n", + "\n", + "```\n", + "\n", + "#### Set environment variables\n", + "\n", + "```\n", + "# Optional, use LangSmith for best-in-class observability\n", + "LANGSMITH_API_KEY=your-api-key\n", + "LANGCHAIN_TRACING_V2=true\n", + "\n", + "# Reduce tracing latency if you are not in a serverless environment\n", + "# LANGCHAIN_CALLBACKS_BACKGROUND=true\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "52\n" - ] - } - ], - "source": [ - "import { RunnableLambda, RunnablePick } from \"@langchain/core/runnables\"\n", - "\n", - "const chain = prompt.pipe(model).pipe(new JsonOutputParser()).pipe(new RunnablePick(\"arguments\")).pipe(new RunnableLambda({ func: (input) => multiplyTool.invoke({\n", - " first_int: input[0],\n", - " second_int: input[1]\n", - "}) }))\n", - "await chain.invoke({ input: \"what's thirteen times 4\", rendered_tools: renderedTools })" - ] - }, - { - "cell_type": "markdown", - "id": "8d60b2cb-6ce0-48fc-8d18-d2337161a53d", - "metadata": {}, - "source": [ - "## Choosing from multiple tools\n", - "\n", - "Suppose we have multiple tools we want the chain to be able to choose from:" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "95c86d32-ee45-4c87-a28c-14eff19b49e9", - "metadata": {}, - "outputs": [], - "source": [ - "const addTool = tool((input) => {\n", - " return (input.first_int + input.second_int).toString()\n", - "}, {\n", - " name: \"add\",\n", - " description: \"Add two integers together.\",\n", - " schema: z.object({\n", - " first_int: z.number(),\n", - " second_int: z.number(),\n", - " }),\n", - "});\n", - "\n", - "const exponentiateTool = tool((input) => {\n", - " return Math.pow(input.first_int, input.second_int).toString()\n", - "}, {\n", - " name: \"exponentiate\",\n", - " description: \"Exponentiate the base to the exponent power.\",\n", - " schema: z.object({\n", - " first_int: z.number(),\n", - " second_int: z.number(),\n", - " }),\n", - "});\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "id": "748405ff-4c85-4bd7-82e1-30458b5a4106", - "metadata": {}, - "source": [ - "With function calling, we can do this like so:" - ] - }, - { - "cell_type": "markdown", - "id": "eb3aa89e-40e1-45ec-b1f3-ab28cfc8e42d", - "metadata": {}, - "source": [ - "If we want to run the model selected tool, we can do so using a function that returns the tool based on the model output. 
Specifically, our function will action return it's own subchain that gets the \"arguments\" part of the model output and passes it to the chosen tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "db254773-5b8e-43d0-aabe-c21566c154cd", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "68946881", + "metadata": {}, + "source": [ + "## Create a tool\n", + "\n", + "First, we need to create a tool to call. For this example, we will create a custom tool from a function. For more information on all details related to creating custom tools, please see [this guide](/docs/how_to/custom_tools)." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "1135\n" - ] - } - ], - "source": [ - "import { StructuredToolInterface } from \"@langchain/core/tools\"\n", - "\n", - "const tools = [addTool, exponentiateTool, multiplyTool]\n", - "\n", - "const toolChain = (modelOutput) => {\n", - " const toolMap: Record = Object.fromEntries(tools.map(tool => [tool.name, tool]))\n", - " const chosenTool = toolMap[modelOutput.name]\n", - " return new RunnablePick(\"arguments\").pipe(new RunnableLambda({ func: (input) => chosenTool.invoke({\n", - " first_int: input[0],\n", - " second_int: input[1]\n", - " }) }))\n", - "}\n", - "const toolChainRunnable = new RunnableLambda({\n", - " func: toolChain\n", - "})\n", - "\n", - "const renderedTools = renderTextDescription(tools)\n", - "const systemPrompt = `You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n", - "\n", - "{rendered_tools}\n", - "\n", - "Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.`\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [[\"system\", systemPrompt], [\"user\", \"{input}\"]]\n", - ")\n", - "const chain = prompt.pipe(model).pipe(new JsonOutputParser()).pipe(toolChainRunnable)\n", - "await chain.invoke({ input: \"what's 3 plus 1132\", rendered_tools: renderedTools })" - ] - }, - { - "cell_type": "markdown", - "id": "b4a9c5aa-f60a-4017-af6f-1ff6e04bfb61", - "metadata": {}, - "source": [ - "## Returning tool inputs\n", - "\n", - "It can be helpful to return not only tool outputs but also tool inputs. We can easily do this with LCEL by `RunnablePassthrough.assign`-ing the tool output. 
This will take whatever the input is to the RunnablePassrthrough components (assumed to be a dictionary) and add a key to it while still passing through everything that's currently in the input:" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "45404406-859d-4caa-8b9d-5838162c80a0", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 13, + "id": "90187d07", + "metadata": {}, + "outputs": [], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "\n", + "const multiplyTool = tool((input) => {\n", + " return (input.first_int * input.second_int).toString()\n", + "}, {\n", + " name: \"multiply\",\n", + " description: \"Multiply two integers together.\",\n", + " schema: z.object({\n", + " first_int: z.number(),\n", + " second_int: z.number(),\n", + " })\n", + "})\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "d7009e1a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "multiply\n", + "Multiply two integers together.\n" + ] + } + ], + "source": [ + "console.log(multiplyTool.name)\n", + "console.log(multiplyTool.description)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "be77e780", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "20\n" + ] + } + ], + "source": [ + "await multiplyTool.invoke({ first_int: 4, second_int: 5 })" + ] + }, + { + "cell_type": "markdown", + "id": "15dd690e-e54d-4209-91a4-181f69a452ac", + "metadata": {}, + "source": [ + "## Creating our prompt\n", + "\n", + "We'll want to write a prompt that specifies the tools the model has access to, the arguments to those tools, and the desired output format of the model. In this case we'll instruct it to output a JSON blob of the form `{\"name\": \"...\", \"arguments\": {...}}`.\n", + "\n", + "```{=mdx}\n", + ":::tip\n", + "As of `langchain` version `0.2.8`, the `renderTextDescription` function now supports [OpenAI-formatted tools](https://api.js.langchain.com/interfaces/langchain_core.language_models_base.ToolDefinition.html).\n", + ":::\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "c64818f0-9364-423c-922e-bdfb8f01e726", + "metadata": {}, + "outputs": [], + "source": [ + "import { renderTextDescription } from \"langchain/tools/render\";\n", + "\n", + "const renderedTools = renderTextDescription([multiplyTool])" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "63552d4d-8bd6-4aca-8805-56e236f6552d", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const systemPrompt = `You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n", + "\n", + "{rendered_tools}\n", + "\n", + "Given the user input, return the name and input of the tool to use. 
Return your response as a JSON blob with 'name' and 'arguments' keys.`;\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [[\"system\", systemPrompt], [\"user\", \"{input}\"]]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "14df2cd5-b6fa-4b10-892d-e8692c7931e5", + "metadata": {}, + "source": [ + "## Adding an output parser\n", + "\n", + "We'll use the `JsonOutputParser` for parsing our models output to JSON.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from '@theme/ChatModelTabs';\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "f129f5bd-127c-4c95-8f34-8f437da7ca8f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ name: 'multiply', arguments: [ 13, 4 ] }\n" + ] + } + ], + "source": [ + "import { JsonOutputParser } from \"@langchain/core/output_parsers\";\n", + "const chain = prompt.pipe(model).pipe(new JsonOutputParser())\n", + "await chain.invoke({ input: \"what's thirteen times 4\", rendered_tools: renderedTools })" + ] + }, + { + "cell_type": "markdown", + "id": "8e29dd4c-8eb5-457f-92d1-8add076404dc", + "metadata": {}, + "source": [ + "## Invoking the tool\n", + "\n", + "We can invoke the tool as part of the chain by passing along the model-generated \"arguments\" to it:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ name: 'add', arguments: [ 3, 1132 ], output: '1135' }\n" - ] + "cell_type": "code", + "execution_count": 19, + "id": "0555b384-fde6-4404-86e0-7ea199003d58", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "52\n" + ] + } + ], + "source": [ + "import { RunnableLambda, RunnablePick } from \"@langchain/core/runnables\"\n", + "\n", + "const chain = prompt.pipe(model).pipe(new JsonOutputParser()).pipe(new RunnablePick(\"arguments\")).pipe(new RunnableLambda({ func: (input) => multiplyTool.invoke({\n", + " first_int: input[0],\n", + " second_int: input[1]\n", + "}) }))\n", + "await chain.invoke({ input: \"what's thirteen times 4\", rendered_tools: renderedTools })" + ] + }, + { + "cell_type": "markdown", + "id": "8d60b2cb-6ce0-48fc-8d18-d2337161a53d", + "metadata": {}, + "source": [ + "## Choosing from multiple tools\n", + "\n", + "Suppose we have multiple tools we want the chain to be able to choose from:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "95c86d32-ee45-4c87-a28c-14eff19b49e9", + "metadata": {}, + "outputs": [], + "source": [ + "const addTool = tool((input) => {\n", + " return (input.first_int + input.second_int).toString()\n", + "}, {\n", + " name: \"add\",\n", + " description: \"Add two integers together.\",\n", + " schema: z.object({\n", + " first_int: z.number(),\n", + " second_int: z.number(),\n", + " }),\n", + "});\n", + "\n", + "const exponentiateTool = tool((input) => {\n", + " return Math.pow(input.first_int, input.second_int).toString()\n", + "}, {\n", + " name: \"exponentiate\",\n", + " description: \"Exponentiate the base to the exponent power.\",\n", + " schema: z.object({\n", + " first_int: z.number(),\n", + " second_int: z.number(),\n", + " }),\n", + "});\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "748405ff-4c85-4bd7-82e1-30458b5a4106", + "metadata": {}, + "source": [ + "With function calling, we can do this like so:" + ] + }, + { + "cell_type": "markdown", + "id": "eb3aa89e-40e1-45ec-b1f3-ab28cfc8e42d", + "metadata": {}, + "source": [ + "If we want to run the model selected tool, we can do so 
using a function that returns the tool based on the model output. Specifically, our function will return its own subchain that gets the \"arguments\" part of the model output and passes it to the chosen tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "db254773-5b8e-43d0-aabe-c21566c154cd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1135\n" + ] + } + ], + "source": [ + "import { StructuredToolInterface } from \"@langchain/core/tools\"\n", + "\n", + "const tools = [addTool, exponentiateTool, multiplyTool]\n", + "\n", + "const toolChain = (modelOutput) => {\n", + " const toolMap: Record<string, StructuredToolInterface> = Object.fromEntries(tools.map(tool => [tool.name, tool]))\n", + " const chosenTool = toolMap[modelOutput.name]\n", + " return new RunnablePick(\"arguments\").pipe(new RunnableLambda({ func: (input) => chosenTool.invoke({\n", + " first_int: input[0],\n", + " second_int: input[1]\n", + " }) }))\n", + "}\n", + "const toolChainRunnable = new RunnableLambda({\n", + " func: toolChain\n", + "})\n", + "\n", + "const renderedTools = renderTextDescription(tools)\n", + "const systemPrompt = `You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n", + "\n", + "{rendered_tools}\n", + "\n", + "Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.`\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [[\"system\", systemPrompt], [\"user\", \"{input}\"]]\n", + ")\n", + "const chain = prompt.pipe(model).pipe(new JsonOutputParser()).pipe(toolChainRunnable)\n", + "await chain.invoke({ input: \"what's 3 plus 1132\", rendered_tools: renderedTools })" + ] + }, + { + "cell_type": "markdown", + "id": "b4a9c5aa-f60a-4017-af6f-1ff6e04bfb61", + "metadata": {}, + "source": [ + "## Returning tool inputs\n", + "\n", + "It can be helpful to return not only tool outputs but also tool inputs. We can easily do this with LCEL by `RunnablePassthrough.assign`-ing the tool output. 
This will take whatever the input is to the RunnablePassrthrough components (assumed to be a dictionary) and add a key to it while still passing through everything that's currently in the input:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "45404406-859d-4caa-8b9d-5838162c80a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ name: 'add', arguments: [ 3, 1132 ], output: '1135' }\n" + ] + } + ], + "source": [ + "import { RunnablePassthrough } from \"@langchain/core/runnables\"\n", + "\n", + "const chain = prompt.pipe(model).pipe(new JsonOutputParser()).pipe(RunnablePassthrough.assign({ output: toolChainRunnable }))\n", + "await chain.invoke({ input: \"what's 3 plus 1132\", rendered_tools: renderedTools })\n" + ] + }, + { + "cell_type": "markdown", + "id": "775252dc", + "metadata": {}, + "source": [ + "## What's next?\n", + "\n", + "This how-to guide shows the \"happy path\" when the model correctly outputs all the required tool information.\n", + "\n", + "In reality, if you're using more complex tools, you will start encountering errors from the model, especially for models that have not been fine tuned for tool calling and for less capable models.\n", + "\n", + "You will need to be prepared to add strategies to improve the output from the model; e.g.,\n", + "\n", + "- Provide few shot examples.\n", + "- Add error handling (e.g., catch the exception and feed it back to the LLM to ask it to correct its previous output)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { RunnablePassthrough } from \"@langchain/core/runnables\"\n", - "\n", - "const chain = prompt.pipe(model).pipe(new JsonOutputParser()).pipe(RunnablePassthrough.assign({ output: toolChainRunnable }))\n", - "await chain.invoke({ input: \"what's 3 plus 1132\", rendered_tools: renderedTools })\n" - ] - }, - { - "cell_type": "markdown", - "id": "775252dc", - "metadata": {}, - "source": [ - "## What's next?\n", - "\n", - "This how-to guide shows the \"happy path\" when the model correctly outputs all the required tool information.\n", - "\n", - "In reality, if you're using more complex tools, you will start encountering errors from the model, especially for models that have not been fine tuned for tool calling and for less capable models.\n", - "\n", - "You will need to be prepared to add strategies to improve the output from the model; e.g.,\n", - "\n", - "- Provide few shot examples.\n", - "- Add error handling (e.g., catch the exception and feed it back to the LLM to ask it to correct its previous output)." 
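The "add error handling" bullet in the guide above is easy to sketch with the pieces that notebook already defines. The following is an illustrative sketch only, not part of the notebook or this patch: the `invokeToolWithRetry` helper and the retry wording are assumptions, and it relies on the `prompt`, `model`, `systemPrompt`, `renderedTools`, and `toolChainRunnable` values defined earlier in that guide.

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { JsonOutputParser } from "@langchain/core/output_parsers";

// Hypothetical helper: if parsing or tool selection throws, feed the error
// message back to the model and ask it to correct its previous output.
const invokeToolWithRetry = async (input: string) => {
  const chain = prompt.pipe(model).pipe(new JsonOutputParser()).pipe(toolChainRunnable);
  try {
    return await chain.invoke({ input, rendered_tools: renderedTools });
  } catch (e) {
    // Re-prompt with the original question plus the error so the model can fix its output.
    const retryPrompt = ChatPromptTemplate.fromMessages([
      ["system", systemPrompt],
      ["user", "{input}"],
      ["user", "Your previous reply failed with this error: {error}. Return only a corrected JSON blob with 'name' and 'arguments' keys."],
    ]);
    const retryChain = retryPrompt.pipe(model).pipe(new JsonOutputParser()).pipe(toolChainRunnable);
    return await retryChain.invoke({
      input,
      rendered_tools: renderedTools,
      error: (e as Error).message,
    });
  }
};

await invokeToolWithRetry("what's 3 plus 1132");
```

A production version of this idea would typically also cap the number of retries and validate the parsed arguments against the chosen tool's schema before invoking it.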
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/trim_messages.ipynb b/docs/core_docs/docs/how_to/trim_messages.ipynb index b52b7223a5a5..d4f7748c98cb 100644 --- a/docs/core_docs/docs/how_to/trim_messages.ipynb +++ b/docs/core_docs/docs/how_to/trim_messages.ipynb @@ -1,810 +1,810 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "b5ee5b75-6876-4d62-9ade-5a7a808ae5a2", - "metadata": {}, - "source": [ - "# How to trim messages\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Messages](/docs/concepts/#messages)\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Chaining](/docs/how_to/sequence/)\n", - "- [Chat history](/docs/concepts/#chat-history)\n", - "\n", - "The methods in this guide also require `@langchain/core>=0.2.8`.\n", - "Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", - "\n", - ":::\n", - "\n", - "All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message is history, you'll need to manage the length of the messages you're passing in to the model.\n", - "\n", - "The `trimMessages` util provides some basic strategies for trimming a list of messages to be of a certain token length.\n", - "\n", - "## Getting the last `maxTokens` tokens\n", - "\n", - "To get the last `maxTokens` in the list of Messages we can set `strategy: \"last\"`. Notice that for our `tokenCounter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). 
It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "c974633b-3bd0-4844-8a8f-85e3e25f13fe", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"role\": \"human\",\n", - " \"content\": \"and who is harrison chasing anyways\"\n", - "}\n", - "\n", - "{\n", - " \"role\": \"ai\",\n", - " \"content\": \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n", - "}\n", - "\n", - "{\n", - " \"role\": \"human\",\n", - " \"content\": \"what do you call a speechless parrot\"\n", - "}\n" - ] - } - ], - "source": [ - "import { AIMessage, HumanMessage, SystemMessage, trimMessages } from \"@langchain/core/messages\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const messages = [\n", - " new SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n", - " new HumanMessage(\"i wonder why it's called langchain\"),\n", - " new AIMessage(\n", - " 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n", - " ),\n", - " new HumanMessage(\"and who is harrison chasing anyways\"),\n", - " new AIMessage(\n", - " \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n", - " ),\n", - " new HumanMessage(\"what do you call a speechless parrot\"),\n", - "];\n", - "\n", - "const trimmed = await trimMessages(\n", - " messages,\n", - " {\n", - " maxTokens: 45,\n", - " strategy: \"last\",\n", - " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", - " }\n", - ");\n", - "\n", - "console.log(trimmed.map((x) => JSON.stringify({\n", - " role: x._getType(),\n", - " content: x.content,\n", - "}, null, 2)).join(\"\\n\\n\"));" - ] - }, - { - "cell_type": "markdown", - "id": "d3f46654-c4b2-4136-b995-91c3febe5bf9", - "metadata": {}, - "source": [ - "If we want to always keep the initial system message we can specify `includeSystem: true`:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "589b0223-3a73-44ec-8315-2dba3ee6117d", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "b5ee5b75-6876-4d62-9ade-5a7a808ae5a2", + "metadata": {}, + "source": [ + "# How to trim messages\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Messages](/docs/concepts/messages)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Chaining](/docs/how_to/sequence/)\n", + "- [Chat history](/docs/concepts/chat_history)\n", + "\n", + "The methods in this guide also require `@langchain/core>=0.2.8`.\n", + "Please see here for a [guide on upgrading](/docs/how_to/installation/#installing-integration-packages).\n", + "\n", + ":::\n", + "\n", + "All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message is history, you'll need to manage the length of the messages you're passing in to the model.\n", + "\n", + "The `trimMessages` util provides some basic strategies for trimming a list of messages to be of a certain token length.\n", + "\n", + "## Getting the last `maxTokens` tokens\n", + "\n", + "To get the last `maxTokens` in the list of Messages we can set `strategy: \"last\"`. 
Notice that for our `tokenCounter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " SystemMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: undefined\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'what do you call a speechless parrot',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'what do you call a speechless parrot',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "await trimMessages(\n", - " messages,\n", - " {\n", - " maxTokens: 45,\n", - " strategy: \"last\",\n", - " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", - " includeSystem: true\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "8a8b542c-04d1-4515-8d82-b999ea4fac4f", - "metadata": {}, - "source": [ - "If we want to allow splitting up the contents of a message we can specify `allowPartial: true`:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "8c46a209-dddd-4d01-81f6-f6ae55d3225c", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "id": "c974633b-3bd0-4844-8a8f-85e3e25f13fe", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"role\": \"human\",\n", + " \"content\": \"and who is harrison chasing anyways\"\n", + "}\n", + "\n", + "{\n", + " \"role\": \"ai\",\n", + " \"content\": \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n", + "}\n", + "\n", + "{\n", + " \"role\": \"human\",\n", + " \"content\": \"what do you call a speechless parrot\"\n", + "}\n" + ] + } + ], + "source": [ + "import { AIMessage, HumanMessage, SystemMessage, trimMessages } from \"@langchain/core/messages\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const messages = [\n", + " new SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n", + " 
new HumanMessage(\"i wonder why it's called langchain\"),\n", + " new AIMessage(\n", + " 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n", + " ),\n", + " new HumanMessage(\"and who is harrison chasing anyways\"),\n", + " new AIMessage(\n", + " \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n", + " ),\n", + " new HumanMessage(\"what do you call a speechless parrot\"),\n", + "];\n", + "\n", + "const trimmed = await trimMessages(\n", + " messages,\n", + " {\n", + " maxTokens: 45,\n", + " strategy: \"last\",\n", + " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", + " }\n", + ");\n", + "\n", + "console.log(trimmed.map((x) => JSON.stringify({\n", + " role: x._getType(),\n", + " content: x.content,\n", + "}, null, 2)).join(\"\\n\\n\"));" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " SystemMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: undefined\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'what do you call a speechless parrot',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'what do you call a speechless parrot',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "await trimMessages(\n", - " messages,\n", - " {\n", - " maxTokens: 50,\n", - " strategy: \"last\",\n", - " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", - " includeSystem: true,\n", - " allowPartial: true\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "306adf9c-41cd-495c-b4dc-e4f43dd7f8f8", - "metadata": {}, - "source": [ - "If we need to make sure that our first message (excluding the system message) is always of a specific type, we can specify `startOn`:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "878a730b-fe44-4e9d-ab65-7b8f7b069de8", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "d3f46654-c4b2-4136-b995-91c3febe5bf9", + "metadata": {}, + "source": [ + "If we want to always keep the initial system message we can specify `includeSystem: true`:" + ] + }, { - "name": "stdout", - "output_type": "stream", - 
"text": [ - "[\n", - " SystemMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'and who is harrison chasing anyways',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'and who is harrison chasing anyways',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: undefined\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'what do you call a speechless parrot',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'what do you call a speechless parrot',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "await trimMessages(\n", - " messages,\n", - " {\n", - " maxTokens: 60,\n", - " strategy: \"last\",\n", - " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", - " includeSystem: true,\n", - " startOn: \"human\"\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "7f5d391d-235b-4091-b2de-c22866b478f3", - "metadata": {}, - "source": [ - "## Getting the first `maxTokens` tokens\n", - "\n", - "We can perform the flipped operation of getting the *first* `maxTokens` by specifying `strategy: \"first\"`:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "5f56ae54-1a39-4019-9351-3b494c003d5b", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 5, + "id": "589b0223-3a73-44ec-8315-2dba3ee6117d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " },\n", + " AIMessage {\n", + " 
lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: undefined\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'what do you call a speechless parrot',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'what do you call a speechless parrot',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "await trimMessages(\n", + " messages,\n", + " {\n", + " maxTokens: 45,\n", + " strategy: \"last\",\n", + " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", + " includeSystem: true\n", + " }\n", + ");" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " SystemMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"i wonder why it's called langchain\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: \"i wonder why it's called langchain\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "await trimMessages(\n", - " messages,\n", - " {\n", - " maxTokens: 45,\n", - " strategy: \"first\",\n", - " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "ab70bf70-1e5a-4d51-b9b8-a823bf2cf532", - "metadata": {}, - "source": [ - "## Writing a custom token counter\n", - "\n", - "We can write a custom token counter function that takes in a list of messages and returns an int." 
- ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "1c1c3b1e-2ece-49e7-a3b6-e69877c1633b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "8a8b542c-04d1-4515-8d82-b999ea4fac4f", + "metadata": {}, + "source": [ + "If we want to allow splitting up the contents of a message we can specify `allowPartial: true`:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: undefined\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'what do you call a speechless parrot',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'what do you call a speechless parrot',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { encodingForModel } from '@langchain/core/utils/tiktoken';\n", - "import { BaseMessage, HumanMessage, AIMessage, ToolMessage, SystemMessage, MessageContent, MessageContentText } from '@langchain/core/messages';\n", - "\n", - "async function strTokenCounter(messageContent: MessageContent): Promise {\n", - " if (typeof messageContent === 'string') {\n", - " return (\n", - " await encodingForModel(\"gpt-4\")\n", - " ).encode(messageContent).length;\n", - " } else {\n", - " if (messageContent.every((x) => x.type === \"text\" && x.text)) {\n", - " return (\n", - " await encodingForModel(\"gpt-4\")\n", - " ).encode((messageContent as MessageContentText[]).map(({ text }) => text).join(\"\")).length;\n", - " }\n", - " throw new Error(`Unsupported message content ${JSON.stringify(messageContent)}`);\n", - " }\n", - "}\n", - "\n", - "async function tiktokenCounter(messages: BaseMessage[]): Promise {\n", - " let numTokens = 3; // every reply is primed with <|start|>assistant<|message|>\n", - " const tokensPerMessage = 3;\n", - " const tokensPerName = 1;\n", - "\n", - " for (const msg of messages) {\n", - " let role: string;\n", - " if (msg instanceof HumanMessage) {\n", - " role = 'user';\n", - " } else if (msg instanceof AIMessage) {\n", - " role = 'assistant';\n", - " } else if (msg instanceof ToolMessage) {\n", - " role = 'tool';\n", - " } else if (msg instanceof SystemMessage) {\n", - " role = 'system';\n", - " } else {\n", - " throw new Error(`Unsupported message type ${msg.constructor.name}`);\n", - " }\n", - "\n", - " numTokens += tokensPerMessage + (await strTokenCounter(role)) + (await strTokenCounter(msg.content));\n", - "\n", - " if (msg.name) {\n", - " numTokens += tokensPerName + (await strTokenCounter(msg.name));\n", - " }\n", - " }\n", - "\n", - " return numTokens;\n", - "}\n", - "\n", - "await trimMessages(messages, {\n", - " maxTokens: 45,\n", - " 
strategy: 'last',\n", - " tokenCounter: tiktokenCounter,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "4b2a672b-c007-47c5-9105-617944dc0a6a", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "`trimMessages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "96aa29b2-01e0-437c-a1ab-02fb0141cb57", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 10, + "id": "8c46a209-dddd-4d01-81f6-f6ae55d3225c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: undefined\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'what do you call a speechless parrot',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'what do you call a speechless parrot',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "await trimMessages(\n", + " messages,\n", + " {\n", + " maxTokens: 50,\n", + " strategy: \"last\",\n", + " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", + " includeSystem: true,\n", + " allowPartial: true\n", + " }\n", + ");" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Thanks! I do try to keep things light. But for a more serious answer, \"LangChain\" is likely named to reflect its focus on language processing and the way it connects different components or models together—essentially forming a \"chain\" of linguistic operations. The \"Lang\" part emphasizes its focus on language, while \"Chain\" highlights the interconnected workflows it aims to facilitate.',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Thanks! I do try to keep things light. 
But for a more serious answer, \"LangChain\" is likely named to reflect its focus on language processing and the way it connects different components or models together—essentially forming a \"chain\" of linguistic operations. The \"Lang\" part emphasizes its focus on language, while \"Chain\" highlights the interconnected workflows it aims to facilitate.',\n", - " name: undefined,\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: 77, promptTokens: 59, totalTokens: 136 },\n", - " finish_reason: 'stop'\n", - " },\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: { input_tokens: 59, output_tokens: 77, total_tokens: 136 }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "import { trimMessages } from \"@langchain/core/messages\";\n", - "\n", - "const llm = new ChatOpenAI({ model: \"gpt-4o\" })\n", - "\n", - "// Notice we don't pass in messages. This creates\n", - "// a RunnableLambda that takes messages as input\n", - "const trimmer = trimMessages({\n", - " maxTokens: 45,\n", - " strategy: \"last\",\n", - " tokenCounter: llm,\n", - " includeSystem: true,\n", - "})\n", - "\n", - "const chain = trimmer.pipe(llm);\n", - "await chain.invoke(messages)" - ] - }, - { - "cell_type": "markdown", - "id": "4d91d390-e7f7-467b-ad87-d100411d7a21", - "metadata": {}, - "source": [ - "Looking at [the LangSmith trace](https://smith.langchain.com/public/3793312c-a74b-4e77-92b4-f91b3d74ac5f/r) we can see that before the messages are passed to the model they are first trimmed.\n", - "\n", - "Looking at just the trimmer, we can see that it's a Runnable object that can be invoked like all Runnables:" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "1ff02d0a-353d-4fac-a77c-7c2c5262abd9", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "306adf9c-41cd-495c-b4dc-e4f43dd7f8f8", + "metadata": {}, + "source": [ + "If we need to make sure that our first message (excluding the system message) is always of a specific type, we can specify `startOn`:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " SystemMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: \"you're a good assistant, you always respond with a joke.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'Hmmm let me think.\\n' +\n", - " '\\n' +\n", - " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: undefined\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: 
true,\n", - " lc_kwargs: {\n", - " content: 'what do you call a speechless parrot',\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'what do you call a speechless parrot',\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "await trimmer.invoke(messages)" - ] - }, - { - "cell_type": "markdown", - "id": "dc4720c8-4062-4ebc-9385-58411202ce6e", - "metadata": {}, - "source": [ - "## Using with ChatMessageHistory\n", - "\n", - "Trimming messages is especially useful when [working with chat histories](/docs/how_to/message_history/), which can get arbitrarily long:" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "a9517858-fc2f-4dc3-898d-bf98a0e905a0", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 12, + "id": "878a730b-fe44-4e9d-ab65-7b8f7b069de8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'and who is harrison chasing anyways',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'and who is harrison chasing anyways',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: undefined\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'what do you call a speechless parrot',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'what do you call a speechless parrot',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "await trimMessages(\n", + " messages,\n", + " {\n", + " maxTokens: 60,\n", + " strategy: \"last\",\n", + " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", + " includeSystem: true,\n", + " startOn: \"human\"\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": 
"7f5d391d-235b-4091-b2de-c22866b478f3", + "metadata": {}, + "source": [ + "## Getting the first `maxTokens` tokens\n", + "\n", + "We can perform the flipped operation of getting the *first* `maxTokens` by specifying `strategy: \"first\"`:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "5f56ae54-1a39-4019-9351-3b494c003d5b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"i wonder why it's called langchain\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: \"i wonder why it's called langchain\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "await trimMessages(\n", + " messages,\n", + " {\n", + " maxTokens: 45,\n", + " strategy: \"first\",\n", + " tokenCounter: new ChatOpenAI({ modelName: \"gpt-4\" }),\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "ab70bf70-1e5a-4d51-b9b8-a823bf2cf532", + "metadata": {}, + "source": [ + "## Writing a custom token counter\n", + "\n", + "We can write a custom token counter function that takes in a list of messages and returns an int." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "1c1c3b1e-2ece-49e7-a3b6-e69877c1633b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: undefined\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'what do you call a speechless parrot',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'what do you call a speechless parrot',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { encodingForModel } from '@langchain/core/utils/tiktoken';\n", + "import { BaseMessage, HumanMessage, AIMessage, ToolMessage, SystemMessage, MessageContent, MessageContentText } from '@langchain/core/messages';\n", + "\n", + "async function strTokenCounter(messageContent: MessageContent): Promise {\n", + " if (typeof messageContent === 'string') {\n", + " return (\n", + " await encodingForModel(\"gpt-4\")\n", + " ).encode(messageContent).length;\n", + " } else {\n", + " if (messageContent.every((x) => x.type === \"text\" && x.text)) {\n", + " return (\n", + " await encodingForModel(\"gpt-4\")\n", + " ).encode((messageContent as MessageContentText[]).map(({ text }) => text).join(\"\")).length;\n", + " }\n", + " throw new Error(`Unsupported message content ${JSON.stringify(messageContent)}`);\n", + " }\n", + "}\n", + "\n", + "async function tiktokenCounter(messages: BaseMessage[]): Promise {\n", + " let numTokens = 3; // every reply is primed with <|start|>assistant<|message|>\n", + " const tokensPerMessage = 3;\n", + " const tokensPerName = 1;\n", + "\n", + " for (const msg of messages) {\n", + " let role: string;\n", + " if (msg instanceof HumanMessage) {\n", + " role = 'user';\n", + " } else if (msg instanceof AIMessage) {\n", + " role = 'assistant';\n", + " } else if (msg instanceof ToolMessage) {\n", + " role = 'tool';\n", + " } else if (msg instanceof SystemMessage) {\n", + " role = 'system';\n", + " } else {\n", + " throw new Error(`Unsupported message type ${msg.constructor.name}`);\n", + " }\n", + "\n", + " numTokens += tokensPerMessage + (await strTokenCounter(role)) + (await strTokenCounter(msg.content));\n", + "\n", + " if (msg.name) {\n", + " numTokens += tokensPerName + (await strTokenCounter(msg.name));\n", + " }\n", + " }\n", + "\n", + " return numTokens;\n", + "}\n", + "\n", + "await trimMessages(messages, {\n", + " maxTokens: 45,\n", + " strategy: 'last',\n", + " tokenCounter: tiktokenCounter,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "4b2a672b-c007-47c5-9105-617944dc0a6a", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + 
"`trimMessages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'A \"polly-no-want-a-cracker\"!',\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'A \"polly-no-want-a-cracker\"!',\n", - " name: undefined,\n", - " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: 11, promptTokens: 57, totalTokens: 68 },\n", - " finish_reason: 'stop'\n", - " },\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: { input_tokens: 57, output_tokens: 11, total_tokens: 68 }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 17, + "id": "96aa29b2-01e0-437c-a1ab-02fb0141cb57", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Thanks! I do try to keep things light. But for a more serious answer, \"LangChain\" is likely named to reflect its focus on language processing and the way it connects different components or models together—essentially forming a \"chain\" of linguistic operations. The \"Lang\" part emphasizes its focus on language, while \"Chain\" highlights the interconnected workflows it aims to facilitate.',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Thanks! I do try to keep things light. But for a more serious answer, \"LangChain\" is likely named to reflect its focus on language processing and the way it connects different components or models together—essentially forming a \"chain\" of linguistic operations. The \"Lang\" part emphasizes its focus on language, while \"Chain\" highlights the interconnected workflows it aims to facilitate.',\n", + " name: undefined,\n", + " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: 77, promptTokens: 59, totalTokens: 136 },\n", + " finish_reason: 'stop'\n", + " },\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: { input_tokens: 59, output_tokens: 77, total_tokens: 136 }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { trimMessages } from \"@langchain/core/messages\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o\" })\n", + "\n", + "// Notice we don't pass in messages. 
This creates\n", + "// a RunnableLambda that takes messages as input\n", + "const trimmer = trimMessages({\n", + " maxTokens: 45,\n", + " strategy: \"last\",\n", + " tokenCounter: llm,\n", + " includeSystem: true,\n", + "})\n", + "\n", + "const chain = trimmer.pipe(llm);\n", + "await chain.invoke(messages)" + ] + }, + { + "cell_type": "markdown", + "id": "4d91d390-e7f7-467b-ad87-d100411d7a21", + "metadata": {}, + "source": [ + "Looking at [the LangSmith trace](https://smith.langchain.com/public/3793312c-a74b-4e77-92b4-f91b3d74ac5f/r) we can see that before the messages are passed to the model they are first trimmed.\n", + "\n", + "Looking at just the trimmer, we can see that it's a Runnable object that can be invoked like all Runnables:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "1ff02d0a-353d-4fac-a77c-7c2c5262abd9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: \"you're a good assistant, you always respond with a joke.\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " },\n", + " AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'Hmmm let me think.\\n' +\n", + " '\\n' +\n", + " \"Why, he's probably chasing after the last cup of coffee in the office!\",\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: undefined\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'what do you call a speechless parrot',\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'what do you call a speechless parrot',\n", + " name: undefined,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "await trimmer.invoke(messages)" + ] + }, + { + "cell_type": "markdown", + "id": "dc4720c8-4062-4ebc-9385-58411202ce6e", + "metadata": {}, + "source": [ + "## Using with ChatMessageHistory\n", + "\n", + "Trimming messages is especially useful when [working with chat histories](/docs/how_to/message_history/), which can get arbitrarily long:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "a9517858-fc2f-4dc3-898d-bf98a0e905a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'A \"polly-no-want-a-cracker\"!',\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 
'messages' ],\n", + " content: 'A \"polly-no-want-a-cracker\"!',\n", + " name: undefined,\n", + " additional_kwargs: { function_call: undefined, tool_calls: undefined },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: 11, promptTokens: 57, totalTokens: 68 },\n", + " finish_reason: 'stop'\n", + " },\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: { input_tokens: 57, output_tokens: 11, total_tokens: 68 }\n", + "}\n" + ] + } + ], + "source": [ + "import { InMemoryChatMessageHistory } from \"@langchain/core/chat_history\";\n", + "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "import { HumanMessage, trimMessages } from \"@langchain/core/messages\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const chatHistory = new InMemoryChatMessageHistory(messages.slice(0, -1))\n", + "\n", + "const dummyGetSessionHistory = async (sessionId: string) => {\n", + " if (sessionId !== \"1\") {\n", + " throw new Error(\"Session not found\");\n", + " }\n", + " return chatHistory;\n", + " }\n", + "\n", + " const llm = new ChatOpenAI({ model: \"gpt-4o\" });\n", + "\n", + " const trimmer = trimMessages({\n", + " maxTokens: 45,\n", + " strategy: \"last\",\n", + " tokenCounter: llm,\n", + " includeSystem: true,\n", + " });\n", + "\n", + "const chain = trimmer.pipe(llm);\n", + "const chainWithHistory = new RunnableWithMessageHistory({\n", + " runnable: chain,\n", + " getMessageHistory: dummyGetSessionHistory,\n", + "})\n", + "await chainWithHistory.invoke(\n", + " [new HumanMessage(\"what do you call a speechless parrot\")],\n", + " { configurable: { sessionId: \"1\"} },\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "556b7b4c-43cb-41de-94fc-1a41f4ec4d2e", + "metadata": {}, + "source": [ + "Looking at [the LangSmith trace](https://smith.langchain.com/public/cfc76880-5895-4852-b7d0-12916448bdb2/r) we can see that we retrieve all of our messages but before the messages are passed to the model they are trimmed to be just the system message and last human message." + ] + }, + { + "cell_type": "markdown", + "id": "75dc7b84-b92f-44e7-8beb-ba22398e4efb", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core.messages.trimMessages.html)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { InMemoryChatMessageHistory } from \"@langchain/core/chat_history\";\n", - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", - "import { HumanMessage, trimMessages } from \"@langchain/core/messages\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const chatHistory = new InMemoryChatMessageHistory(messages.slice(0, -1))\n", - "\n", - "const dummyGetSessionHistory = async (sessionId: string) => {\n", - " if (sessionId !== \"1\") {\n", - " throw new Error(\"Session not found\");\n", - " }\n", - " return chatHistory;\n", - " }\n", - "\n", - " const llm = new ChatOpenAI({ model: \"gpt-4o\" });\n", - "\n", - " const trimmer = trimMessages({\n", - " maxTokens: 45,\n", - " strategy: \"last\",\n", - " tokenCounter: llm,\n", - " includeSystem: true,\n", - " });\n", - "\n", - "const chain = trimmer.pipe(llm);\n", - "const chainWithHistory = new RunnableWithMessageHistory({\n", - " runnable: chain,\n", - " getMessageHistory: dummyGetSessionHistory,\n", - "})\n", - "await chainWithHistory.invoke(\n", - " [new HumanMessage(\"what do you call a speechless parrot\")],\n", - " { configurable: { sessionId: \"1\"} },\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "556b7b4c-43cb-41de-94fc-1a41f4ec4d2e", - "metadata": {}, - "source": [ - "Looking at [the LangSmith trace](https://smith.langchain.com/public/cfc76880-5895-4852-b7d0-12916448bdb2/r) we can see that we retrieve all of our messages but before the messages are passed to the model they are trimmed to be just the system message and last human message." - ] - }, - { - "cell_type": "markdown", - "id": "75dc7b84-b92f-44e7-8beb-ba22398e4efb", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For a complete description of all arguments head to the [API reference](https://api.js.langchain.com/functions/langchain_core.messages.trimMessages.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/how_to/vectorstore_retriever.mdx b/docs/core_docs/docs/how_to/vectorstore_retriever.mdx index 02ad2e257edd..fec4eec59114 100644 --- a/docs/core_docs/docs/how_to/vectorstore_retriever.mdx +++ b/docs/core_docs/docs/how_to/vectorstore_retriever.mdx @@ -5,8 +5,8 @@ This guide assumes familiarity with the following concepts: - [Vector stores](/docs/concepts/#vectorstores) -- [Retrievers](/docs/concepts/#retrievers) -- [Text splitters](/docs/concepts#text-splitters) +- [Retrievers](/docs/concepts/retrievers) +- [Text splitters](/docs/concepts/text_splitters) - [Chaining runnables](/docs/how_to/sequence/) ::: diff --git a/docs/core_docs/docs/how_to/vectorstores.mdx b/docs/core_docs/docs/how_to/vectorstores.mdx index b3138d9fe3c5..42a02a02065c 100644 --- a/docs/core_docs/docs/how_to/vectorstores.mdx +++ b/docs/core_docs/docs/how_to/vectorstores.mdx @@ -13,8 +13,8 @@ Head to [Integrations](/docs/integrations/vectorstores) for documentation on bui This guide assumes familiarity with the following concepts: - [Vector stores](/docs/concepts/#vectorstores) -- [Embeddings](/docs/concepts/#embedding-models) -- [Document loaders](/docs/concepts#document-loaders) +- [Embeddings](/docs/concepts/embedding_models) +- [Document loaders](/docs/concepts/document_loaders) ::: @@ -34,7 +34,7 @@ import ExampleLoader from "@examples/indexes/vector_stores/memory_fromdocs.ts"; {ExampleLoader} -Most of the time, you'll need to split the loaded text as a preparation step. See [this section](/docs/concepts/#text-splitters) to learn more about text splitters. +Most of the time, you'll need to split the loaded text as a preparation step. See [this section](/docs/concepts/text_splitters) to learn more about text splitters. ## Creating a new index from texts diff --git a/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx b/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx index e5821b25a51a..c3b56637265d 100644 --- a/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx +++ b/docs/core_docs/docs/integrations/chat/alibaba_tongyi.mdx @@ -32,5 +32,5 @@ import Tongyi from "@examples/models/chat/integration_alitongyi.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/anthropic.ipynb b/docs/core_docs/docs/integrations/chat/anthropic.ipynb index b46785ed8e89..b8761588d277 100644 --- a/docs/core_docs/docs/integrations/chat/anthropic.ipynb +++ b/docs/core_docs/docs/integrations/chat/anthropic.ipynb @@ -1,995 +1,995 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Anthropic\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatAnthropic\n", - "\n", - "[Anthropic](https://www.anthropic.com/) is an AI safety and research company. 
They are the creator of Claude.\n", - "\n", - "This will help you getting started with Anthropic [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatAnthropic` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/anthropic/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatAnthropic](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html) | [`@langchain/anthropic`](https://www.npmjs.com/package/@langchain/anthropic) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/anthropic?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/anthropic?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "You'll need to sign up and obtain an [Anthropic API key](https://www.anthropic.com/), and install the `@langchain/anthropic` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [Anthropic's website](https://www.anthropic.com/) to sign up to Anthropic and generate an API key. 
Once you've done this set the `ANTHROPIC_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export ANTHROPIC_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `ChatAnthropic` integration lives in the `@langchain/anthropic` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/anthropic @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\" \n", - "\n", - "const llm = new ChatAnthropic({\n", - " model: \"claude-3-haiku-20240307\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"msg_013WBXXiggy6gMbAUY6NpsuU\",\n", - " \"content\": \"Voici la traduction en français :\\n\\nJ'adore la programmation.\",\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_013WBXXiggy6gMbAUY6NpsuU\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-haiku-20240307\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 29,\n", - " \"output_tokens\": 20\n", - " }\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_013WBXXiggy6gMbAUY6NpsuU\",\n", - " \"model\": \"claude-3-haiku-20240307\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 29,\n", - " \"output_tokens\": 20\n", - " },\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 29,\n", - " \"output_tokens\": 20,\n", - " \"total_tokens\": 49\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Anthropic\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Voici la traduction en français :\n", - "\n", - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatAnthropic\n", + "\n", + "[Anthropic](https://www.anthropic.com/) is an AI safety and research company. They are the creator of Claude.\n", + "\n", + "This will help you getting started with Anthropic [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatAnthropic` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/anthropic/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatAnthropic](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html) | [`@langchain/anthropic`](https://www.npmjs.com/package/@langchain/anthropic) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/anthropic?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/anthropic?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "You'll need to sign up and obtain an [Anthropic API key](https://www.anthropic.com/), and install the `@langchain/anthropic` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [Anthropic's website](https://www.anthropic.com/) to sign up to Anthropic and generate an API key. 
Once you've done this set the `ANTHROPIC_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export ANTHROPIC_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatAnthropic` integration lives in the `@langchain/anthropic` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/anthropic @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"msg_01Ca52fpd1mcGRhH4spzAWr4\",\n", - " \"content\": \"Ich liebe das Programmieren.\",\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_01Ca52fpd1mcGRhH4spzAWr4\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-haiku-20240307\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 23,\n", - " \"output_tokens\": 11\n", - " }\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_01Ca52fpd1mcGRhH4spzAWr4\",\n", - " \"model\": \"claude-3-haiku-20240307\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 23,\n", - " \"output_tokens\": 11\n", - " },\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 23,\n", - " \"output_tokens\": 11,\n", - " \"total_tokens\": 34\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "8dac39db", - "metadata": {}, - "source": [ - "## Content blocks\n", - "\n", - "One key difference to note between Anthropic models and most others is that the contents of a single Anthropic AI message can either be a single string or a **list of content blocks**. 
For example when an Anthropic model [calls a tool](/docs/how_to/tool_calling), the tool invocation is part of the message content (as well as being exposed in the standardized `AIMessage.tool_calls` field):" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "f5994de0", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"msg_01DZGs9DyuashaYxJ4WWpWUP\",\n", - " \"content\": [\n", - " {\n", - " \"type\": \"text\",\n", - " \"text\": \"Here is the calculation for 2 + 2:\"\n", - " },\n", - " {\n", - " \"type\": \"tool_use\",\n", - " \"id\": \"toolu_01SQXBamkBr6K6NdHE7GWwF8\",\n", - " \"name\": \"calculator\",\n", - " \"input\": {\n", - " \"number1\": 2,\n", - " \"number2\": 2,\n", - " \"operation\": \"add\"\n", - " }\n", - " }\n", - " ],\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_01DZGs9DyuashaYxJ4WWpWUP\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-haiku-20240307\",\n", - " \"stop_reason\": \"tool_use\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 449,\n", - " \"output_tokens\": 100\n", - " }\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_01DZGs9DyuashaYxJ4WWpWUP\",\n", - " \"model\": \"claude-3-haiku-20240307\",\n", - " \"stop_reason\": \"tool_use\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 449,\n", - " \"output_tokens\": 100\n", - " },\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"calculator\",\n", - " \"args\": {\n", - " \"number1\": 2,\n", - " \"number2\": 2,\n", - " \"operation\": \"add\"\n", - " },\n", - " \"id\": \"toolu_01SQXBamkBr6K6NdHE7GWwF8\",\n", - " \"type\": \"tool_call\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 449,\n", - " \"output_tokens\": 100,\n", - " \"total_tokens\": 549\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { z } from \"zod\";\n", - "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", - "\n", - "const calculatorSchema = z.object({\n", - " operation: z\n", - " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", - " .describe(\"The type of operation to execute.\"),\n", - " number1: z.number().describe(\"The first number to operate on.\"),\n", - " number2: z.number().describe(\"The second number to operate on.\"),\n", - "});\n", - "\n", - "const calculatorTool = {\n", - " name: \"calculator\",\n", - " description: \"A simple calculator tool\",\n", - " input_schema: zodToJsonSchema(calculatorSchema),\n", - "};\n", - "\n", - "const toolCallingLlm = new ChatAnthropic({\n", - " model: \"claude-3-haiku-20240307\",\n", - "}).bindTools([calculatorTool]);\n", - "\n", - "const toolPrompt = ChatPromptTemplate.fromMessages([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant who always needs to use a calculator.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "// Chain your prompt and model together\n", - "const toolCallChain = 
toolPrompt.pipe(toolCallingLlm);\n", - "\n", - "await toolCallChain.invoke({\n", - " input: \"What is 2 + 2?\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "d452d4b6", - "metadata": {}, - "source": [ - "## Custom headers\n", - "\n", - "You can pass custom headers in your requests like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "41943f0a", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\" \n", + "\n", + "const llm = new ChatAnthropic({\n", + " model: \"claude-3-haiku-20240307\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"msg_019z4nWpShzsrbSHTWXWQh6z\",\n", - " \"content\": \"The sky appears blue due to a phenomenon called Rayleigh scattering. Here's a brief explanation:\\n\\n1) Sunlight is made up of different wavelengths of visible light, including all the colors of the rainbow.\\n\\n2) As sunlight passes through the atmosphere, the gases (mostly nitrogen and oxygen) cause the shorter wavelengths of light, such as violet and blue, to be scattered more easily than the longer wavelengths like red and orange.\\n\\n3) This scattering of the shorter blue wavelengths occurs in all directions by the gas molecules in the atmosphere.\\n\\n4) Our eyes are more sensitive to the scattered blue light than the scattered violet light, so we perceive the sky as having a blue color.\\n\\n5) The scattering is more pronounced for light traveling over longer distances through the atmosphere. 
This is why the sky appears even darker blue when looking towards the horizon.\\n\\nSo in essence, the selective scattering of the shorter blue wavelengths of sunlight by the gases in the atmosphere is what causes the sky to appear blue to our eyes during the daytime.\",\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_019z4nWpShzsrbSHTWXWQh6z\",\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 13,\n", - " \"output_tokens\": 236\n", - " }\n", - " },\n", - " \"response_metadata\": {\n", - " \"id\": \"msg_019z4nWpShzsrbSHTWXWQh6z\",\n", - " \"model\": \"claude-3-sonnet-20240229\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 13,\n", - " \"output_tokens\": 236\n", - " },\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 13,\n", - " \"output_tokens\": 236,\n", - " \"total_tokens\": 249\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const llmWithCustomHeaders = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - " maxTokens: 1024,\n", - " clientOptions: {\n", - " defaultHeaders: {\n", - " \"X-Api-Key\": process.env.ANTHROPIC_API_KEY,\n", - " },\n", - " },\n", - "});\n", - "\n", - "await llmWithCustomHeaders.invoke(\"Why is the sky blue?\");" - ] - }, - { - "cell_type": "markdown", - "id": "3c5e6d7a", - "metadata": {}, - "source": [ - "## Prompt caching\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution Compatibility\n", - "This feature is currently in beta.\n", - ":::\n", - "\n", - "```\n", - "\n", - "Anthropic supports [caching parts of your prompt](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching) in order to reduce costs for use-cases that require long context. You can cache tools and both entire messages and individual blocks.\n", - "\n", - "The initial request containing one or more blocks or tool definitions with a `\"cache_control\": { \"type\": \"ephemeral\" }` field will automatically cache that part of the prompt. This initial caching step will cost extra, but subsequent requests will be billed at a reduced rate. The cache has a lifetime of 5 minutes, but this is refereshed each time the cache is hit.\n", - "\n", - "There is also currently a minimum cacheable prompt length, which varies according to model. You can see this information [here](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#structuring-your-prompt).\n", - "\n", - "This currently requires you to initialize your model with a beta header. 
Here's an example of caching part of a system message that contains the LangChain [conceptual docs](/docs/concepts/):" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "5e02b056", - "metadata": {}, - "outputs": [], - "source": [ - "let CACHED_TEXT = \"...\";" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "bba739ed", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "CACHED_TEXT = `## Components\n", - "\n", - "LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs.\n", - "Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.\n", - "\n", - "### Chat models\n", - "\n", - "\n", - "\n", - "Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).\n", - "These are generally newer models (older models are generally \\`LLMs\\`, see below).\n", - "Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.\n", - "\n", - "Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input.\n", - "This gives them the same interface as LLMs (and simpler to use).\n", - "When a string is passed in as input, it will be converted to a \\`HumanMessage\\` under the hood before being passed to the underlying model.\n", - "\n", - "LangChain does not host any Chat Models, rather we rely on third party integrations.\n", - "\n", - "We have some standardized parameters when constructing ChatModels:\n", - "\n", - "- \\`model\\`: the name of the model\n", - "\n", - "Chat Models also accept other parameters that are specific to that integration.\n", - "\n", - ":::important\n", - "Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it.\n", - "Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.\n", - "Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.\n", - ":::\n", - "\n", - "For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).\n", - "\n", - "#### Multimodality\n", - "\n", - "Some chat models are multimodal, accepting images, audio and even video as inputs.\n", - "These are still less common, meaning model providers haven't standardized on the \"best\" way to define the API.\n", - "Multimodal outputs are even less common. As such, we've kept our multimodal abstractions fairly light weight\n", - "and plan to further solidify the multimodal APIs and interaction patterns as the field matures.\n", - "\n", - "In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format.\n", - "So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.\n", - "\n", - "For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).\n", - "\n", - "### LLMs\n", - "\n", - "\n", - "\n", - ":::caution\n", - "Pure text-in/text-out LLMs tend to be older or lower-level. 
Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),\n", - "even for non-chat use cases.\n", - "\n", - "You are probably looking for [the section above instead](/docs/concepts/#chat-models).\n", - ":::\n", - "\n", - "Language models that takes a string as input and returns a string.\n", - "These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).\n", - "\n", - "Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.\n", - "This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).\n", - "When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.\n", - "\n", - "LangChain does not host any LLMs, rather we rely on third party integrations.\n", - "\n", - "For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).\n", - "\n", - "### Message types\n", - "\n", - "Some language models take an array of messages as input and return a message.\n", - "There are a few different types of messages.\n", - "All messages have a \\`role\\`, \\`content\\`, and \\`response_metadata\\` property.\n", - "\n", - "The \\`role\\` describes WHO is saying the message.\n", - "LangChain has different message classes for different roles.\n", - "\n", - "The \\`content\\` property describes the content of the message.\n", - "This can be a few different things:\n", - "\n", - "- A string (most models deal this type of content)\n", - "- A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location)\n", - "\n", - "#### HumanMessage\n", - "\n", - "This represents a message from the user.\n", - "\n", - "#### AIMessage\n", - "\n", - "This represents a message from the model. In addition to the \\`content\\` property, these messages also have:\n", - "\n", - "**\\`response_metadata\\`**\n", - "\n", - "The \\`response_metadata\\` property contains additional metadata about the response. The data here is often specific to each model provider.\n", - "This is where information like log-probs and token usage may be stored.\n", - "\n", - "**\\`tool_calls\\`**\n", - "\n", - "These represent a decision from an language model to call a tool. They are included as part of an \\`AIMessage\\` output.\n", - "They can be accessed from there with the \\`.tool_calls\\` property.\n", - "\n", - "This property returns a list of \\`ToolCall\\`s. A \\`ToolCall\\` is an object with the following arguments:\n", - "\n", - "- \\`name\\`: The name of the tool that should be called.\n", - "- \\`args\\`: The arguments to that tool.\n", - "- \\`id\\`: The id of that tool call.\n", - "\n", - "#### SystemMessage\n", - "\n", - "This represents a system message, which tells the model how to behave. Not every model provider supports this.\n", - "\n", - "#### ToolMessage\n", - "\n", - "This represents the result of a tool call. 
In addition to \\`role\\` and \\`content\\`, this message has:\n", - "\n", - "- a \\`tool_call_id\\` field which conveys the id of the call to the tool that was called to produce this result.\n", - "- an \\`artifact\\` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.\n", - "\n", - "#### (Legacy) FunctionMessage\n", - "\n", - "This is a legacy message type, corresponding to OpenAI's legacy function-calling API. \\`ToolMessage\\` should be used instead to correspond to the updated tool-calling API.\n", - "\n", - "This represents the result of a function call. In addition to \\`role\\` and \\`content\\`, this message has a \\`name\\` parameter which conveys the name of the function that was called to produce this result.\n", - "\n", - "### Prompt templates\n", - "\n", - "\n", - "\n", - "Prompt templates help to translate user input and parameters into instructions for a language model.\n", - "This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.\n", - "\n", - "Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in.\n", - "\n", - "Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages.\n", - "The reason this PromptValue exists is to make it easy to switch between strings and messages.\n", - "\n", - "There are a few different types of prompt templates:\n", - "\n", - "#### String PromptTemplates\n", - "\n", - "These prompt templates are used to format a single string, and generally are used for simpler inputs.\n", - "For example, a common way to construct and use a PromptTemplate is as follows:\n", - "\n", - "\\`\\`\\`typescript\n", - "import { PromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const promptTemplate = PromptTemplate.fromTemplate(\n", - " \"Tell me a joke about {topic}\"\n", - ");\n", - "\n", - "await promptTemplate.invoke({ topic: \"cats\" });\n", - "\\`\\`\\`\n", - "\n", - "#### ChatPromptTemplates\n", - "\n", - "These prompt templates are used to format an array of messages. 
These \"templates\" consist of an array of templates themselves.\n", - "For example, a common way to construct and use a ChatPromptTemplate is as follows:\n", - "\n", - "\\`\\`\\`typescript\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const promptTemplate = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " [\"user\", \"Tell me a joke about {topic}\"],\n", - "]);\n", - "\n", - "await promptTemplate.invoke({ topic: \"cats\" });\n", - "\\`\\`\\`\n", - "\n", - "In the above example, this ChatPromptTemplate will construct two messages when called.\n", - "The first is a system message, that has no variables to format.\n", - "The second is a HumanMessage, and will be formatted by the \\`topic\\` variable the user passes in.\n", - "\n", - "#### MessagesPlaceholder\n", - "\n", - "\n", - "\n", - "This prompt template is responsible for adding an array of messages in a particular place.\n", - "In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.\n", - "But what if we wanted the user to pass in an array of messages that we would slot into a particular spot?\n", - "This is how you use MessagesPlaceholder.\n", - "\n", - "\\`\\`\\`typescript\n", - "import {\n", - " ChatPromptTemplate,\n", - " MessagesPlaceholder,\n", - "} from \"@langchain/core/prompts\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const promptTemplate = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " new MessagesPlaceholder(\"msgs\"),\n", - "]);\n", - "\n", - "promptTemplate.invoke({ msgs: [new HumanMessage({ content: \"hi!\" })] });\n", - "\\`\\`\\`\n", - "\n", - "This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in.\n", - "If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in).\n", - "This is useful for letting an array of messages be slotted into a particular spot.\n", - "\n", - "An alternative way to accomplish the same thing without using the \\`MessagesPlaceholder\\` class explicitly is:\n", - "\n", - "\\`\\`\\`typescript\n", - "const promptTemplate = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " [\"placeholder\", \"{msgs}\"], // <-- This is the changed part\n", - "]);\n", - "\\`\\`\\`\n", - "\n", - "For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).\n", - "\n", - "### Example Selectors\n", - "\n", - "One common prompting technique for achieving better performance is to include examples as part of the prompt.\n", - "This gives the language model concrete examples of how it should behave.\n", - "Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.\n", - "Example Selectors are classes responsible for selecting and then formatting examples into prompts.\n", - "\n", - "For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).\n", - "\n", - "### Output parsers\n", - "\n", - "\n", - "\n", - ":::note\n", - "\n", - "The information here refers to parsers that take a text output from a model try to parse it into a more structured representation.\n", - "More and more models are supporting function (or tool) calling, which handles 
this automatically.\n", - "It is recommended to use function/tool calling rather than output parsing.\n", - "See documentation for that [here](/docs/concepts/#function-tool-calling).\n", - "\n", - ":::\n", - "\n", - "Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks.\n", - "Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.\n", - "\n", - "There are two main methods an output parser must implement:\n", - "\n", - "- \"Get format instructions\": A method which returns a string containing instructions for how the output of a language model should be formatted.\n", - "- \"Parse\": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.\n", - "\n", - "And then one optional one:\n", - "\n", - "- \"Parse with prompt\": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.\n", - "\n", - "Output parsers accept a string or \\`BaseMessage\\` as input and can return an arbitrary type.\n", - "\n", - "LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information:\n", - "\n", - "**Name**: The name of the output parser\n", - "\n", - "**Supports Streaming**: Whether the output parser supports streaming.\n", - "\n", - "**Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments.\n", - "\n", - "**Output Type**: The output type of the object returned by the parser.\n", - "\n", - "**Description**: Our commentary on this output parser and when to use it.\n", - "\n", - "The current date is ${new Date().toISOString()}`;\n", - "\n", - "// Noop statement to hide output\n", - "void 0;" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "6e47de9b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "USAGE: {\n", - " input_tokens: 19,\n", - " cache_creation_input_tokens: 2925,\n", - " cache_read_input_tokens: 0,\n", - " output_tokens: 327\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "import { HumanMessage, SystemMessage } from \"@langchain/core/messages\";\n", - "\n", - "const modelWithCaching = new ChatAnthropic({\n", - " model: \"claude-3-haiku-20240307\",\n", - " clientOptions: {\n", - " defaultHeaders: {\n", - " \"anthropic-beta\": \"prompt-caching-2024-07-31\",\n", - " },\n", - " },\n", - "});\n", - "\n", - "const LONG_TEXT = `You are a pirate. 
Always respond in pirate dialect.\n", - "\n", - "Use the following as context when answering questions:\n", - "\n", - "${CACHED_TEXT}`;\n", - "\n", - "const messages = [\n", - " new SystemMessage({\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: LONG_TEXT,\n", - " // Tell Anthropic to cache this block\n", - " cache_control: { type: \"ephemeral\" },\n", - " },\n", - " ],\n", - " }),\n", - " new HumanMessage({\n", - " content: \"What types of messages are supported in LangChain?\",\n", - " }),\n", - "];\n", - "\n", - "const res = await modelWithCaching.invoke(messages);\n", - "\n", - "console.log(\"USAGE:\", res.response_metadata.usage);" - ] - }, - { - "cell_type": "markdown", - "id": "826d95b6", - "metadata": {}, - "source": [ - "We can see that there's a new field called `cache_creation_input_tokens` in the raw usage field returned from Anthropic.\n", - "\n", - "If we use the same messages again, we can see that the long text's input tokens are read from the cache:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "5d264f8b", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_013WBXXiggy6gMbAUY6NpsuU\",\n", + " \"content\": \"Voici la traduction en français :\\n\\nJ'adore la programmation.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_013WBXXiggy6gMbAUY6NpsuU\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 20\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_013WBXXiggy6gMbAUY6NpsuU\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 20\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 20,\n", + " \"total_tokens\": 49\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "USAGE: {\n", - " input_tokens: 19,\n", - " cache_creation_input_tokens: 0,\n", - " cache_read_input_tokens: 2925,\n", - " output_tokens: 250\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Voici la traduction en français :\n", + "\n", + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_01Ca52fpd1mcGRhH4spzAWr4\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01Ca52fpd1mcGRhH4spzAWr4\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 11\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01Ca52fpd1mcGRhH4spzAWr4\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 11\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 11,\n", + " \"total_tokens\": 34\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "8dac39db", + "metadata": {}, + "source": [ + "## Content blocks\n", + "\n", + "One key difference to note between Anthropic models and most others is that the contents of a single Anthropic AI message can either be a single string or a **list of content blocks**. 
For example when an Anthropic model [calls a tool](/docs/how_to/tool_calling), the tool invocation is part of the message content (as well as being exposed in the standardized `AIMessage.tool_calls` field):" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f5994de0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_01DZGs9DyuashaYxJ4WWpWUP\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"text\",\n", + " \"text\": \"Here is the calculation for 2 + 2:\"\n", + " },\n", + " {\n", + " \"type\": \"tool_use\",\n", + " \"id\": \"toolu_01SQXBamkBr6K6NdHE7GWwF8\",\n", + " \"name\": \"calculator\",\n", + " \"input\": {\n", + " \"number1\": 2,\n", + " \"number2\": 2,\n", + " \"operation\": \"add\"\n", + " }\n", + " }\n", + " ],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01DZGs9DyuashaYxJ4WWpWUP\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 449,\n", + " \"output_tokens\": 100\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01DZGs9DyuashaYxJ4WWpWUP\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 449,\n", + " \"output_tokens\": 100\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"calculator\",\n", + " \"args\": {\n", + " \"number1\": 2,\n", + " \"number2\": 2,\n", + " \"operation\": \"add\"\n", + " },\n", + " \"id\": \"toolu_01SQXBamkBr6K6NdHE7GWwF8\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 449,\n", + " \"output_tokens\": 100,\n", + " \"total_tokens\": 549\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { z } from \"zod\";\n", + "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", + "\n", + "const calculatorSchema = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + "});\n", + "\n", + "const calculatorTool = {\n", + " name: \"calculator\",\n", + " description: \"A simple calculator tool\",\n", + " input_schema: zodToJsonSchema(calculatorSchema),\n", + "};\n", + "\n", + "const toolCallingLlm = new ChatAnthropic({\n", + " model: \"claude-3-haiku-20240307\",\n", + "}).bindTools([calculatorTool]);\n", + "\n", + "const toolPrompt = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant who always needs to use a calculator.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "// Chain your prompt and model together\n", + "const toolCallChain = toolPrompt.pipe(toolCallingLlm);\n", + "\n", + "await toolCallChain.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "d452d4b6", + "metadata": {}, + "source": [ + "## Custom headers\n", + "\n", + 
"You can pass custom headers in your requests like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "41943f0a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_019z4nWpShzsrbSHTWXWQh6z\",\n", + " \"content\": \"The sky appears blue due to a phenomenon called Rayleigh scattering. Here's a brief explanation:\\n\\n1) Sunlight is made up of different wavelengths of visible light, including all the colors of the rainbow.\\n\\n2) As sunlight passes through the atmosphere, the gases (mostly nitrogen and oxygen) cause the shorter wavelengths of light, such as violet and blue, to be scattered more easily than the longer wavelengths like red and orange.\\n\\n3) This scattering of the shorter blue wavelengths occurs in all directions by the gas molecules in the atmosphere.\\n\\n4) Our eyes are more sensitive to the scattered blue light than the scattered violet light, so we perceive the sky as having a blue color.\\n\\n5) The scattering is more pronounced for light traveling over longer distances through the atmosphere. This is why the sky appears even darker blue when looking towards the horizon.\\n\\nSo in essence, the selective scattering of the shorter blue wavelengths of sunlight by the gases in the atmosphere is what causes the sky to appear blue to our eyes during the daytime.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_019z4nWpShzsrbSHTWXWQh6z\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 13,\n", + " \"output_tokens\": 236\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_019z4nWpShzsrbSHTWXWQh6z\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 13,\n", + " \"output_tokens\": 236\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 13,\n", + " \"output_tokens\": 236,\n", + " \"total_tokens\": 249\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const llmWithCustomHeaders = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + " maxTokens: 1024,\n", + " clientOptions: {\n", + " defaultHeaders: {\n", + " \"X-Api-Key\": process.env.ANTHROPIC_API_KEY,\n", + " },\n", + " },\n", + "});\n", + "\n", + "await llmWithCustomHeaders.invoke(\"Why is the sky blue?\");" + ] + }, + { + "cell_type": "markdown", + "id": "3c5e6d7a", + "metadata": {}, + "source": [ + "## Prompt caching\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution Compatibility\n", + "This feature is currently in beta.\n", + ":::\n", + "\n", + "```\n", + "\n", + "Anthropic supports [caching parts of your prompt](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching) in order to reduce costs for use-cases that require long context. You can cache tools and both entire messages and individual blocks.\n", + "\n", + "The initial request containing one or more blocks or tool definitions with a `\"cache_control\": { \"type\": \"ephemeral\" }` field will automatically cache that part of the prompt. 
This initial caching step will cost extra, but subsequent requests will be billed at a reduced rate. The cache has a lifetime of 5 minutes, but this is refreshed each time the cache is hit.\n", + "\n", + "There is also currently a minimum cacheable prompt length, which varies according to model. You can see this information [here](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#structuring-your-prompt).\n", + "\n", + "This currently requires you to initialize your model with a beta header. Here's an example of caching part of a system message that contains the LangChain [conceptual docs](/docs/concepts/):" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "5e02b056", + "metadata": {}, + "outputs": [], + "source": [ + "let CACHED_TEXT = \"...\";" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "bba739ed", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "CACHED_TEXT = `## Components\n", + "\n", + "LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs.\n", + "Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.\n", + "\n", + "### Chat models\n", + "\n", + "\n", + "\n", + "Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).\n", + "These are generally newer models (older models are generally \\`LLMs\\`, see below).\n", + "Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.\n", + "\n", + "Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input.\n", + "This gives them the same interface as LLMs (and simpler to use).\n", + "When a string is passed in as input, it will be converted to a \\`HumanMessage\\` under the hood before being passed to the underlying model.\n", + "\n", + "LangChain does not host any Chat Models, rather we rely on third party integrations.\n", + "\n", + "We have some standardized parameters when constructing ChatModels:\n", + "\n", + "- \\`model\\`: the name of the model\n", + "\n", + "Chat Models also accept other parameters that are specific to that integration.\n", + "\n", + ":::important\n", + "Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it.\n", + "Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.\n", + "Please see the [tool calling section](/docs/concepts/tool_calling) for more information.\n", + ":::\n", + "\n", + "For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).\n", + "\n", + "#### Multimodality\n", + "\n", + "Some chat models are multimodal, accepting images, audio and even video as inputs.\n", + "These are still less common, meaning model providers haven't standardized on the \"best\" way to define the API.\n", + "Multimodal outputs are even less common. 
As such, we've kept our multimodal abstractions fairly light weight\n", + "and plan to further solidify the multimodal APIs and interaction patterns as the field matures.\n", + "\n", + "In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format.\n", + "So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.\n", + "\n", + "For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).\n", + "\n", + "### LLMs\n", + "\n", + "\n", + "\n", + ":::caution\n", + "Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/chat_models),\n", + "even for non-chat use cases.\n", + "\n", + "You are probably looking for [the section above instead](/docs/concepts/chat_models).\n", + ":::\n", + "\n", + "Language models that takes a string as input and returns a string.\n", + "These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/chat_models), see above).\n", + "\n", + "Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.\n", + "This gives them the same interface as [Chat Models](/docs/concepts/chat_models).\n", + "When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.\n", + "\n", + "LangChain does not host any LLMs, rather we rely on third party integrations.\n", + "\n", + "For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).\n", + "\n", + "### Message types\n", + "\n", + "Some language models take an array of messages as input and return a message.\n", + "There are a few different types of messages.\n", + "All messages have a \\`role\\`, \\`content\\`, and \\`response_metadata\\` property.\n", + "\n", + "The \\`role\\` describes WHO is saying the message.\n", + "LangChain has different message classes for different roles.\n", + "\n", + "The \\`content\\` property describes the content of the message.\n", + "This can be a few different things:\n", + "\n", + "- A string (most models deal this type of content)\n", + "- A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location)\n", + "\n", + "#### HumanMessage\n", + "\n", + "This represents a message from the user.\n", + "\n", + "#### AIMessage\n", + "\n", + "This represents a message from the model. In addition to the \\`content\\` property, these messages also have:\n", + "\n", + "**\\`response_metadata\\`**\n", + "\n", + "The \\`response_metadata\\` property contains additional metadata about the response. The data here is often specific to each model provider.\n", + "This is where information like log-probs and token usage may be stored.\n", + "\n", + "**\\`tool_calls\\`**\n", + "\n", + "These represent a decision from an language model to call a tool. They are included as part of an \\`AIMessage\\` output.\n", + "They can be accessed from there with the \\`.tool_calls\\` property.\n", + "\n", + "This property returns a list of \\`ToolCall\\`s. 
A \\`ToolCall\\` is an object with the following arguments:\n", + "\n", + "- \\`name\\`: The name of the tool that should be called.\n", + "- \\`args\\`: The arguments to that tool.\n", + "- \\`id\\`: The id of that tool call.\n", + "\n", + "#### SystemMessage\n", + "\n", + "This represents a system message, which tells the model how to behave. Not every model provider supports this.\n", + "\n", + "#### ToolMessage\n", + "\n", + "This represents the result of a tool call. In addition to \\`role\\` and \\`content\\`, this message has:\n", + "\n", + "- a \\`tool_call_id\\` field which conveys the id of the call to the tool that was called to produce this result.\n", + "- an \\`artifact\\` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.\n", + "\n", + "#### (Legacy) FunctionMessage\n", + "\n", + "This is a legacy message type, corresponding to OpenAI's legacy function-calling API. \\`ToolMessage\\` should be used instead to correspond to the updated tool-calling API.\n", + "\n", + "This represents the result of a function call. In addition to \\`role\\` and \\`content\\`, this message has a \\`name\\` parameter which conveys the name of the function that was called to produce this result.\n", + "\n", + "### Prompt templates\n", + "\n", + "\n", + "\n", + "Prompt templates help to translate user input and parameters into instructions for a language model.\n", + "This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.\n", + "\n", + "Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in.\n", + "\n", + "Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages.\n", + "The reason this PromptValue exists is to make it easy to switch between strings and messages.\n", + "\n", + "There are a few different types of prompt templates:\n", + "\n", + "#### String PromptTemplates\n", + "\n", + "These prompt templates are used to format a single string, and generally are used for simpler inputs.\n", + "For example, a common way to construct and use a PromptTemplate is as follows:\n", + "\n", + "\\`\\`\\`typescript\n", + "import { PromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const promptTemplate = PromptTemplate.fromTemplate(\n", + " \"Tell me a joke about {topic}\"\n", + ");\n", + "\n", + "await promptTemplate.invoke({ topic: \"cats\" });\n", + "\\`\\`\\`\n", + "\n", + "#### ChatPromptTemplates\n", + "\n", + "These prompt templates are used to format an array of messages. 
These \"templates\" consist of an array of templates themselves.\n", + "For example, a common way to construct and use a ChatPromptTemplate is as follows:\n", + "\n", + "\\`\\`\\`typescript\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const promptTemplate = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"user\", \"Tell me a joke about {topic}\"],\n", + "]);\n", + "\n", + "await promptTemplate.invoke({ topic: \"cats\" });\n", + "\\`\\`\\`\n", + "\n", + "In the above example, this ChatPromptTemplate will construct two messages when called.\n", + "The first is a system message, that has no variables to format.\n", + "The second is a HumanMessage, and will be formatted by the \\`topic\\` variable the user passes in.\n", + "\n", + "#### MessagesPlaceholder\n", + "\n", + "\n", + "\n", + "This prompt template is responsible for adding an array of messages in a particular place.\n", + "In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.\n", + "But what if we wanted the user to pass in an array of messages that we would slot into a particular spot?\n", + "This is how you use MessagesPlaceholder.\n", + "\n", + "\\`\\`\\`typescript\n", + "import {\n", + " ChatPromptTemplate,\n", + " MessagesPlaceholder,\n", + "} from \"@langchain/core/prompts\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const promptTemplate = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " new MessagesPlaceholder(\"msgs\"),\n", + "]);\n", + "\n", + "promptTemplate.invoke({ msgs: [new HumanMessage({ content: \"hi!\" })] });\n", + "\\`\\`\\`\n", + "\n", + "This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in.\n", + "If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in).\n", + "This is useful for letting an array of messages be slotted into a particular spot.\n", + "\n", + "An alternative way to accomplish the same thing without using the \\`MessagesPlaceholder\\` class explicitly is:\n", + "\n", + "\\`\\`\\`typescript\n", + "const promptTemplate = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"placeholder\", \"{msgs}\"], // <-- This is the changed part\n", + "]);\n", + "\\`\\`\\`\n", + "\n", + "For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).\n", + "\n", + "### Example Selectors\n", + "\n", + "One common prompting technique for achieving better performance is to include examples as part of the prompt.\n", + "This gives the language model concrete examples of how it should behave.\n", + "Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.\n", + "Example Selectors are classes responsible for selecting and then formatting examples into prompts.\n", + "\n", + "For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).\n", + "\n", + "### Output parsers\n", + "\n", + "\n", + "\n", + ":::note\n", + "\n", + "The information here refers to parsers that take a text output from a model try to parse it into a more structured representation.\n", + "More and more models are supporting function (or tool) calling, which handles 
this automatically.\n", + "It is recommended to use function/tool calling rather than output parsing.\n", + "See documentation for that [here](/docs/concepts/tool_calling).\n", + "\n", + ":::\n", + "\n", + "Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks.\n", + "Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.\n", + "\n", + "There are two main methods an output parser must implement:\n", + "\n", + "- \"Get format instructions\": A method which returns a string containing instructions for how the output of a language model should be formatted.\n", + "- \"Parse\": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.\n", + "\n", + "And then one optional one:\n", + "\n", + "- \"Parse with prompt\": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.\n", + "\n", + "Output parsers accept a string or \\`BaseMessage\\` as input and can return an arbitrary type.\n", + "\n", + "LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information:\n", + "\n", + "**Name**: The name of the output parser\n", + "\n", + "**Supports Streaming**: Whether the output parser supports streaming.\n", + "\n", + "**Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments.\n", + "\n", + "**Output Type**: The output type of the object returned by the parser.\n", + "\n", + "**Description**: Our commentary on this output parser and when to use it.\n", + "\n", + "The current date is ${new Date().toISOString()}`;\n", + "\n", + "// Noop statement to hide output\n", + "void 0;" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "6e47de9b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "USAGE: {\n", + " input_tokens: 19,\n", + " cache_creation_input_tokens: 2925,\n", + " cache_read_input_tokens: 0,\n", + " output_tokens: 327\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { HumanMessage, SystemMessage } from \"@langchain/core/messages\";\n", + "\n", + "const modelWithCaching = new ChatAnthropic({\n", + " model: \"claude-3-haiku-20240307\",\n", + " clientOptions: {\n", + " defaultHeaders: {\n", + " \"anthropic-beta\": \"prompt-caching-2024-07-31\",\n", + " },\n", + " },\n", + "});\n", + "\n", + "const LONG_TEXT = `You are a pirate. 
Always respond in pirate dialect.\n", + "\n", + "Use the following as context when answering questions:\n", + "\n", + "${CACHED_TEXT}`;\n", + "\n", + "const messages = [\n", + " new SystemMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: LONG_TEXT,\n", + " // Tell Anthropic to cache this block\n", + " cache_control: { type: \"ephemeral\" },\n", + " },\n", + " ],\n", + " }),\n", + " new HumanMessage({\n", + " content: \"What types of messages are supported in LangChain?\",\n", + " }),\n", + "];\n", + "\n", + "const res = await modelWithCaching.invoke(messages);\n", + "\n", + "console.log(\"USAGE:\", res.response_metadata.usage);" + ] + }, + { + "cell_type": "markdown", + "id": "826d95b6", + "metadata": {}, + "source": [ + "We can see that there's a new field called `cache_creation_input_tokens` in the raw usage field returned from Anthropic.\n", + "\n", + "If we use the same messages again, we can see that the long text's input tokens are read from the cache:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5d264f8b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "USAGE: {\n", + " input_tokens: 19,\n", + " cache_creation_input_tokens: 0,\n", + " cache_read_input_tokens: 2925,\n", + " output_tokens: 250\n", + "}\n" + ] + } + ], + "source": [ + "const res2 = await modelWithCaching.invoke(messages);\n", + "\n", + "console.log(\"USAGE:\", res2.response_metadata.usage);" + ] + }, + { + "cell_type": "markdown", + "id": "fc6bba1b", + "metadata": {}, + "source": [ + "### Tool caching\n", + "\n", + "You can also cache tools by setting the same `\"cache_control\": { \"type\": \"ephemeral\" }` within a tool definition. This currently requires you to bind a tool in [Anthropic's raw tool format](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9c7c5eaf", + "metadata": {}, + "outputs": [], + "source": [ + "const SOME_LONG_DESCRIPTION = \"...\";\n", + "\n", + "// Tool in Anthropic format\n", + "const anthropicTools = [{\n", + " name: \"get_weather\",\n", + " description: SOME_LONG_DESCRIPTION,\n", + " input_schema: {\n", + " type: \"object\",\n", + " properties: {\n", + " location: {\n", + " type: \"string\",\n", + " description: \"Location to get the weather for\",\n", + " },\n", + " unit: {\n", + " type: \"string\",\n", + " description: \"Temperature unit to return\",\n", + " },\n", + " },\n", + " required: [\"location\"],\n", + " },\n", + " // Tell Anthropic to cache this tool\n", + " cache_control: { type: \"ephemeral\" },\n", + "}]\n", + "\n", + "const modelWithCachedTools = modelWithCaching.bindTools(anthropicTools);\n", + "\n", + "await modelWithCachedTools.invoke(\"what is the weather in SF?\");" + ] + }, + { + "cell_type": "markdown", + "id": "5d000dd9", + "metadata": {}, + "source": [ + "\n", + "\n", + "For more on how prompt caching works, see [Anthropic's docs](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#how-prompt-caching-works)." + ] + }, + { + "cell_type": "markdown", + "id": "f8dece4e", + "metadata": {}, + "source": [ + "## Custom clients\n", + "\n", + "Anthropic models [may be hosted on cloud services such as Google Vertex](https://docs.anthropic.com/en/api/claude-on-vertex-ai) that rely on a different underlying client with the same interface as the primary Anthropic client. 
You can access these services by providing a `createClient` method that returns an initialized instance of an Anthropic client. Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00ec6d41", + "metadata": {}, + "outputs": [], + "source": [ + "import { AnthropicVertex } from \"@anthropic-ai/vertex-sdk\";\n", + "\n", + "const customClient = new AnthropicVertex();\n", + "\n", + "const modelWithCustomClient = new ChatAnthropic({\n", + " modelName: \"claude-3-sonnet@20240229\",\n", + " maxRetries: 0,\n", + " createClient: () => customClient,\n", + "});\n", + "\n", + "await modelWithCustomClient.invoke([{ role: \"user\", content: \"Hello!\" }]);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatAnthropic features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const res2 = await modelWithCaching.invoke(messages);\n", - "\n", - "console.log(\"USAGE:\", res2.response_metadata.usage);" - ] - }, - { - "cell_type": "markdown", - "id": "fc6bba1b", - "metadata": {}, - "source": [ - "### Tool caching\n", - "\n", - "You can also cache tools by setting the same `\"cache_control\": { \"type\": \"ephemeral\" }` within a tool definition. This currently requires you to bind a tool in [Anthropic's raw tool format](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9c7c5eaf", - "metadata": {}, - "outputs": [], - "source": [ - "const SOME_LONG_DESCRIPTION = \"...\";\n", - "\n", - "// Tool in Anthropic format\n", - "const anthropicTools = [{\n", - " name: \"get_weather\",\n", - " description: SOME_LONG_DESCRIPTION,\n", - " input_schema: {\n", - " type: \"object\",\n", - " properties: {\n", - " location: {\n", - " type: \"string\",\n", - " description: \"Location to get the weather for\",\n", - " },\n", - " unit: {\n", - " type: \"string\",\n", - " description: \"Temperature unit to return\",\n", - " },\n", - " },\n", - " required: [\"location\"],\n", - " },\n", - " // Tell Anthropic to cache this tool\n", - " cache_control: { type: \"ephemeral\" },\n", - "}]\n", - "\n", - "const modelWithCachedTools = modelWithCaching.bindTools(anthropicTools);\n", - "\n", - "await modelWithCachedTools.invoke(\"what is the weather in SF?\");" - ] - }, - { - "cell_type": "markdown", - "id": "5d000dd9", - "metadata": {}, - "source": [ - "\n", - "\n", - "For more on how prompt caching works, see [Anthropic's docs](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#how-prompt-caching-works)." - ] - }, - { - "cell_type": "markdown", - "id": "f8dece4e", - "metadata": {}, - "source": [ - "## Custom clients\n", - "\n", - "Anthropic models [may be hosted on cloud services such as Google Vertex](https://docs.anthropic.com/en/api/claude-on-vertex-ai) that rely on a different underlying client with the same interface as the primary Anthropic client. 
You can access these services by providing a `createClient` method that returns an initialized instance of an Anthropic client. Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "00ec6d41", - "metadata": {}, - "outputs": [], - "source": [ - "import { AnthropicVertex } from \"@anthropic-ai/vertex-sdk\";\n", - "\n", - "const customClient = new AnthropicVertex();\n", - "\n", - "const modelWithCustomClient = new ChatAnthropic({\n", - " modelName: \"claude-3-sonnet@20240229\",\n", - " maxRetries: 0,\n", - " createClient: () => customClient,\n", - "});\n", - "\n", - "await modelWithCustomClient.invoke([{ role: \"user\", content: \"Hello!\" }]);" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatAnthropic features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/azure.ipynb b/docs/core_docs/docs/integrations/chat/azure.ipynb index fb07aa31e913..5f6ac31b19b7 100644 --- a/docs/core_docs/docs/integrations/chat/azure.ipynb +++ b/docs/core_docs/docs/integrations/chat/azure.ipynb @@ -1,448 +1,448 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Azure OpenAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# AzureChatOpenAI\n", - "\n", - "Azure OpenAI is a Microsoft Azure service that provides powerful language models from OpenAI.\n", - "\n", - "This will help you getting started with AzureChatOpenAI [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/azure_chat_openai) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [AzureChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html) | [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", - "\n", - "## Setup\n", - "\n", - "[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond.\n", - "\n", - "LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node).\n", - "\n", - "You can learn more about Azure OpenAI and its difference with the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview).\n", - "\n", - "### Credentials\n", - "\n", - "If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started.\n", - "\n", - "You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", - "\n", - "Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance. 
Then, if using Node.js, you can set your credentials as environment variables:\n", - "\n", - "```bash\n", - "AZURE_OPENAI_API_INSTANCE_NAME=\n", - "AZURE_OPENAI_API_DEPLOYMENT_NAME=\n", - "AZURE_OPENAI_API_KEY=\n", - "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain AzureChatOpenAI integration lives in the `@langchain/openai` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureChatOpenAI } from \"@langchain/openai\" \n", - "\n", - "const llm = new AzureChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " maxRetries: 2,\n", - " azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", - " azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", - " azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", - " azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9qrWKByvVrzWMxSn8joRZAklHoB32\",\n", - " \"content\": \"J'adore la programmation.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 31,\n", - " \"totalTokens\": 39\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 31,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 39\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Azure OpenAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# AzureChatOpenAI\n", + "\n", + "Azure OpenAI is a Microsoft Azure service that provides powerful language models from OpenAI.\n", + "\n", + "This will help you getting started with AzureChatOpenAI [chat models](/docs/concepts/chat_models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/azure_chat_openai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [AzureChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html) | [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond.\n", + "\n", + "LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node).\n", + "\n", + "You can learn more about Azure OpenAI and its difference with the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview).\n", + "\n", + "### Credentials\n", + "\n", + "If you don't have an Azure account, you can [create a free 
account](https://azure.microsoft.com/free/) to get started.\n", + "\n", + "You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", + "\n", + "Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance. Then, if using Node.js, you can set your credentials as environment variables:\n", + "\n", + "```bash\n", + "AZURE_OPENAI_API_INSTANCE_NAME=\n", + "AZURE_OPENAI_API_DEPLOYMENT_NAME=\n", + "AZURE_OPENAI_API_KEY=\n", + "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain AzureChatOpenAI integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureChatOpenAI } from \"@langchain/openai\" \n", + "\n", + "const llm = new AzureChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", + " azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9qrWKByvVrzWMxSn8joRZAklHoB32\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + 
"const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9qrWR7WiNjZ3leSG4Wd77cnKEVivv\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9qrWR7WiNjZ3leSG4Wd77cnKEVivv\",\n", - " \"content\": \"Ich liebe das Programmieren.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 6,\n", - " \"promptTokens\": 26,\n", - " \"totalTokens\": 32\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 26,\n", - " \"output_tokens\": 6,\n", - " \"total_tokens\": 32\n", - " }\n", - "}\n" - ] + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Using Azure Managed Identity\n", + "\n", + "If you're using Azure Managed Identity, you can configure the credentials like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d7f47b2a", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " DefaultAzureCredential,\n", + " getBearerTokenProvider,\n", + "} from \"@azure/identity\";\n", + "import { AzureChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const credentials = new DefaultAzureCredential();\n", + "const azureADTokenProvider = getBearerTokenProvider(\n", + " credentials,\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ");\n", + "\n", 
+ "const llmWithManagedIdentity = new AzureChatOpenAI({\n", + " azureADTokenProvider,\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "6a889856", + "metadata": {}, + "source": [ + "## Using a different domain\n", + "\n", + "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", + "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ace7f876", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmWithDifferentDomain = new AzureChatOpenAI({\n", + " temperature: 0.9,\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " azureOpenAIBasePath:\n", + " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "092e7a38", + "metadata": {}, + "source": [ + "## Custom headers\n", + "\n", + "You can specify custom headers by passing in a `configuration` field:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43503a94", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmWithCustomHeaders = new AzureChatOpenAI({\n", + " azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", + " azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " configuration: {\n", + " defaultHeaders: {\n", + " \"x-custom-header\": `SOME_VALUE`,\n", + " },\n", + " },\n", + "});\n", + "\n", + "await llmWithCustomHeaders.invoke(\"Hi there!\");" + ] + }, + { + "cell_type": "markdown", + "id": "1a6b849d", + "metadata": {}, + "source": [ + "The `configuration` field also accepts other `ClientOptions` parameters accepted by the official SDK.\n", + "\n", + "**Note:** The specific header `api-key` currently cannot be overridden in this manner and will pass through the value from `azureOpenAIApiKey`." + ] + }, + { + "cell_type": "markdown", + "id": "0ac0310c", + "metadata": {}, + "source": [ + "## Migration from Azure OpenAI SDK\n", + "\n", + "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", + "\n", + "1. 
Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "\n", + " @langchain/openai\n", + "\n", + "\n", + "```\n", + "\n", + "```bash\n", + "npm uninstall @langchain/azure-openai\n", + "```\n", + "\n", + " \n", + "2. Update your imports to use the new `AzureChatOpenAI` class from the `@langchain/openai` package:\n", + " ```typescript\n", + " import { AzureChatOpenAI } from \"@langchain/openai\";\n", + " ```\n", + "3. Update your code to use the new `AzureChatOpenAI` class and pass the required parameters:\n", + "\n", + " ```typescript\n", + " const model = new AzureChatOpenAI({\n", + " azureOpenAIApiKey: \"\",\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + " });\n", + " ```\n", + "\n", + " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", + "\n", + " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", + "\n", + " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all AzureChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", - "metadata": {}, - "source": [ - "## Using Azure Managed Identity\n", - "\n", - "If you're using Azure Managed Identity, you can configure the credentials like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "d7f47b2a", - "metadata": {}, - "outputs": [], - "source": [ - "import {\n", - " DefaultAzureCredential,\n", - " getBearerTokenProvider,\n", - "} from \"@azure/identity\";\n", - "import { AzureChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const credentials = new DefaultAzureCredential();\n", - "const azureADTokenProvider = getBearerTokenProvider(\n", - " 
credentials,\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ");\n", - "\n", - "const llmWithManagedIdentity = new AzureChatOpenAI({\n", - " azureADTokenProvider,\n", - " azureOpenAIApiInstanceName: \"\",\n", - " azureOpenAIApiDeploymentName: \"\",\n", - " azureOpenAIApiVersion: \"\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "6a889856", - "metadata": {}, - "source": [ - "## Using a different domain\n", - "\n", - "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", - "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "ace7f876", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llmWithDifferentDomain = new AzureChatOpenAI({\n", - " temperature: 0.9,\n", - " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", - " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", - " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", - " azureOpenAIBasePath:\n", - " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", - "});\n" - ] - }, - { - "cell_type": "markdown", - "id": "092e7a38", - "metadata": {}, - "source": [ - "## Custom headers\n", - "\n", - "You can specify custom headers by passing in a `configuration` field:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "43503a94", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llmWithCustomHeaders = new AzureChatOpenAI({\n", - " azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", - " azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", - " azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", - " azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", - " configuration: {\n", - " defaultHeaders: {\n", - " \"x-custom-header\": `SOME_VALUE`,\n", - " },\n", - " },\n", - "});\n", - "\n", - "await llmWithCustomHeaders.invoke(\"Hi there!\");" - ] - }, - { - "cell_type": "markdown", - "id": "1a6b849d", - "metadata": {}, - "source": [ - "The `configuration` field also accepts other `ClientOptions` parameters accepted by the official SDK.\n", - "\n", - "**Note:** The specific header `api-key` currently cannot be overridden in this manner and will pass through the value from `azureOpenAIApiKey`." - ] - }, - { - "cell_type": "markdown", - "id": "0ac0310c", - "metadata": {}, - "source": [ - "## Migration from Azure OpenAI SDK\n", - "\n", - "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", - "\n", - "1. 
Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "\n", - " @langchain/openai\n", - "\n", - "\n", - "```\n", - "\n", - "```bash\n", - "npm uninstall @langchain/azure-openai\n", - "```\n", - "\n", - " \n", - "2. Update your imports to use the new `AzureChatOpenAI` class from the `@langchain/openai` package:\n", - " ```typescript\n", - " import { AzureChatOpenAI } from \"@langchain/openai\";\n", - " ```\n", - "3. Update your code to use the new `AzureChatOpenAI` class and pass the required parameters:\n", - "\n", - " ```typescript\n", - " const model = new AzureChatOpenAI({\n", - " azureOpenAIApiKey: \"\",\n", - " azureOpenAIApiInstanceName: \"\",\n", - " azureOpenAIApiDeploymentName: \"\",\n", - " azureOpenAIApiVersion: \"\",\n", - " });\n", - " ```\n", - "\n", - " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", - "\n", - " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", - "\n", - " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all AzureChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx b/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx index 7e9db88dcc36..21e182606446 100644 --- a/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx +++ b/docs/core_docs/docs/integrations/chat/baidu_qianfan.mdx @@ -40,5 +40,5 @@ import ChatBaiduQianfanStreamExample from "@examples/models/chat/chat_stream_bai ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx b/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx index 3ab0c963436b..b0fa177e7767 100644 --- a/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx +++ b/docs/core_docs/docs/integrations/chat/baidu_wenxin.mdx @@ -35,5 +35,5 @@ import Wenxin from "@examples/models/chat/integration_baiduwenxin.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff 
--git a/docs/core_docs/docs/integrations/chat/bedrock.ipynb b/docs/core_docs/docs/integrations/chat/bedrock.ipynb index 389fe1149020..dbe56fc26d1a 100644 --- a/docs/core_docs/docs/integrations/chat/bedrock.ipynb +++ b/docs/core_docs/docs/integrations/chat/bedrock.ipynb @@ -1,303 +1,303 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Amazon Bedrock\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# BedrockChat\n", - "\n", - "[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. \n", - "\n", - "This will help you getting started with Amazon Bedrock [chat models](/docs/concepts/#chat-models). For detailed documentation of all `BedrockChat` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_bedrock.BedrockChat.html).\n", - "\n", - "```{=mdx}\n", - ":::tip\n", - "The newer [`ChatBedrockConverse` chat model is now available via the dedicated `@langchain/aws`](/docs/integrations/chat/bedrock_converse) integration package. Use [tool calling](/docs/concepts#functiontool-calling) with more models with this package.\n", - ":::\n", - "```\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/bedrock/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`BedrockChat`](https://api.js.langchain.com/classes/langchain_community_chat_models_bedrock.BedrockChat.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html) to sign up for AWS and setup your credentials. 
You'll also need to turn on model access for your account, which you can do by [following these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html).\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `BedrockChat` integration lives in the `@langchain/community` package. You'll also need to install several official AWS packages as peer dependencies:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core @aws-crypto/sha256-js @aws-sdk/credential-provider-node @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", - "\n", - "```\n", - "\n", - "You can also use BedrockChat in web environments such as Edge functions or Cloudflare Workers by omitting the @aws-sdk/credential-provider-node dependency and using the web entrypoint:\n", - "\n", - "```{=mdx}\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core @aws-crypto/sha256-js @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Currently, only Anthropic, Cohere, and Mistral models are supported with the chat model integration. 
For foundation models from AI21 or Amazon, see the [text generation Bedrock variant](/docs/integrations/llms/bedrock/).\n", - "\n", - "There are a few different ways to authenticate with AWS - the below examples rely on an access key, secret access key and region set in your environment variables:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { BedrockChat } from \"@langchain/community/chat_models/bedrock\";\n", - "\n", - "const llm = new BedrockChat({\n", - " model: \"anthropic.claude-3-5-sonnet-20240620-v1:0\",\n", - " region: process.env.BEDROCK_AWS_REGION,\n", - " credentials: {\n", - " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,\n", - " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,\n", - " },\n", - " // endpointUrl: \"custom.amazonaws.com\",\n", - " // modelKwargs: {\n", - " // anthropic_version: \"bedrock-2023-05-31\",\n", - " // },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"J'adore la programmation.\",\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_bdrk_01RwhfuWkLLcp7ks1X3u8bwd\"\n", - " },\n", - " \"response_metadata\": {\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-5-sonnet-20240620\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 29,\n", - " \"output_tokens\": 11\n", - " }\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Amazon Bedrock\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# BedrockChat\n", + "\n", + "[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. 
\n", + "\n", + "This will help you getting started with Amazon Bedrock [chat models](/docs/concepts/chat_models). For detailed documentation of all `BedrockChat` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_bedrock.BedrockChat.html).\n", + "\n", + "```{=mdx}\n", + ":::tip\n", + "The newer [`ChatBedrockConverse` chat model is now available via the dedicated `@langchain/aws`](/docs/integrations/chat/bedrock_converse) integration package. Use [tool calling](/docs/concepts/tool_calling) with more models with this package.\n", + ":::\n", + "```\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/bedrock/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`BedrockChat`](https://api.js.langchain.com/classes/langchain_community_chat_models_bedrock.BedrockChat.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html) to sign up for AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by [following these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html).\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `BedrockChat` integration lives in the `@langchain/community` package. 
You'll also need to install several official AWS packages as peer dependencies:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core @aws-crypto/sha256-js @aws-sdk/credential-provider-node @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", + "\n", + "```\n", + "\n", + "You can also use BedrockChat in web environments such as Edge functions or Cloudflare Workers by omitting the @aws-sdk/credential-provider-node dependency and using the web entrypoint:\n", + "\n", + "```{=mdx}\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core @aws-crypto/sha256-js @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"Here's the German translation:\\n\\nIch liebe Programmieren.\",\n", - " \"additional_kwargs\": {\n", - " \"id\": \"msg_bdrk_01RtUH3qrYJPUdutYoxphFkv\"\n", - " },\n", - " \"response_metadata\": {\n", - " \"type\": \"message\",\n", - " \"role\": \"assistant\",\n", - " \"model\": \"claude-3-5-sonnet-20240620\",\n", - " \"stop_reason\": \"end_turn\",\n", - " \"stop_sequence\": null,\n", - " \"usage\": {\n", - " \"input_tokens\": 23,\n", - " \"output_tokens\": 18\n", - " }\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - "}\n" - ] + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Currently, only Anthropic, Cohere, and Mistral models are supported with the chat model integration. 
For foundation models from AI21 or Amazon, see the [text generation Bedrock variant](/docs/integrations/llms/bedrock/).\n", + "\n", + "There are a few different ways to authenticate with AWS - the below examples rely on an access key, secret access key and region set in your environment variables:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { BedrockChat } from \"@langchain/community/chat_models/bedrock\";\n", + "\n", + "const llm = new BedrockChat({\n", + " model: \"anthropic.claude-3-5-sonnet-20240620-v1:0\",\n", + " region: process.env.BEDROCK_AWS_REGION,\n", + " credentials: {\n", + " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,\n", + " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,\n", + " },\n", + " // endpointUrl: \"custom.amazonaws.com\",\n", + " // modelKwargs: {\n", + " // anthropic_version: \"bedrock-2023-05-31\",\n", + " // },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_bdrk_01RwhfuWkLLcp7ks1X3u8bwd\"\n", + " },\n", + " \"response_metadata\": {\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-5-sonnet-20240620\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 11\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Here's the German translation:\\n\\nIch liebe Programmieren.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_bdrk_01RtUH3qrYJPUdutYoxphFkv\"\n", + " },\n", + " \"response_metadata\": {\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-5-sonnet-20240620\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 18\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Tool calling with Bedrock models works in a similar way to [other models](/docs/how_to/tool_calling), but note that not all Bedrock models support tool calling. Please refer to the [AWS model documentation](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html) for more information." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `BedrockChat` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_bedrock.BedrockChat.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", - "metadata": {}, - "source": [ - "## Tool calling\n", - "\n", - "Tool calling with Bedrock models works in a similar way to [other models](/docs/how_to/tool_calling), but note that not all Bedrock models support tool calling. Please refer to the [AWS model documentation](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html) for more information." - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `BedrockChat` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_bedrock.BedrockChat.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb b/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb index c07a99664b98..b4b4fca65c8f 100644 --- a/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb +++ b/docs/core_docs/docs/integrations/chat/bedrock_converse.ipynb @@ -1,303 +1,303 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Amazon Bedrock Converse\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatBedrockConverse\n", - "\n", - "[Amazon Bedrock Converse](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html) is a fully managed service that makes Foundation Models (FMs) from leading AI startups and Amazon available via an API. You can choose from a wide range of FMs to find the model that is best suited for your use case. 
It provides a unified conversational interface for Bedrock models, but does not yet have feature parity for all functionality within the older [Bedrock model service](/docs/integrations/chat/bedrock).\n", - "\n", - "This will help you getting started with Amazon Bedrock Converse [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatBedrockConverse` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.ChatBedrockConverse.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/bedrock/#beta-bedrock-converse-api) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`ChatBedrockConverse`](https://api.js.langchain.com/classes/langchain_aws.ChatBedrockConverse.html) | [`@langchain/aws`](https://npmjs.com/@langchain/aws) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/aws?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/aws?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html) to sign up for AWS and setup your credentials. 
You'll also need to turn on model access for your account, which you can do by [following these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html).\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `ChatBedrockConverse` integration lives in the `@langchain/aws` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/aws @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions.\n", - "\n", - "There are a few different ways to authenticate with AWS - the below examples rely on an access key, secret access key and region set in your environment variables:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatBedrockConverse } from \"@langchain/aws\";\n", - "\n", - "const llm = new ChatBedrockConverse({\n", - " model: \"anthropic.claude-3-5-sonnet-20240620-v1:0\",\n", - " region: process.env.BEDROCK_AWS_REGION,\n", - " credentials: {\n", - " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,\n", - " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,\n", - " },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"f5dc5791-224e-4fe5-ba2e-4cc51d9e7795\",\n", - " \"content\": \"J'adore la programmation.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"$metadata\": {\n", - " \"httpStatusCode\": 200,\n", - " \"requestId\": \"f5dc5791-224e-4fe5-ba2e-4cc51d9e7795\",\n", - " \"attempts\": 1,\n", - " \"totalRetryDelay\": 0\n", - " },\n", - " \"metrics\": {\n", - " \"latencyMs\": 584\n", - " },\n", - " \"stopReason\": \"end_turn\",\n", - " \"usage\": {\n", - " \"inputTokens\": 29,\n", - " \"outputTokens\": 11,\n", - " \"totalTokens\": 40\n", - " }\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 29,\n", - " \"output_tokens\": 11,\n", - " \"total_tokens\": 40\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Amazon Bedrock Converse\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatBedrockConverse\n", + "\n", + "[Amazon Bedrock Converse](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html) is a fully managed service that makes Foundation Models (FMs) from leading AI startups and Amazon available via an API. You can choose from a wide range of FMs to find the model that is best suited for your use case. It provides a unified conversational interface for Bedrock models, but does not yet have feature parity for all functionality within the older [Bedrock model service](/docs/integrations/chat/bedrock).\n", + "\n", + "This will help you getting started with Amazon Bedrock Converse [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatBedrockConverse` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.ChatBedrockConverse.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/bedrock/#beta-bedrock-converse-api) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`ChatBedrockConverse`](https://api.js.langchain.com/classes/langchain_aws.ChatBedrockConverse.html) | [`@langchain/aws`](https://npmjs.com/@langchain/aws) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/aws?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/aws?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to the [AWS 
docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html) to sign up for AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by [following these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html).\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatBedrockConverse` integration lives in the `@langchain/aws` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/aws @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"c6401e11-8f85-4a71-8e15-4856d55aef78\",\n", - " \"content\": \"Here's the German translation:\\n\\nIch liebe Programmieren.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"$metadata\": {\n", - " \"httpStatusCode\": 200,\n", - " \"requestId\": \"c6401e11-8f85-4a71-8e15-4856d55aef78\",\n", - " \"attempts\": 1,\n", - " \"totalRetryDelay\": 0\n", - " },\n", - " \"metrics\": {\n", - " \"latencyMs\": 760\n", - " },\n", - " \"stopReason\": \"end_turn\",\n", - " \"usage\": {\n", - " \"inputTokens\": 23,\n", - " \"outputTokens\": 18,\n", - " \"totalTokens\": 41\n", - " }\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 23,\n", - " \"output_tokens\": 18,\n", - " \"total_tokens\": 41\n", - " }\n", - "}\n" - ] + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions.\n", + "\n", + "There are a few different ways to authenticate with AWS - the below examples rely on an access key, secret access key and region set in your environment variables:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatBedrockConverse } from \"@langchain/aws\";\n", + "\n", + "const llm = new ChatBedrockConverse({\n", + " model: \"anthropic.claude-3-5-sonnet-20240620-v1:0\",\n", + " region: process.env.BEDROCK_AWS_REGION,\n", + " credentials: {\n", + " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,\n", + " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"f5dc5791-224e-4fe5-ba2e-4cc51d9e7795\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"$metadata\": {\n", + " \"httpStatusCode\": 200,\n", + " \"requestId\": \"f5dc5791-224e-4fe5-ba2e-4cc51d9e7795\",\n", + " \"attempts\": 1,\n", + " 
\"totalRetryDelay\": 0\n", + " },\n", + " \"metrics\": {\n", + " \"latencyMs\": 584\n", + " },\n", + " \"stopReason\": \"end_turn\",\n", + " \"usage\": {\n", + " \"inputTokens\": 29,\n", + " \"outputTokens\": 11,\n", + " \"totalTokens\": 40\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 11,\n", + " \"total_tokens\": 40\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"c6401e11-8f85-4a71-8e15-4856d55aef78\",\n", + " \"content\": \"Here's the German translation:\\n\\nIch liebe Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"$metadata\": {\n", + " \"httpStatusCode\": 200,\n", + " \"requestId\": \"c6401e11-8f85-4a71-8e15-4856d55aef78\",\n", + " \"attempts\": 1,\n", + " \"totalRetryDelay\": 0\n", + " },\n", + " \"metrics\": {\n", + " \"latencyMs\": 760\n", + " },\n", + " \"stopReason\": \"end_turn\",\n", + " \"usage\": {\n", + " \"inputTokens\": 23,\n", + " \"outputTokens\": 18,\n", + " \"totalTokens\": 41\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 18,\n", + " \"total_tokens\": 41\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Tool calling with Bedrock models works in a similar way to [other models](/docs/how_to/tool_calling), but note that not all Bedrock models support tool calling. Please refer to the [AWS model documentation](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html) for more information." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `ChatBedrockConverse` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_aws.ChatBedrockConverse.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", - "metadata": {}, - "source": [ - "## Tool calling\n", - "\n", - "Tool calling with Bedrock models works in a similar way to [other models](/docs/how_to/tool_calling), but note that not all Bedrock models support tool calling. Please refer to the [AWS model documentation](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html) for more information." - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `ChatBedrockConverse` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_aws.ChatBedrockConverse.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/cloudflare_workersai.ipynb b/docs/core_docs/docs/integrations/chat/cloudflare_workersai.ipynb index 7202104efcd1..70be0217176f 100644 --- a/docs/core_docs/docs/integrations/chat/cloudflare_workersai.ipynb +++ b/docs/core_docs/docs/integrations/chat/cloudflare_workersai.ipynb @@ -1,285 +1,285 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Cloudflare Workers AI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatCloudflareWorkersAI\n", - "\n", - "[Workers AI](https://developers.cloudflare.com/workers-ai/) allows you to run machine learning models, on the Cloudflare network, from your own code.\n", - "\n", - "This will help you getting started with Cloudflare Workers AI [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all `ChatCloudflareWorkersAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_cloudflare.ChatCloudflareWorkersAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | PY support | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`ChatCloudflareWorkersAI`](https://api.js.langchain.com/classes/langchain_cloudflare.ChatCloudflareWorkersAI.html) | [`@langchain/cloudflare`](https://npmjs.com/@langchain/cloudflare) | ❌ | ✅ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cloudflare?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cloudflare?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "To access Cloudflare Workers AI models you'll need to create a Cloudflare account, get an API key, and install the `@langchain/cloudflare` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head [to this page](https://developers.cloudflare.com/workers-ai/) to sign up to Cloudflare and generate an API key. 
Once you've done this, note your `CLOUDFLARE_ACCOUNT_ID` and `CLOUDFLARE_API_TOKEN`.\n", - "\n", - "Passing a binding within a Cloudflare Worker is not yet supported.\n", - "\n", - "### Installation\n", - "\n", - "The LangChain ChatCloudflareWorkersAI integration lives in the `@langchain/cloudflare` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/cloudflare @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "fb6997f0", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "// @ts-expect-error Deno is not recognized\n", - "const CLOUDFLARE_ACCOUNT_ID = Deno.env.get(\"CLOUDFLARE_ACCOUNT_ID\");\n", - "// @ts-expect-error Deno is not recognized\n", - "const CLOUDFLARE_API_TOKEN = Deno.env.get(\"CLOUDFLARE_API_TOKEN\");" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatCloudflareWorkersAI } from \"@langchain/cloudflare\";\n", - "\n", - "const llm = new ChatCloudflareWorkersAI({\n", - " model: \"@cf/meta/llama-2-7b-chat-int8\", // Default value\n", - " cloudflareAccountId: CLOUDFLARE_ACCOUNT_ID,\n", - " cloudflareApiToken: CLOUDFLARE_API_TOKEN,\n", - " // Pass a custom base URL to use Cloudflare AI Gateway\n", - " // baseUrl: `https://gateway.ai.cloudflare.com/v1/{YOUR_ACCOUNT_ID}/{GATEWAY_NAME}/workers-ai/`,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m'I can help with that! The translation of \"I love programming\" in French is:\\n'\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m`\"J'adore le programmati`\u001b[39m... 4 more characters,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m'I can help with that! The translation of \"I love programming\" in French is:\\n'\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m`\"J'adore le programmati`\u001b[39m... 4 more characters,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Cloudflare Workers AI\n", + "---" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "I can help with that! The translation of \"I love programming\" in French is:\n", - "\n", - "\"J'adore le programmation.\"\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatCloudflareWorkersAI\n", + "\n", + "[Workers AI](https://developers.cloudflare.com/workers-ai/) allows you to run machine learning models, on the Cloudflare network, from your own code.\n", + "\n", + "This will help you getting started with Cloudflare Workers AI [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatCloudflareWorkersAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_cloudflare.ChatCloudflareWorkersAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | PY support | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`ChatCloudflareWorkersAI`](https://api.js.langchain.com/classes/langchain_cloudflare.ChatCloudflareWorkersAI.html) | [`@langchain/cloudflare`](https://npmjs.com/@langchain/cloudflare) | ❌ | ✅ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cloudflare?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cloudflare?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "To access Cloudflare Workers AI models you'll need to create a Cloudflare account, get an API key, and install the `@langchain/cloudflare` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head [to this page](https://developers.cloudflare.com/workers-ai/) to sign up to Cloudflare and generate an API key. 
Once you've done this, note your `CLOUDFLARE_ACCOUNT_ID` and `CLOUDFLARE_API_TOKEN`.\n", + "\n", + "Passing a binding within a Cloudflare Worker is not yet supported.\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatCloudflareWorkersAI integration lives in the `@langchain/cloudflare` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/cloudflare @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Das Programmieren ist für mich sehr Valent sein!\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Das Programmieren ist für mich sehr Valent sein!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "code", + "execution_count": 1, + "id": "fb6997f0", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "// @ts-expect-error Deno is not recognized\n", + "const CLOUDFLARE_ACCOUNT_ID = Deno.env.get(\"CLOUDFLARE_ACCOUNT_ID\");\n", + "// @ts-expect-error Deno is not recognized\n", + "const CLOUDFLARE_API_TOKEN = Deno.env.get(\"CLOUDFLARE_API_TOKEN\");" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatCloudflareWorkersAI } from \"@langchain/cloudflare\";\n", + "\n", + "const llm = new ChatCloudflareWorkersAI({\n", + " model: \"@cf/meta/llama-2-7b-chat-int8\", // Default value\n", + " cloudflareAccountId: CLOUDFLARE_ACCOUNT_ID,\n", + " cloudflareApiToken: CLOUDFLARE_API_TOKEN,\n", + " // Pass a custom base URL to use Cloudflare AI Gateway\n", + " // baseUrl: `https://gateway.ai.cloudflare.com/v1/{YOUR_ACCOUNT_ID}/{GATEWAY_NAME}/workers-ai/`,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m'I can help with that! The translation of \"I love programming\" in French is:\\n'\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m`\"J'adore le programmati`\u001b[39m... 4 more characters,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m'I can help with that! 
The translation of \"I love programming\" in French is:\\n'\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m`\"J'adore le programmati`\u001b[39m... 4 more characters,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I can help with that! The translation of \"I love programming\" in French is:\n", + "\n", + "\"J'adore le programmation.\"\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"Das Programmieren ist für mich sehr Valent sein!\"\u001b[39m,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"Das Programmieren ist für mich sehr Valent sein!\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {},\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `ChatCloudflareWorkersAI` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cloudflare.ChatCloudflareWorkersAI.html" ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to 
{output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `ChatCloudflareWorkersAI` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cloudflare.ChatCloudflareWorkersAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/cohere.ipynb b/docs/core_docs/docs/integrations/chat/cohere.ipynb index fd14be747f9f..97baaf8081e0 100644 --- a/docs/core_docs/docs/integrations/chat/cohere.ipynb +++ b/docs/core_docs/docs/integrations/chat/cohere.ipynb @@ -1,1670 +1,1670 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Cohere\n", - "lc_docs_skip_validation: true\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatCohere\n", - "\n", - "[Cohere](https://cohere.com/) is a Canadian startup that provides natural language processing models that help companies improve human-machine interactions.\n", - "\n", - "This will help you getting started with Cohere [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all `ChatCohere` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/cohere) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatCohere](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html) | [`@langchain/cohere`](https://www.npmjs.com/package/@langchain/cohere) | ❌ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "In order to use the LangChain.js Cohere integration you'll need an API key.\n", - "You can sign up for a Cohere account and create an API key [here](https://dashboard.cohere.com/welcome/register).\n", - "\n", - "You'll first need to install the [`@langchain/cohere`](https://www.npmjs.com/package/@langchain/cohere) package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [Cohere's website](https://dashboard.cohere.com/welcome/register) to sign up to Cohere and generate an API key. 
Once you've done this set the `COHERE_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export COHERE_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain ChatCohere integration lives in the `@langchain/cohere` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/cohere @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatCohere } from \"@langchain/cohere\" \n", - "\n", - "const llm = new ChatCohere({\n", - " model: \"command-r-plus\",\n", - " temperature: 0,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "cd31a8b7", - "metadata": {}, - "source": [ - "### Custom client for Cohere on Azure, Cohere on AWS Bedrock, and Standalone Cohere Instance.\n", - "\n", - "We can instantiate a custom `CohereClient` and pass it to the ChatCohere constructor.\n", - "\n", - "**Note:** If a custom client is provided both `COHERE_API_KEY` environment variable and `apiKey` parameter in the constructor will be ignored." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d92326b8", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatCohere } from \"@langchain/cohere\";\n", - "import { CohereClient } from \"cohere-ai\";\n", - "\n", - "const client = new CohereClient({\n", - " token: \"\",\n", - " environment: \"\", //optional\n", - " // other params\n", - "});\n", - "\n", - "const llmWithCustomClient = new ChatCohere({\n", - " client,\n", - " // other params...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"J'adore programmer.\",\n", - " \"additional_kwargs\": {\n", - " \"response_id\": \"0056057a-6075-4436-b75a-b9455ac39f74\",\n", - " \"generationId\": \"3a0985db-92ff-41d8-b6b9-b7b77e300f3b\",\n", - " \"chatHistory\": [\n", - " {\n", - " \"role\": \"SYSTEM\",\n", - " \"message\": \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\"\n", - " },\n", - " {\n", - " \"role\": \"USER\",\n", - " \"message\": \"I love programming.\"\n", - " },\n", - " {\n", - " \"role\": \"CHATBOT\",\n", - " \"message\": \"J'adore programmer.\"\n", - " }\n", - " ],\n", - " \"finishReason\": \"COMPLETE\",\n", - " \"meta\": {\n", - " \"apiVersion\": {\n", - " \"version\": \"1\"\n", - " },\n", - " \"billedUnits\": {\n", - " \"inputTokens\": 20,\n", - " \"outputTokens\": 5\n", - " },\n", - " \"tokens\": {\n", - " \"inputTokens\": 89,\n", - " \"outputTokens\": 5\n", - " }\n", - " }\n", - " },\n", - " \"response_metadata\": {\n", - " \"estimatedTokenUsage\": {\n", - " \"completionTokens\": 5,\n", - " \"promptTokens\": 89,\n", - " \"totalTokens\": 94\n", - " },\n", - " \"response_id\": \"0056057a-6075-4436-b75a-b9455ac39f74\",\n", - " \"generationId\": \"3a0985db-92ff-41d8-b6b9-b7b77e300f3b\",\n", - " \"chatHistory\": [\n", - " {\n", - " \"role\": \"SYSTEM\",\n", - " \"message\": \"You are a helpful assistant that translates English to French. Translate the user sentence.\"\n", - " },\n", - " {\n", - " \"role\": \"USER\",\n", - " \"message\": \"I love programming.\"\n", - " },\n", - " {\n", - " \"role\": \"CHATBOT\",\n", - " \"message\": \"J'adore programmer.\"\n", - " }\n", - " ],\n", - " \"finishReason\": \"COMPLETE\",\n", - " \"meta\": {\n", - " \"apiVersion\": {\n", - " \"version\": \"1\"\n", - " },\n", - " \"billedUnits\": {\n", - " \"inputTokens\": 20,\n", - " \"outputTokens\": 5\n", - " },\n", - " \"tokens\": {\n", - " \"inputTokens\": 89,\n", - " \"outputTokens\": 5\n", - " }\n", - " }\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 89,\n", - " \"output_tokens\": 5,\n", - " \"total_tokens\": 94\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Cohere\n", + "lc_docs_skip_validation: true\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore programmer.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatCohere\n", + "\n", + "[Cohere](https://cohere.com/) is a Canadian startup that provides natural language processing models that help companies improve human-machine interactions.\n", + "\n", + "This will help you getting started with Cohere [chat models](/docs/concepts/chat_models). 
For detailed documentation of all `ChatCohere` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/cohere) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatCohere](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html) | [`@langchain/cohere`](https://www.npmjs.com/package/@langchain/cohere) | ❌ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "In order to use the LangChain.js Cohere integration you'll need an API key.\n", + "You can sign up for a Cohere account and create an API key [here](https://dashboard.cohere.com/welcome/register).\n", + "\n", + "You'll first need to install the [`@langchain/cohere`](https://www.npmjs.com/package/@langchain/cohere) package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [Cohere's website](https://dashboard.cohere.com/welcome/register) to sign up to Cohere and generate an API key. 
Once you've done this set the `COHERE_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export COHERE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatCohere integration lives in the `@langchain/cohere` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/cohere @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"Ich liebe Programmieren.\",\n", - " \"additional_kwargs\": {\n", - " \"response_id\": \"271e1439-7220-40fa-953d-c9f2947e451a\",\n", - " \"generationId\": \"f99970a4-7b1c-4d76-a73a-4467a1db759c\",\n", - " \"chatHistory\": [\n", - " {\n", - " \"role\": \"SYSTEM\",\n", - " \"message\": \"You are a helpful assistant that translates English to German.\"\n", - " },\n", - " {\n", - " \"role\": \"USER\",\n", - " \"message\": \"I love programming.\"\n", - " },\n", - " {\n", - " \"role\": \"CHATBOT\",\n", - " \"message\": \"Ich liebe Programmieren.\"\n", - " }\n", - " ],\n", - " \"finishReason\": \"COMPLETE\",\n", - " \"meta\": {\n", - " \"apiVersion\": {\n", - " \"version\": \"1\"\n", - " },\n", - " \"billedUnits\": {\n", - " \"inputTokens\": 15,\n", - " \"outputTokens\": 6\n", - " },\n", - " \"tokens\": {\n", - " \"inputTokens\": 84,\n", - " \"outputTokens\": 6\n", - " }\n", - " }\n", - " },\n", - " \"response_metadata\": {\n", - " \"estimatedTokenUsage\": {\n", - " \"completionTokens\": 6,\n", - " \"promptTokens\": 84,\n", - " \"totalTokens\": 90\n", - " },\n", - " \"response_id\": \"271e1439-7220-40fa-953d-c9f2947e451a\",\n", - " \"generationId\": \"f99970a4-7b1c-4d76-a73a-4467a1db759c\",\n", - " \"chatHistory\": [\n", - " {\n", - " \"role\": \"SYSTEM\",\n", - " \"message\": \"You are a helpful assistant that translates English to German.\"\n", - " },\n", - " {\n", - " \"role\": \"USER\",\n", - " \"message\": \"I love programming.\"\n", - " },\n", - " {\n", - " \"role\": \"CHATBOT\",\n", - " \"message\": \"Ich liebe Programmieren.\"\n", - " }\n", - " ],\n", - " \"finishReason\": \"COMPLETE\",\n", - " \"meta\": {\n", - " \"apiVersion\": {\n", - " \"version\": \"1\"\n", - " },\n", - " \"billedUnits\": {\n", - " \"inputTokens\": 15,\n", - " \"outputTokens\": 6\n", - " },\n", - " \"tokens\": {\n", - " \"inputTokens\": 84,\n", - " \"outputTokens\": 6\n", - " }\n", - " }\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 84,\n", - " \"output_tokens\": 6,\n", - " \"total_tokens\": 90\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: 
\"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "4fecf4e4", - "metadata": {}, - "source": [ - "## RAG\n", - "\n", - "Cohere also comes out of the box with RAG support.\n", - "You can pass in documents as context to the API request and Cohere's models will use them when generating responses." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "74d6320e", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Harrison worked at Kensho as an engineer for 3 years.\n" - ] - } - ], - "source": [ - "import { ChatCohere } from \"@langchain/cohere\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const llmForRag = new ChatCohere({\n", - " apiKey: process.env.COHERE_API_KEY, // Default\n", - "});\n", - "\n", - "const documents = [\n", - " {\n", - " title: \"Harrison's work\",\n", - " snippet: \"Harrison worked at Kensho as an engineer.\",\n", - " },\n", - " {\n", - " title: \"Harrison's work duration\",\n", - " snippet: \"Harrison worked at Kensho for 3 years.\",\n", - " },\n", - " {\n", - " title: \"Polar berars in the Appalachian Mountains\",\n", - " snippet:\n", - " \"Polar bears have surprisingly adapted to the Appalachian Mountains, thriving in the diverse, forested terrain despite their traditional arctic habitat. This unique situation has sparked significant interest and study in climate adaptability and wildlife behavior.\",\n", - " },\n", - "];\n", - "\n", - "const ragResponse = await llmForRag.invoke(\n", - " [new HumanMessage(\"Where did Harrison work and for how long?\")],\n", - " {\n", - " documents,\n", - " }\n", - ");\n", - "console.log(ragResponse.content);" - ] - }, - { - "cell_type": "markdown", - "id": "aa13bae8", - "metadata": {}, - "source": [ - "## Connectors\n", - "\n", - "The API also allows for other connections which are not static documents.\n", - "An example of this is their `web-search` connector which allows you to pass in a query and the API will search the web for relevant documents.\n", - "The example below demonstrates how to use this feature." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "478f7c9e", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\" \n", + "\n", + "const llm = new ChatCohere({\n", + " model: \"command-r-plus\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "cd31a8b7", + "metadata": {}, + "source": [ + "### Custom client for Cohere on Azure, Cohere on AWS Bedrock, and Standalone Cohere Instance.\n", + "\n", + "We can instantiate a custom `CohereClient` and pass it to the ChatCohere constructor.\n", + "\n", + "**Note:** If a custom client is provided both `COHERE_API_KEY` environment variable and `apiKey` parameter in the constructor will be ignored." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d92326b8", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\";\n", + "import { CohereClient } from \"cohere-ai\";\n", + "\n", + "const client = new CohereClient({\n", + " token: \"\",\n", + " environment: \"\", //optional\n", + " // other params\n", + "});\n", + "\n", + "const llmWithCustomClient = new ChatCohere({\n", + " client,\n", + " // other params...\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"J'adore programmer.\",\n", + " \"additional_kwargs\": {\n", + " \"response_id\": \"0056057a-6075-4436-b75a-b9455ac39f74\",\n", + " \"generationId\": \"3a0985db-92ff-41d8-b6b9-b7b77e300f3b\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to French. Translate the user sentence.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"J'adore programmer.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 20,\n", + " \"outputTokens\": 5\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 89,\n", + " \"outputTokens\": 5\n", + " }\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"estimatedTokenUsage\": {\n", + " \"completionTokens\": 5,\n", + " \"promptTokens\": 89,\n", + " \"totalTokens\": 94\n", + " },\n", + " \"response_id\": \"0056057a-6075-4436-b75a-b9455ac39f74\",\n", + " \"generationId\": \"3a0985db-92ff-41d8-b6b9-b7b77e300f3b\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to French. Translate the user sentence.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"J'adore programmer.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 20,\n", + " \"outputTokens\": 5\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 89,\n", + " \"outputTokens\": 5\n", + " }\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 89,\n", + " \"output_tokens\": 5,\n", + " \"total_tokens\": 94\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore programmer.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe Programmieren.\",\n", + " \"additional_kwargs\": {\n", + " \"response_id\": \"271e1439-7220-40fa-953d-c9f2947e451a\",\n", + " \"generationId\": \"f99970a4-7b1c-4d76-a73a-4467a1db759c\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to German.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"Ich liebe Programmieren.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 15,\n", + " \"outputTokens\": 6\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 84,\n", + " \"outputTokens\": 6\n", + " }\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"estimatedTokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 84,\n", + " \"totalTokens\": 90\n", + " },\n", + " \"response_id\": \"271e1439-7220-40fa-953d-c9f2947e451a\",\n", + " \"generationId\": \"f99970a4-7b1c-4d76-a73a-4467a1db759c\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to German.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"Ich liebe Programmieren.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 15,\n", + " \"outputTokens\": 6\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 84,\n", + " \"outputTokens\": 6\n", + " }\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 84,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 90\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + 
")" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.',\n", - " additional_kwargs: {\n", - " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", - " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", - " chatHistory: [\n", - " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", - " {\n", - " role: 'CHATBOT',\n", - " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", - " }\n", - " ],\n", - " finishReason: 'COMPLETE',\n", - " meta: {\n", - " apiVersion: { version: '1' },\n", - " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", - " tokens: { inputTokens: 11198, outputTokens: 286 }\n", - " },\n", - " citations: [\n", - " {\n", - " start: 43,\n", - " end: 54,\n", - " text: 'prehistoric',\n", - " documentIds: [ 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 55,\n", - " end: 79,\n", - " text: 'Palaeeudyptes klekowskii',\n", - " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 84,\n", - " end: 102,\n", - " text: '\"colossus penguin\"',\n", - " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 119,\n", - " end: 125,\n", - " text: '6 feet',\n", - " documentIds: [ 'web-search_0', 'web-search_1' ]\n", - " },\n", - " {\n", - " start: 126,\n", - " end: 134,\n", - " text: '6 inches',\n", - " documentIds: [ 'web-search_1' ]\n", - " },\n", - " {\n", - " start: 161,\n", - " end: 172,\n", - " text: 'alive today',\n", - " documentIds: [ 'web-search_0', 'web-search_5' ]\n", - " },\n", - " {\n", - " start: 180,\n", - " end: 195,\n", - " text: 'emperor penguin',\n", - " documentIds: [\n", - " 'web-search_0',\n", - " 'web-search_1',\n", - " 'web-search_2',\n", - " 'web-search_4',\n", - " 'web-search_5'\n", - " ]\n", - " },\n", - " {\n", - " start: 213,\n", - " end: 235,\n", - " text: 'just over 4 feet tall.',\n", - " documentIds: [ 'web-search_0', 'web-search_5' ]\n", - " }\n", - " ],\n", - " documents: [\n", - " {\n", - " id: 'web-search_1',\n", - " snippet: 'Largest species of penguin ever\\n' +\n", - " '\\n' +\n", - " 'TencentContact an Account Manager\\n' +\n", - " '\\n' +\n", - " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", - " '\\n' +\n", - " 'The height of K. 
fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", - " '\\n' +\n", - " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", - " '\\n' +\n", - " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", - " '\\n' +\n", - " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", - " '\\n' +\n", - " 'Comments below may relate to previous holders of this record.',\n", - " timestamp: '2024-07-28T02:56:04',\n", - " title: 'Largest species of penguin ever',\n", - " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", - " },\n", - " {\n", - " id: 'web-search_2',\n", - " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", - " '\\n' +\n", - " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", - " '\\n' +\n", - " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", - " '\\n' +\n", - " 'Artwork by Dr. Simone Giovanardi\\n' +\n", - " '\\n' +\n", - " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. Here are the most impressive of these ancient giants.\\n' +\n", - " '\\n' +\n", - " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", - " '\\n' +\n", - " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. “I almost thought it was maybe some other animal.”\\n' +\n", - " '\\n' +\n", - " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. 
“The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", - " '\\n' +\n", - " 'Sign up to our Wild Wild Life newsletter\\n' +\n", - " '\\n' +\n", - " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", - " '\\n' +\n", - " 'Sign up to newsletter\\n' +\n", - " '\\n' +\n", - " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", - " '\\n' +\n", - " 'Artwork by Dr. Simone Giovanardi\\n' +\n", - " '\\n' +\n", - " 'Petradyptes stonehousei\\n' +\n", - " '\\n' +\n", - " 'Ksepka and his colleagues discovered another giant penguin alongside K. fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", - " '\\n' +\n", - " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", - " '\\n' +\n", - " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", - " '\\n' +\n", - " 'Nature Picture Library / Alamy\\n' +\n", - " '\\n' +\n", - " 'Palaeeudyptes klekowskii\\n' +\n", - " '\\n' +\n", - " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", - " '\\n' +\n", - " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", - " '\\n' +\n", - " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", - " '\\n' +\n", - " 'Pachydyptes ponderosus\\n' +\n", - " '\\n' +\n", - " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", - " '\\n' +\n", - " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", - " '\\n' +\n", - " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", - " '\\n' +\n", - " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. waewaeroa), however, were the complete opposite.\\n' +\n", - " '\\n' +\n", - " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. 
“They’re both really big, but in different ways.”\\n' +\n", - " '\\n' +\n", - " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", - " '\\n' +\n", - " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'Sign up to our weekly newsletter\\n' +\n", - " '\\n' +\n", - " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. Sign up\\n\" +\n", - " '\\n' +\n", - " 'More from New Scientist\\n' +\n", - " '\\n' +\n", - " 'Explore the latest news, articles and features\\n' +\n", - " '\\n' +\n", - " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", - " '\\n' +\n", - " 'How you can help with penguin research by browsing images at home\\n' +\n", - " '\\n' +\n", - " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", - " '\\n' +\n", - " 'Penguins adapt their accents to sound more like their friends\\n' +\n", - " '\\n' +\n", - " 'Trending New Scientist articles\\n' +\n", - " '\\n' +\n", - " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", - " '\\n' +\n", - " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", - " '\\n' +\n", - " 'When is the best time to exercise to get the most from your workout?\\n' +\n", - " '\\n' +\n", - " 'Why slow running could be even more beneficial than running fast\\n' +\n", - " '\\n' +\n", - " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", - " '\\n' +\n", - " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", - " '\\n' +\n", - " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", - " '\\n' +\n", - " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", - " '\\n' +\n", - " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", - " '\\n' +\n", - " 'Why midlife is the perfect time to take control of your future health',\n", - " timestamp: '2024-07-28T02:56:04',\n", - " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", - " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", - " },\n", - " {\n", - " id: 'web-search_0',\n", - " snippet: 'Sustainability for All.\\n' +\n", - " '\\n' +\n", - " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", - " '\\n' +\n", - " 'University of Houston\\n' +\n", - " '\\n' +\n", - " 'Bryan Nelson is a science writer and award-winning documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", - " '\\n' +\n", - " 'Learn about our editorial process\\n' +\n", - " '\\n' +\n", - " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", - " '\\n' +\n", - " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . Christopher Michel/flickr\\n\" +\n", - " '\\n' +\n", - " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. 
Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", - " '\\n' +\n", - " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", - " '\\n' +\n", - " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", - " '\\n' +\n", - " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", - " '\\n' +\n", - " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", - " '\\n' +\n", - " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", - " '\\n' +\n", - " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", - " '\\n' +\n", - " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. 
Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", - " '\\n' +\n", - " '10 of the Largest Living Sea Creatures\\n' +\n", - " '\\n' +\n", - " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", - " '\\n' +\n", - " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", - " '\\n' +\n", - " 'National Monuments Designated By President Obama\\n' +\n", - " '\\n' +\n", - " '20 Pygmy Animal Species From Around the World\\n' +\n", - " '\\n' +\n", - " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", - " '\\n' +\n", - " '16 of the Most Surreal Landscapes on Earth\\n' +\n", - " '\\n' +\n", - " '12 Peculiar Penguin Facts\\n' +\n", - " '\\n' +\n", - " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", - " '\\n' +\n", - " '8 Titanic Facts About Patagotitans\\n' +\n", - " '\\n' +\n", - " '9 Extinct Megafauna That Are Out of This World\\n' +\n", - " '\\n' +\n", - " '10 Places Where Penguins Live in the Wild\\n' +\n", - " '\\n' +\n", - " '16 Animals That Are Living Fossils\\n' +\n", - " '\\n' +\n", - " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", - " '\\n' +\n", - " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", - " '\\n' +\n", - " '12 Dinosaur Theme Parks\\n' +\n", - " '\\n' +\n", - " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", - " '\\n' +\n", - " 'Cookies Settings Accept All Cookies',\n", - " timestamp: '2024-07-27T06:29:15',\n", - " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", - " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", - " },\n", - " {\n", - " id: 'web-search_5',\n", - " snippet: 'Skip to main content\\n' +\n", - " '\\n' +\n", - " 'Smithsonian Institution\\n' +\n", - " '\\n' +\n", - " 'Search Smithsonian Ocean\\n' +\n", - " '\\n' +\n", - " 'Follow us on Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", - " '\\n' +\n", - " 'How Big Do Penguins Get?\\n' +\n", - " '\\n' +\n", - " '(Smithsonian Institution)\\n' +\n", - " '\\n' +\n", - " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", - " '\\n' +\n", - " 'Coasts & Shallow Water\\n' +\n", - " '\\n' +\n", - " 'Census of Marine Life\\n' +\n", - " '\\n' +\n", - " 'Waves, Storms & Tsunamis\\n' +\n", - " '\\n' +\n", - " 'Temperature & Chemistry\\n' +\n", - " '\\n' +\n", - " 'Solutions & Success Stories\\n' +\n", - " '\\n' +\n", - " 'Books, Film & The Arts\\n' +\n", - " '\\n' +\n", - " 'Search Smithsonian Ocean',\n", - " timestamp: '2024-07-30T03:47:03',\n", - " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", - " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", - " },\n", - " {\n", - " id: 'web-search_4',\n", - " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). 
Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", - " '\\n' +\n", - " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", - " '\\n' +\n", - " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", - " '\\n' +\n", - " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", - " '\\n' +\n", - " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", - " '\\n' +\n", - " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. 
The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", - " '\\n' +\n", - " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", - " '\\n' +\n", - " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", - " '\\n' +\n", - " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", - " '\\n' +\n", - " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. 
Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", - " '\\n' +\n", - " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", - " '\\n' +\n", - " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", - " '\\n' +\n", - " 'Adaptations to pressure and low oxygen\\n' +\n", - " '\\n' +\n", - " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", - " '\\n' +\n", - " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", - " '\\n' +\n", - " 'Distribution and habitat\\n' +\n", - " '\\n' +\n", - " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 
22063 more characters,\n", - " timestamp: '2024-07-31T07:59:36',\n", - " title: 'Emperor penguin - Wikipedia',\n", - " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", - " }\n", - " ],\n", - " searchResults: [\n", - " {\n", - " searchQuery: {\n", - " text: 'How tall are the largest penguins?',\n", - " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", - " },\n", - " documentIds: [\n", - " 'web-search_0',\n", - " 'web-search_1',\n", - " 'web-search_2',\n", - " 'web-search_3',\n", - " 'web-search_4',\n", - " 'web-search_5'\n", - " ],\n", - " connector: { id: 'web-search' }\n", - " }\n", - " ],\n", - " searchQueries: [\n", - " {\n", - " text: 'How tall are the largest penguins?',\n", - " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", - " }\n", - " ]\n", - " },\n", - " tool_calls: [],\n", - " usage_metadata: { input_tokens: 11198, output_tokens: 286, total_tokens: 11484 },\n", - " invalid_tool_calls: [],\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ 'langchain_core', 'messages' ],\n", - " content: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.',\n", - " name: undefined,\n", - " additional_kwargs: {\n", - " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", - " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", - " chatHistory: [\n", - " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", - " {\n", - " role: 'CHATBOT',\n", - " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", - " }\n", - " ],\n", - " finishReason: 'COMPLETE',\n", - " meta: {\n", - " apiVersion: { version: '1' },\n", - " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", - " tokens: { inputTokens: 11198, outputTokens: 286 }\n", - " },\n", - " citations: [\n", - " {\n", - " start: 43,\n", - " end: 54,\n", - " text: 'prehistoric',\n", - " documentIds: [ 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 55,\n", - " end: 79,\n", - " text: 'Palaeeudyptes klekowskii',\n", - " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 84,\n", - " end: 102,\n", - " text: '\"colossus penguin\"',\n", - " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 119,\n", - " end: 125,\n", - " text: '6 feet',\n", - " documentIds: [ 'web-search_0', 'web-search_1' ]\n", - " },\n", - " {\n", - " start: 126,\n", - " end: 134,\n", - " text: '6 inches',\n", - " documentIds: [ 'web-search_1' ]\n", - " },\n", - " {\n", - " start: 161,\n", - " end: 172,\n", - " text: 'alive today',\n", - " documentIds: [ 'web-search_0', 'web-search_5' ]\n", - " },\n", - " {\n", - " start: 180,\n", - " end: 195,\n", - " text: 'emperor penguin',\n", - " documentIds: [\n", - " 'web-search_0',\n", - " 'web-search_1',\n", - " 'web-search_2',\n", - " 'web-search_4',\n", - " 'web-search_5'\n", - " ]\n", - " },\n", - " {\n", - " start: 213,\n", - " end: 235,\n", - " text: 'just over 4 feet tall.',\n", - " documentIds: [ 'web-search_0', 'web-search_5' ]\n", - " }\n", - " ],\n", - " documents: [\n", - " {\n", - " id: 'web-search_1',\n", - " snippet: 'Largest species of penguin ever\\n' +\n", - " '\\n' +\n", - 
" 'TencentContact an Account Manager\\n' +\n", - " '\\n' +\n", - " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", - " '\\n' +\n", - " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", - " '\\n' +\n", - " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", - " '\\n' +\n", - " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", - " '\\n' +\n", - " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", - " '\\n' +\n", - " 'Comments below may relate to previous holders of this record.',\n", - " timestamp: '2024-07-28T02:56:04',\n", - " title: 'Largest species of penguin ever',\n", - " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", - " },\n", - " {\n", - " id: 'web-search_2',\n", - " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", - " '\\n' +\n", - " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", - " '\\n' +\n", - " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", - " '\\n' +\n", - " 'Artwork by Dr. Simone Giovanardi\\n' +\n", - " '\\n' +\n", - " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. 
Here are the most impressive of these ancient giants.\\n' +\n", - " '\\n' +\n", - " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", - " '\\n' +\n", - " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. “I almost thought it was maybe some other animal.”\\n' +\n", - " '\\n' +\n", - " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. “The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", - " '\\n' +\n", - " 'Sign up to our Wild Wild Life newsletter\\n' +\n", - " '\\n' +\n", - " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", - " '\\n' +\n", - " 'Sign up to newsletter\\n' +\n", - " '\\n' +\n", - " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", - " '\\n' +\n", - " 'Artwork by Dr. Simone Giovanardi\\n' +\n", - " '\\n' +\n", - " 'Petradyptes stonehousei\\n' +\n", - " '\\n' +\n", - " 'Ksepka and his colleagues discovered another giant penguin alongside K. fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", - " '\\n' +\n", - " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", - " '\\n' +\n", - " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", - " '\\n' +\n", - " 'Nature Picture Library / Alamy\\n' +\n", - " '\\n' +\n", - " 'Palaeeudyptes klekowskii\\n' +\n", - " '\\n' +\n", - " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", - " '\\n' +\n", - " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", - " '\\n' +\n", - " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", - " '\\n' +\n", - " 'Pachydyptes ponderosus\\n' +\n", - " '\\n' +\n", - " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. 
Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", - " '\\n' +\n", - " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", - " '\\n' +\n", - " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", - " '\\n' +\n", - " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. waewaeroa), however, were the complete opposite.\\n' +\n", - " '\\n' +\n", - " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. “They’re both really big, but in different ways.”\\n' +\n", - " '\\n' +\n", - " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", - " '\\n' +\n", - " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'Sign up to our weekly newsletter\\n' +\n", - " '\\n' +\n", - " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. Sign up\\n\" +\n", - " '\\n' +\n", - " 'More from New Scientist\\n' +\n", - " '\\n' +\n", - " 'Explore the latest news, articles and features\\n' +\n", - " '\\n' +\n", - " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", - " '\\n' +\n", - " 'How you can help with penguin research by browsing images at home\\n' +\n", - " '\\n' +\n", - " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", - " '\\n' +\n", - " 'Penguins adapt their accents to sound more like their friends\\n' +\n", - " '\\n' +\n", - " 'Trending New Scientist articles\\n' +\n", - " '\\n' +\n", - " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", - " '\\n' +\n", - " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", - " '\\n' +\n", - " 'When is the best time to exercise to get the most from your workout?\\n' +\n", - " '\\n' +\n", - " 'Why slow running could be even more beneficial than running fast\\n' +\n", - " '\\n' +\n", - " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", - " '\\n' +\n", - " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", - " '\\n' +\n", - " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", - " '\\n' +\n", - " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", - " '\\n' +\n", - " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", - " '\\n' +\n", - " 'Why midlife is the perfect time to take control of your future health',\n", - " timestamp: '2024-07-28T02:56:04',\n", - " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", - " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", - " },\n", - " {\n", - " id: 'web-search_0',\n", - " snippet: 'Sustainability for All.\\n' +\n", - " '\\n' +\n", - " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", - " '\\n' +\n", - " 'University of Houston\\n' +\n", - " '\\n' +\n", - " 'Bryan Nelson is a science writer and award-winning 
documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", - " '\\n' +\n", - " 'Learn about our editorial process\\n' +\n", - " '\\n' +\n", - " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", - " '\\n' +\n", - " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . Christopher Michel/flickr\\n\" +\n", - " '\\n' +\n", - " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", - " '\\n' +\n", - " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", - " '\\n' +\n", - " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", - " '\\n' +\n", - " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", - " '\\n' +\n", - " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", - " '\\n' +\n", - " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", - " '\\n' +\n", - " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", - " '\\n' +\n", - " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. 
Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", - " '\\n' +\n", - " '10 of the Largest Living Sea Creatures\\n' +\n", - " '\\n' +\n", - " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", - " '\\n' +\n", - " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", - " '\\n' +\n", - " 'National Monuments Designated By President Obama\\n' +\n", - " '\\n' +\n", - " '20 Pygmy Animal Species From Around the World\\n' +\n", - " '\\n' +\n", - " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", - " '\\n' +\n", - " '16 of the Most Surreal Landscapes on Earth\\n' +\n", - " '\\n' +\n", - " '12 Peculiar Penguin Facts\\n' +\n", - " '\\n' +\n", - " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", - " '\\n' +\n", - " '8 Titanic Facts About Patagotitans\\n' +\n", - " '\\n' +\n", - " '9 Extinct Megafauna That Are Out of This World\\n' +\n", - " '\\n' +\n", - " '10 Places Where Penguins Live in the Wild\\n' +\n", - " '\\n' +\n", - " '16 Animals That Are Living Fossils\\n' +\n", - " '\\n' +\n", - " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", - " '\\n' +\n", - " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", - " '\\n' +\n", - " '12 Dinosaur Theme Parks\\n' +\n", - " '\\n' +\n", - " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", - " '\\n' +\n", - " 'Cookies Settings Accept All Cookies',\n", - " timestamp: '2024-07-27T06:29:15',\n", - " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", - " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", - " },\n", - " {\n", - " id: 'web-search_5',\n", - " snippet: 'Skip to main content\\n' +\n", - " '\\n' +\n", - " 'Smithsonian Institution\\n' +\n", - " '\\n' +\n", - " 'Search Smithsonian Ocean\\n' +\n", - " '\\n' +\n", - " 'Follow us on Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", - " '\\n' +\n", - " 'How Big Do Penguins Get?\\n' +\n", - " '\\n' +\n", - " '(Smithsonian Institution)\\n' +\n", - " '\\n' +\n", - " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", - " '\\n' +\n", - " 'Coasts & Shallow Water\\n' +\n", - " '\\n' +\n", - " 'Census of Marine Life\\n' +\n", - " '\\n' +\n", - " 'Waves, Storms & Tsunamis\\n' +\n", - " '\\n' +\n", - " 'Temperature & Chemistry\\n' +\n", - " '\\n' +\n", - " 'Solutions & Success Stories\\n' +\n", - " '\\n' +\n", - " 'Books, Film & The Arts\\n' +\n", - " '\\n' +\n", - " 'Search Smithsonian Ocean',\n", - " timestamp: '2024-07-30T03:47:03',\n", - " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", - " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", - " },\n", - " {\n", - " id: 'web-search_4',\n", - " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). 
Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", - " '\\n' +\n", - " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", - " '\\n' +\n", - " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", - " '\\n' +\n", - " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", - " '\\n' +\n", - " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", - " '\\n' +\n", - " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. 
The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", - " '\\n' +\n", - " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", - " '\\n' +\n", - " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", - " '\\n' +\n", - " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", - " '\\n' +\n", - " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. 
Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", - " '\\n' +\n", - " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", - " '\\n' +\n", - " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", - " '\\n' +\n", - " 'Adaptations to pressure and low oxygen\\n' +\n", - " '\\n' +\n", - " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", - " '\\n' +\n", - " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", - " '\\n' +\n", - " 'Distribution and habitat\\n' +\n", - " '\\n' +\n", - " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 
22063 more characters,\n", - " timestamp: '2024-07-31T07:59:36',\n", - " title: 'Emperor penguin - Wikipedia',\n", - " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", - " }\n", - " ],\n", - " searchResults: [\n", - " {\n", - " searchQuery: {\n", - " text: 'How tall are the largest penguins?',\n", - " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", - " },\n", - " documentIds: [\n", - " 'web-search_0',\n", - " 'web-search_1',\n", - " 'web-search_2',\n", - " 'web-search_3',\n", - " 'web-search_4',\n", - " 'web-search_5'\n", - " ],\n", - " connector: { id: 'web-search' }\n", - " }\n", - " ],\n", - " searchQueries: [\n", - " {\n", - " text: 'How tall are the largest penguins?',\n", - " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", - " }\n", - " ]\n", - " },\n", - " response_metadata: {\n", - " estimatedTokenUsage: { completionTokens: 286, promptTokens: 11198, totalTokens: 11484 },\n", - " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", - " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", - " chatHistory: [\n", - " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", - " {\n", - " role: 'CHATBOT',\n", - " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", - " }\n", - " ],\n", - " finishReason: 'COMPLETE',\n", - " meta: {\n", - " apiVersion: { version: '1' },\n", - " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", - " tokens: { inputTokens: 11198, outputTokens: 286 }\n", - " },\n", - " citations: [\n", - " {\n", - " start: 43,\n", - " end: 54,\n", - " text: 'prehistoric',\n", - " documentIds: [ 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 55,\n", - " end: 79,\n", - " text: 'Palaeeudyptes klekowskii',\n", - " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 84,\n", - " end: 102,\n", - " text: '\"colossus penguin\"',\n", - " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", - " },\n", - " {\n", - " start: 119,\n", - " end: 125,\n", - " text: '6 feet',\n", - " documentIds: [ 'web-search_0', 'web-search_1' ]\n", - " },\n", - " {\n", - " start: 126,\n", - " end: 134,\n", - " text: '6 inches',\n", - " documentIds: [ 'web-search_1' ]\n", - " },\n", - " {\n", - " start: 161,\n", - " end: 172,\n", - " text: 'alive today',\n", - " documentIds: [ 'web-search_0', 'web-search_5' ]\n", - " },\n", - " {\n", - " start: 180,\n", - " end: 195,\n", - " text: 'emperor penguin',\n", - " documentIds: [\n", - " 'web-search_0',\n", - " 'web-search_1',\n", - " 'web-search_2',\n", - " 'web-search_4',\n", - " 'web-search_5'\n", - " ]\n", - " },\n", - " {\n", - " start: 213,\n", - " end: 235,\n", - " text: 'just over 4 feet tall.',\n", - " documentIds: [ 'web-search_0', 'web-search_5' ]\n", - " }\n", - " ],\n", - " documents: [\n", - " {\n", - " id: 'web-search_1',\n", - " snippet: 'Largest species of penguin ever\\n' +\n", - " '\\n' +\n", - " 'TencentContact an Account Manager\\n' +\n", - " '\\n' +\n", - " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. 
By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", - " '\\n' +\n", - " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", - " '\\n' +\n", - " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", - " '\\n' +\n", - " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", - " '\\n' +\n", - " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", - " '\\n' +\n", - " 'Comments below may relate to previous holders of this record.',\n", - " timestamp: '2024-07-28T02:56:04',\n", - " title: 'Largest species of penguin ever',\n", - " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", - " },\n", - " {\n", - " id: 'web-search_2',\n", - " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", - " '\\n' +\n", - " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", - " '\\n' +\n", - " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", - " '\\n' +\n", - " 'Artwork by Dr. Simone Giovanardi\\n' +\n", - " '\\n' +\n", - " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. Here are the most impressive of these ancient giants.\\n' +\n", - " '\\n' +\n", - " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", - " '\\n' +\n", - " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. 
“I almost thought it was maybe some other animal.”\\n' +\n", - " '\\n' +\n", - " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. “The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", - " '\\n' +\n", - " 'Sign up to our Wild Wild Life newsletter\\n' +\n", - " '\\n' +\n", - " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", - " '\\n' +\n", - " 'Sign up to newsletter\\n' +\n", - " '\\n' +\n", - " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", - " '\\n' +\n", - " 'Artwork by Dr. Simone Giovanardi\\n' +\n", - " '\\n' +\n", - " 'Petradyptes stonehousei\\n' +\n", - " '\\n' +\n", - " 'Ksepka and his colleagues discovered another giant penguin alongside K. fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", - " '\\n' +\n", - " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", - " '\\n' +\n", - " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", - " '\\n' +\n", - " 'Nature Picture Library / Alamy\\n' +\n", - " '\\n' +\n", - " 'Palaeeudyptes klekowskii\\n' +\n", - " '\\n' +\n", - " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", - " '\\n' +\n", - " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", - " '\\n' +\n", - " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", - " '\\n' +\n", - " 'Pachydyptes ponderosus\\n' +\n", - " '\\n' +\n", - " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", - " '\\n' +\n", - " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", - " '\\n' +\n", - " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", - " '\\n' +\n", - " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. 
waewaeroa), however, were the complete opposite.\\n' +\n", - " '\\n' +\n", - " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. “They’re both really big, but in different ways.”\\n' +\n", - " '\\n' +\n", - " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", - " '\\n' +\n", - " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", - " '\\n' +\n", - " 'Sign up to our weekly newsletter\\n' +\n", - " '\\n' +\n", - " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. Sign up\\n\" +\n", - " '\\n' +\n", - " 'More from New Scientist\\n' +\n", - " '\\n' +\n", - " 'Explore the latest news, articles and features\\n' +\n", - " '\\n' +\n", - " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", - " '\\n' +\n", - " 'How you can help with penguin research by browsing images at home\\n' +\n", - " '\\n' +\n", - " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", - " '\\n' +\n", - " 'Penguins adapt their accents to sound more like their friends\\n' +\n", - " '\\n' +\n", - " 'Trending New Scientist articles\\n' +\n", - " '\\n' +\n", - " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", - " '\\n' +\n", - " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", - " '\\n' +\n", - " 'When is the best time to exercise to get the most from your workout?\\n' +\n", - " '\\n' +\n", - " 'Why slow running could be even more beneficial than running fast\\n' +\n", - " '\\n' +\n", - " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", - " '\\n' +\n", - " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", - " '\\n' +\n", - " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", - " '\\n' +\n", - " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", - " '\\n' +\n", - " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", - " '\\n' +\n", - " 'Why midlife is the perfect time to take control of your future health',\n", - " timestamp: '2024-07-28T02:56:04',\n", - " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", - " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", - " },\n", - " {\n", - " id: 'web-search_0',\n", - " snippet: 'Sustainability for All.\\n' +\n", - " '\\n' +\n", - " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", - " '\\n' +\n", - " 'University of Houston\\n' +\n", - " '\\n' +\n", - " 'Bryan Nelson is a science writer and award-winning documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", - " '\\n' +\n", - " 'Learn about our editorial process\\n' +\n", - " '\\n' +\n", - " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", - " '\\n' +\n", - " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . 
Christopher Michel/flickr\\n\" +\n", - " '\\n' +\n", - " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", - " '\\n' +\n", - " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", - " '\\n' +\n", - " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", - " '\\n' +\n", - " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", - " '\\n' +\n", - " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", - " '\\n' +\n", - " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", - " '\\n' +\n", - " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", - " '\\n' +\n", - " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. 
Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", - " '\\n' +\n", - " '10 of the Largest Living Sea Creatures\\n' +\n", - " '\\n' +\n", - " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", - " '\\n' +\n", - " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", - " '\\n' +\n", - " 'National Monuments Designated By President Obama\\n' +\n", - " '\\n' +\n", - " '20 Pygmy Animal Species From Around the World\\n' +\n", - " '\\n' +\n", - " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", - " '\\n' +\n", - " '16 of the Most Surreal Landscapes on Earth\\n' +\n", - " '\\n' +\n", - " '12 Peculiar Penguin Facts\\n' +\n", - " '\\n' +\n", - " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", - " '\\n' +\n", - " '8 Titanic Facts About Patagotitans\\n' +\n", - " '\\n' +\n", - " '9 Extinct Megafauna That Are Out of This World\\n' +\n", - " '\\n' +\n", - " '10 Places Where Penguins Live in the Wild\\n' +\n", - " '\\n' +\n", - " '16 Animals That Are Living Fossils\\n' +\n", - " '\\n' +\n", - " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", - " '\\n' +\n", - " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", - " '\\n' +\n", - " '12 Dinosaur Theme Parks\\n' +\n", - " '\\n' +\n", - " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", - " '\\n' +\n", - " 'Cookies Settings Accept All Cookies',\n", - " timestamp: '2024-07-27T06:29:15',\n", - " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", - " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", - " },\n", - " {\n", - " id: 'web-search_5',\n", - " snippet: 'Skip to main content\\n' +\n", - " '\\n' +\n", - " 'Smithsonian Institution\\n' +\n", - " '\\n' +\n", - " 'Search Smithsonian Ocean\\n' +\n", - " '\\n' +\n", - " 'Follow us on Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", - " '\\n' +\n", - " 'How Big Do Penguins Get?\\n' +\n", - " '\\n' +\n", - " '(Smithsonian Institution)\\n' +\n", - " '\\n' +\n", - " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", - " '\\n' +\n", - " 'Coasts & Shallow Water\\n' +\n", - " '\\n' +\n", - " 'Census of Marine Life\\n' +\n", - " '\\n' +\n", - " 'Waves, Storms & Tsunamis\\n' +\n", - " '\\n' +\n", - " 'Temperature & Chemistry\\n' +\n", - " '\\n' +\n", - " 'Solutions & Success Stories\\n' +\n", - " '\\n' +\n", - " 'Books, Film & The Arts\\n' +\n", - " '\\n' +\n", - " 'Search Smithsonian Ocean',\n", - " timestamp: '2024-07-30T03:47:03',\n", - " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", - " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", - " },\n", - " {\n", - " id: 'web-search_4',\n", - " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). 
Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", - " '\\n' +\n", - " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", - " '\\n' +\n", - " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", - " '\\n' +\n", - " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", - " '\\n' +\n", - " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", - " '\\n' +\n", - " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. 
The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", - " '\\n' +\n", - " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", - " '\\n' +\n", - " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", - " '\\n' +\n", - " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", - " '\\n' +\n", - " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. 
Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", - " '\\n' +\n", - " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", - " '\\n' +\n", - " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", - " '\\n' +\n", - " 'Adaptations to pressure and low oxygen\\n' +\n", - " '\\n' +\n", - " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", - " '\\n' +\n", - " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", - " '\\n' +\n", - " 'Distribution and habitat\\n' +\n", - " '\\n' +\n", - " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 
22063 more characters,\n", - " timestamp: '2024-07-31T07:59:36',\n", - " title: 'Emperor penguin - Wikipedia',\n", - " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", - " }\n", - " ],\n", - " searchResults: [\n", - " {\n", - " searchQuery: {\n", - " text: 'How tall are the largest penguins?',\n", - " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", - " },\n", - " documentIds: [\n", - " 'web-search_0',\n", - " 'web-search_1',\n", - " 'web-search_2',\n", - " 'web-search_3',\n", - " 'web-search_4',\n", - " 'web-search_5'\n", - " ],\n", - " connector: { id: 'web-search' }\n", - " }\n", - " ],\n", - " searchQueries: [\n", - " {\n", - " text: 'How tall are the largest penguins?',\n", - " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", - " }\n", - " ]\n", - " },\n", - " id: undefined,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " usage_metadata: { input_tokens: 11198, output_tokens: 286, total_tokens: 11484 }\n", - "}\n" - ] + "cell_type": "markdown", + "id": "4fecf4e4", + "metadata": {}, + "source": [ + "## RAG\n", + "\n", + "Cohere also comes out of the box with RAG support.\n", + "You can pass in documents as context to the API request and Cohere's models will use them when generating responses." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "74d6320e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Harrison worked at Kensho as an engineer for 3 years.\n" + ] + } + ], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const llmForRag = new ChatCohere({\n", + " apiKey: process.env.COHERE_API_KEY, // Default\n", + "});\n", + "\n", + "const documents = [\n", + " {\n", + " title: \"Harrison's work\",\n", + " snippet: \"Harrison worked at Kensho as an engineer.\",\n", + " },\n", + " {\n", + " title: \"Harrison's work duration\",\n", + " snippet: \"Harrison worked at Kensho for 3 years.\",\n", + " },\n", + " {\n", + " title: \"Polar berars in the Appalachian Mountains\",\n", + " snippet:\n", + " \"Polar bears have surprisingly adapted to the Appalachian Mountains, thriving in the diverse, forested terrain despite their traditional arctic habitat. This unique situation has sparked significant interest and study in climate adaptability and wildlife behavior.\",\n", + " },\n", + "];\n", + "\n", + "const ragResponse = await llmForRag.invoke(\n", + " [new HumanMessage(\"Where did Harrison work and for how long?\")],\n", + " {\n", + " documents,\n", + " }\n", + ");\n", + "console.log(ragResponse.content);" + ] + }, + { + "cell_type": "markdown", + "id": "aa13bae8", + "metadata": {}, + "source": [ + "## Connectors\n", + "\n", + "The API also allows for other connections which are not static documents.\n", + "An example of this is their `web-search` connector which allows you to pass in a query and the API will search the web for relevant documents.\n", + "The example below demonstrates how to use this feature." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "478f7c9e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. 
The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.',\n", + " additional_kwargs: {\n", + " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", + " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", + " chatHistory: [\n", + " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", + " {\n", + " role: 'CHATBOT',\n", + " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", + " }\n", + " ],\n", + " finishReason: 'COMPLETE',\n", + " meta: {\n", + " apiVersion: { version: '1' },\n", + " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", + " tokens: { inputTokens: 11198, outputTokens: 286 }\n", + " },\n", + " citations: [\n", + " {\n", + " start: 43,\n", + " end: 54,\n", + " text: 'prehistoric',\n", + " documentIds: [ 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 55,\n", + " end: 79,\n", + " text: 'Palaeeudyptes klekowskii',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 84,\n", + " end: 102,\n", + " text: '\"colossus penguin\"',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 119,\n", + " end: 125,\n", + " text: '6 feet',\n", + " documentIds: [ 'web-search_0', 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 126,\n", + " end: 134,\n", + " text: '6 inches',\n", + " documentIds: [ 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 161,\n", + " end: 172,\n", + " text: 'alive today',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " },\n", + " {\n", + " start: 180,\n", + " end: 195,\n", + " text: 'emperor penguin',\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ]\n", + " },\n", + " {\n", + " start: 213,\n", + " end: 235,\n", + " text: 'just over 4 feet tall.',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " }\n", + " ],\n", + " documents: [\n", + " {\n", + " id: 'web-search_1',\n", + " snippet: 'Largest species of penguin ever\\n' +\n", + " '\\n' +\n", + " 'TencentContact an Account Manager\\n' +\n", + " '\\n' +\n", + " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", + " '\\n' +\n", + " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", + " '\\n' +\n", + " \"Prior to the recent discovery and description of K. 
fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", + " '\\n' +\n", + " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", + " '\\n' +\n", + " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", + " '\\n' +\n", + " 'Comments below may relate to previous holders of this record.',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Largest species of penguin ever',\n", + " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", + " },\n", + " {\n", + " id: 'web-search_2',\n", + " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", + " '\\n' +\n", + " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. Here are the most impressive of these ancient giants.\\n' +\n", + " '\\n' +\n", + " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. “I almost thought it was maybe some other animal.”\\n' +\n", + " '\\n' +\n", + " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. 
“The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", + " '\\n' +\n", + " 'Sign up to our Wild Wild Life newsletter\\n' +\n", + " '\\n' +\n", + " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", + " '\\n' +\n", + " 'Sign up to newsletter\\n' +\n", + " '\\n' +\n", + " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Petradyptes stonehousei\\n' +\n", + " '\\n' +\n", + " 'Ksepka and his colleagues discovered another giant penguin alongside K. fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", + " '\\n' +\n", + " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", + " '\\n' +\n", + " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", + " '\\n' +\n", + " 'Nature Picture Library / Alamy\\n' +\n", + " '\\n' +\n", + " 'Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", + " '\\n' +\n", + " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", + " '\\n' +\n", + " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", + " '\\n' +\n", + " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", + " '\\n' +\n", + " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. waewaeroa), however, were the complete opposite.\\n' +\n", + " '\\n' +\n", + " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. 
“They’re both really big, but in different ways.”\\n' +\n", + " '\\n' +\n", + " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", + " '\\n' +\n", + " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Sign up to our weekly newsletter\\n' +\n", + " '\\n' +\n", + " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. Sign up\\n\" +\n", + " '\\n' +\n", + " 'More from New Scientist\\n' +\n", + " '\\n' +\n", + " 'Explore the latest news, articles and features\\n' +\n", + " '\\n' +\n", + " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", + " '\\n' +\n", + " 'How you can help with penguin research by browsing images at home\\n' +\n", + " '\\n' +\n", + " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", + " '\\n' +\n", + " 'Penguins adapt their accents to sound more like their friends\\n' +\n", + " '\\n' +\n", + " 'Trending New Scientist articles\\n' +\n", + " '\\n' +\n", + " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", + " '\\n' +\n", + " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", + " '\\n' +\n", + " 'When is the best time to exercise to get the most from your workout?\\n' +\n", + " '\\n' +\n", + " 'Why slow running could be even more beneficial than running fast\\n' +\n", + " '\\n' +\n", + " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", + " '\\n' +\n", + " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", + " '\\n' +\n", + " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", + " '\\n' +\n", + " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", + " '\\n' +\n", + " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", + " '\\n' +\n", + " 'Why midlife is the perfect time to take control of your future health',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", + " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", + " },\n", + " {\n", + " id: 'web-search_0',\n", + " snippet: 'Sustainability for All.\\n' +\n", + " '\\n' +\n", + " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", + " '\\n' +\n", + " 'University of Houston\\n' +\n", + " '\\n' +\n", + " 'Bryan Nelson is a science writer and award-winning documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", + " '\\n' +\n", + " 'Learn about our editorial process\\n' +\n", + " '\\n' +\n", + " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", + " '\\n' +\n", + " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . Christopher Michel/flickr\\n\" +\n", + " '\\n' +\n", + " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. 
Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", + " '\\n' +\n", + " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", + " '\\n' +\n", + " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", + " '\\n' +\n", + " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", + " '\\n' +\n", + " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", + " '\\n' +\n", + " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", + " '\\n' +\n", + " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", + " '\\n' +\n", + " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. 
Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", + " '\\n' +\n", + " '10 of the Largest Living Sea Creatures\\n' +\n", + " '\\n' +\n", + " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", + " '\\n' +\n", + " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", + " '\\n' +\n", + " 'National Monuments Designated By President Obama\\n' +\n", + " '\\n' +\n", + " '20 Pygmy Animal Species From Around the World\\n' +\n", + " '\\n' +\n", + " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", + " '\\n' +\n", + " '16 of the Most Surreal Landscapes on Earth\\n' +\n", + " '\\n' +\n", + " '12 Peculiar Penguin Facts\\n' +\n", + " '\\n' +\n", + " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", + " '\\n' +\n", + " '8 Titanic Facts About Patagotitans\\n' +\n", + " '\\n' +\n", + " '9 Extinct Megafauna That Are Out of This World\\n' +\n", + " '\\n' +\n", + " '10 Places Where Penguins Live in the Wild\\n' +\n", + " '\\n' +\n", + " '16 Animals That Are Living Fossils\\n' +\n", + " '\\n' +\n", + " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", + " '\\n' +\n", + " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", + " '\\n' +\n", + " '12 Dinosaur Theme Parks\\n' +\n", + " '\\n' +\n", + " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", + " '\\n' +\n", + " 'Cookies Settings Accept All Cookies',\n", + " timestamp: '2024-07-27T06:29:15',\n", + " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", + " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", + " },\n", + " {\n", + " id: 'web-search_5',\n", + " snippet: 'Skip to main content\\n' +\n", + " '\\n' +\n", + " 'Smithsonian Institution\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean\\n' +\n", + " '\\n' +\n", + " 'Follow us on Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", + " '\\n' +\n", + " 'How Big Do Penguins Get?\\n' +\n", + " '\\n' +\n", + " '(Smithsonian Institution)\\n' +\n", + " '\\n' +\n", + " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", + " '\\n' +\n", + " 'Coasts & Shallow Water\\n' +\n", + " '\\n' +\n", + " 'Census of Marine Life\\n' +\n", + " '\\n' +\n", + " 'Waves, Storms & Tsunamis\\n' +\n", + " '\\n' +\n", + " 'Temperature & Chemistry\\n' +\n", + " '\\n' +\n", + " 'Solutions & Success Stories\\n' +\n", + " '\\n' +\n", + " 'Books, Film & The Arts\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean',\n", + " timestamp: '2024-07-30T03:47:03',\n", + " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", + " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", + " },\n", + " {\n", + " id: 'web-search_4',\n", + " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). 
Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", + " '\\n' +\n", + " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", + " '\\n' +\n", + " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", + " '\\n' +\n", + " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", + " '\\n' +\n", + " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", + " '\\n' +\n", + " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. 
The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", + " '\\n' +\n", + " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", + " '\\n' +\n", + " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", + " '\\n' +\n", + " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. 
Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", + " '\\n' +\n", + " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", + " '\\n' +\n", + " 'Adaptations to pressure and low oxygen\\n' +\n", + " '\\n' +\n", + " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", + " '\\n' +\n", + " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", + " '\\n' +\n", + " 'Distribution and habitat\\n' +\n", + " '\\n' +\n", + " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 
22063 more characters,\n", + " timestamp: '2024-07-31T07:59:36',\n", + " title: 'Emperor penguin - Wikipedia',\n", + " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", + " }\n", + " ],\n", + " searchResults: [\n", + " {\n", + " searchQuery: {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " },\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_3',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ],\n", + " connector: { id: 'web-search' }\n", + " }\n", + " ],\n", + " searchQueries: [\n", + " {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " }\n", + " ]\n", + " },\n", + " tool_calls: [],\n", + " usage_metadata: { input_tokens: 11198, output_tokens: 286, total_tokens: 11484 },\n", + " invalid_tool_calls: [],\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.',\n", + " name: undefined,\n", + " additional_kwargs: {\n", + " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", + " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", + " chatHistory: [\n", + " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", + " {\n", + " role: 'CHATBOT',\n", + " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", + " }\n", + " ],\n", + " finishReason: 'COMPLETE',\n", + " meta: {\n", + " apiVersion: { version: '1' },\n", + " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", + " tokens: { inputTokens: 11198, outputTokens: 286 }\n", + " },\n", + " citations: [\n", + " {\n", + " start: 43,\n", + " end: 54,\n", + " text: 'prehistoric',\n", + " documentIds: [ 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 55,\n", + " end: 79,\n", + " text: 'Palaeeudyptes klekowskii',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 84,\n", + " end: 102,\n", + " text: '\"colossus penguin\"',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 119,\n", + " end: 125,\n", + " text: '6 feet',\n", + " documentIds: [ 'web-search_0', 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 126,\n", + " end: 134,\n", + " text: '6 inches',\n", + " documentIds: [ 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 161,\n", + " end: 172,\n", + " text: 'alive today',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " },\n", + " {\n", + " start: 180,\n", + " end: 195,\n", + " text: 'emperor penguin',\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ]\n", + " },\n", + " {\n", + " start: 213,\n", + " end: 235,\n", + " text: 'just over 4 feet tall.',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " }\n", + " ],\n", + " documents: [\n", + " {\n", + " id: 'web-search_1',\n", + " snippet: 'Largest species of penguin ever\\n' +\n", + " '\\n' +\n", + 
" 'TencentContact an Account Manager\\n' +\n", + " '\\n' +\n", + " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", + " '\\n' +\n", + " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", + " '\\n' +\n", + " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", + " '\\n' +\n", + " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", + " '\\n' +\n", + " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", + " '\\n' +\n", + " 'Comments below may relate to previous holders of this record.',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Largest species of penguin ever',\n", + " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", + " },\n", + " {\n", + " id: 'web-search_2',\n", + " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", + " '\\n' +\n", + " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. 
Here are the most impressive of these ancient giants.\\n' +\n", + " '\\n' +\n", + " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. “I almost thought it was maybe some other animal.”\\n' +\n", + " '\\n' +\n", + " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. “The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", + " '\\n' +\n", + " 'Sign up to our Wild Wild Life newsletter\\n' +\n", + " '\\n' +\n", + " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", + " '\\n' +\n", + " 'Sign up to newsletter\\n' +\n", + " '\\n' +\n", + " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Petradyptes stonehousei\\n' +\n", + " '\\n' +\n", + " 'Ksepka and his colleagues discovered another giant penguin alongside K. fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", + " '\\n' +\n", + " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", + " '\\n' +\n", + " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", + " '\\n' +\n", + " 'Nature Picture Library / Alamy\\n' +\n", + " '\\n' +\n", + " 'Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", + " '\\n' +\n", + " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", + " '\\n' +\n", + " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. 
Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", + " '\\n' +\n", + " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", + " '\\n' +\n", + " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. waewaeroa), however, were the complete opposite.\\n' +\n", + " '\\n' +\n", + " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. “They’re both really big, but in different ways.”\\n' +\n", + " '\\n' +\n", + " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", + " '\\n' +\n", + " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Sign up to our weekly newsletter\\n' +\n", + " '\\n' +\n", + " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. Sign up\\n\" +\n", + " '\\n' +\n", + " 'More from New Scientist\\n' +\n", + " '\\n' +\n", + " 'Explore the latest news, articles and features\\n' +\n", + " '\\n' +\n", + " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", + " '\\n' +\n", + " 'How you can help with penguin research by browsing images at home\\n' +\n", + " '\\n' +\n", + " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", + " '\\n' +\n", + " 'Penguins adapt their accents to sound more like their friends\\n' +\n", + " '\\n' +\n", + " 'Trending New Scientist articles\\n' +\n", + " '\\n' +\n", + " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", + " '\\n' +\n", + " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", + " '\\n' +\n", + " 'When is the best time to exercise to get the most from your workout?\\n' +\n", + " '\\n' +\n", + " 'Why slow running could be even more beneficial than running fast\\n' +\n", + " '\\n' +\n", + " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", + " '\\n' +\n", + " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", + " '\\n' +\n", + " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", + " '\\n' +\n", + " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", + " '\\n' +\n", + " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", + " '\\n' +\n", + " 'Why midlife is the perfect time to take control of your future health',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", + " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", + " },\n", + " {\n", + " id: 'web-search_0',\n", + " snippet: 'Sustainability for All.\\n' +\n", + " '\\n' +\n", + " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", + " '\\n' +\n", + " 'University of Houston\\n' +\n", + " '\\n' +\n", + " 'Bryan Nelson is a science writer and award-winning 
documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", + " '\\n' +\n", + " 'Learn about our editorial process\\n' +\n", + " '\\n' +\n", + " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", + " '\\n' +\n", + " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . Christopher Michel/flickr\\n\" +\n", + " '\\n' +\n", + " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", + " '\\n' +\n", + " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", + " '\\n' +\n", + " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", + " '\\n' +\n", + " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", + " '\\n' +\n", + " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", + " '\\n' +\n", + " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", + " '\\n' +\n", + " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", + " '\\n' +\n", + " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. 
Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", + " '\\n' +\n", + " '10 of the Largest Living Sea Creatures\\n' +\n", + " '\\n' +\n", + " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", + " '\\n' +\n", + " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", + " '\\n' +\n", + " 'National Monuments Designated By President Obama\\n' +\n", + " '\\n' +\n", + " '20 Pygmy Animal Species From Around the World\\n' +\n", + " '\\n' +\n", + " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", + " '\\n' +\n", + " '16 of the Most Surreal Landscapes on Earth\\n' +\n", + " '\\n' +\n", + " '12 Peculiar Penguin Facts\\n' +\n", + " '\\n' +\n", + " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", + " '\\n' +\n", + " '8 Titanic Facts About Patagotitans\\n' +\n", + " '\\n' +\n", + " '9 Extinct Megafauna That Are Out of This World\\n' +\n", + " '\\n' +\n", + " '10 Places Where Penguins Live in the Wild\\n' +\n", + " '\\n' +\n", + " '16 Animals That Are Living Fossils\\n' +\n", + " '\\n' +\n", + " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", + " '\\n' +\n", + " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", + " '\\n' +\n", + " '12 Dinosaur Theme Parks\\n' +\n", + " '\\n' +\n", + " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", + " '\\n' +\n", + " 'Cookies Settings Accept All Cookies',\n", + " timestamp: '2024-07-27T06:29:15',\n", + " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", + " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", + " },\n", + " {\n", + " id: 'web-search_5',\n", + " snippet: 'Skip to main content\\n' +\n", + " '\\n' +\n", + " 'Smithsonian Institution\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean\\n' +\n", + " '\\n' +\n", + " 'Follow us on Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", + " '\\n' +\n", + " 'How Big Do Penguins Get?\\n' +\n", + " '\\n' +\n", + " '(Smithsonian Institution)\\n' +\n", + " '\\n' +\n", + " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", + " '\\n' +\n", + " 'Coasts & Shallow Water\\n' +\n", + " '\\n' +\n", + " 'Census of Marine Life\\n' +\n", + " '\\n' +\n", + " 'Waves, Storms & Tsunamis\\n' +\n", + " '\\n' +\n", + " 'Temperature & Chemistry\\n' +\n", + " '\\n' +\n", + " 'Solutions & Success Stories\\n' +\n", + " '\\n' +\n", + " 'Books, Film & The Arts\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean',\n", + " timestamp: '2024-07-30T03:47:03',\n", + " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", + " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", + " },\n", + " {\n", + " id: 'web-search_4',\n", + " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). 
Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", + " '\\n' +\n", + " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", + " '\\n' +\n", + " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", + " '\\n' +\n", + " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", + " '\\n' +\n", + " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", + " '\\n' +\n", + " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. 
The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", + " '\\n' +\n", + " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", + " '\\n' +\n", + " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", + " '\\n' +\n", + " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. 
Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", + " '\\n' +\n", + " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", + " '\\n' +\n", + " 'Adaptations to pressure and low oxygen\\n' +\n", + " '\\n' +\n", + " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", + " '\\n' +\n", + " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", + " '\\n' +\n", + " 'Distribution and habitat\\n' +\n", + " '\\n' +\n", + " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 
22063 more characters,\n", + " timestamp: '2024-07-31T07:59:36',\n", + " title: 'Emperor penguin - Wikipedia',\n", + " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", + " }\n", + " ],\n", + " searchResults: [\n", + " {\n", + " searchQuery: {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " },\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_3',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ],\n", + " connector: { id: 'web-search' }\n", + " }\n", + " ],\n", + " searchQueries: [\n", + " {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " }\n", + " ]\n", + " },\n", + " response_metadata: {\n", + " estimatedTokenUsage: { completionTokens: 286, promptTokens: 11198, totalTokens: 11484 },\n", + " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", + " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", + " chatHistory: [\n", + " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", + " {\n", + " role: 'CHATBOT',\n", + " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", + " }\n", + " ],\n", + " finishReason: 'COMPLETE',\n", + " meta: {\n", + " apiVersion: { version: '1' },\n", + " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", + " tokens: { inputTokens: 11198, outputTokens: 286 }\n", + " },\n", + " citations: [\n", + " {\n", + " start: 43,\n", + " end: 54,\n", + " text: 'prehistoric',\n", + " documentIds: [ 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 55,\n", + " end: 79,\n", + " text: 'Palaeeudyptes klekowskii',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 84,\n", + " end: 102,\n", + " text: '\"colossus penguin\"',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 119,\n", + " end: 125,\n", + " text: '6 feet',\n", + " documentIds: [ 'web-search_0', 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 126,\n", + " end: 134,\n", + " text: '6 inches',\n", + " documentIds: [ 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 161,\n", + " end: 172,\n", + " text: 'alive today',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " },\n", + " {\n", + " start: 180,\n", + " end: 195,\n", + " text: 'emperor penguin',\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ]\n", + " },\n", + " {\n", + " start: 213,\n", + " end: 235,\n", + " text: 'just over 4 feet tall.',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " }\n", + " ],\n", + " documents: [\n", + " {\n", + " id: 'web-search_1',\n", + " snippet: 'Largest species of penguin ever\\n' +\n", + " '\\n' +\n", + " 'TencentContact an Account Manager\\n' +\n", + " '\\n' +\n", + " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. 
By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", + " '\\n' +\n", + " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", + " '\\n' +\n", + " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", + " '\\n' +\n", + " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", + " '\\n' +\n", + " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", + " '\\n' +\n", + " 'Comments below may relate to previous holders of this record.',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Largest species of penguin ever',\n", + " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", + " },\n", + " {\n", + " id: 'web-search_2',\n", + " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", + " '\\n' +\n", + " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. Here are the most impressive of these ancient giants.\\n' +\n", + " '\\n' +\n", + " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. 
“I almost thought it was maybe some other animal.”\\n' +\n", + " '\\n' +\n", + " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. “The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", + " '\\n' +\n", + " 'Sign up to our Wild Wild Life newsletter\\n' +\n", + " '\\n' +\n", + " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", + " '\\n' +\n", + " 'Sign up to newsletter\\n' +\n", + " '\\n' +\n", + " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Petradyptes stonehousei\\n' +\n", + " '\\n' +\n", + " 'Ksepka and his colleagues discovered another giant penguin alongside K. fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", + " '\\n' +\n", + " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", + " '\\n' +\n", + " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", + " '\\n' +\n", + " 'Nature Picture Library / Alamy\\n' +\n", + " '\\n' +\n", + " 'Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", + " '\\n' +\n", + " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", + " '\\n' +\n", + " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", + " '\\n' +\n", + " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", + " '\\n' +\n", + " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. 
waewaeroa), however, were the complete opposite.\\n' +\n", + " '\\n' +\n", + " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. “They’re both really big, but in different ways.”\\n' +\n", + " '\\n' +\n", + " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", + " '\\n' +\n", + " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Sign up to our weekly newsletter\\n' +\n", + " '\\n' +\n", + " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. Sign up\\n\" +\n", + " '\\n' +\n", + " 'More from New Scientist\\n' +\n", + " '\\n' +\n", + " 'Explore the latest news, articles and features\\n' +\n", + " '\\n' +\n", + " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", + " '\\n' +\n", + " 'How you can help with penguin research by browsing images at home\\n' +\n", + " '\\n' +\n", + " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", + " '\\n' +\n", + " 'Penguins adapt their accents to sound more like their friends\\n' +\n", + " '\\n' +\n", + " 'Trending New Scientist articles\\n' +\n", + " '\\n' +\n", + " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", + " '\\n' +\n", + " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", + " '\\n' +\n", + " 'When is the best time to exercise to get the most from your workout?\\n' +\n", + " '\\n' +\n", + " 'Why slow running could be even more beneficial than running fast\\n' +\n", + " '\\n' +\n", + " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", + " '\\n' +\n", + " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", + " '\\n' +\n", + " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", + " '\\n' +\n", + " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", + " '\\n' +\n", + " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", + " '\\n' +\n", + " 'Why midlife is the perfect time to take control of your future health',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", + " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", + " },\n", + " {\n", + " id: 'web-search_0',\n", + " snippet: 'Sustainability for All.\\n' +\n", + " '\\n' +\n", + " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", + " '\\n' +\n", + " 'University of Houston\\n' +\n", + " '\\n' +\n", + " 'Bryan Nelson is a science writer and award-winning documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", + " '\\n' +\n", + " 'Learn about our editorial process\\n' +\n", + " '\\n' +\n", + " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", + " '\\n' +\n", + " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . 
Christopher Michel/flickr\\n\" +\n", + " '\\n' +\n", + " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", + " '\\n' +\n", + " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", + " '\\n' +\n", + " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", + " '\\n' +\n", + " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", + " '\\n' +\n", + " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", + " '\\n' +\n", + " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", + " '\\n' +\n", + " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", + " '\\n' +\n", + " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. 
Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", + " '\\n' +\n", + " '10 of the Largest Living Sea Creatures\\n' +\n", + " '\\n' +\n", + " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", + " '\\n' +\n", + " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", + " '\\n' +\n", + " 'National Monuments Designated By President Obama\\n' +\n", + " '\\n' +\n", + " '20 Pygmy Animal Species From Around the World\\n' +\n", + " '\\n' +\n", + " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", + " '\\n' +\n", + " '16 of the Most Surreal Landscapes on Earth\\n' +\n", + " '\\n' +\n", + " '12 Peculiar Penguin Facts\\n' +\n", + " '\\n' +\n", + " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", + " '\\n' +\n", + " '8 Titanic Facts About Patagotitans\\n' +\n", + " '\\n' +\n", + " '9 Extinct Megafauna That Are Out of This World\\n' +\n", + " '\\n' +\n", + " '10 Places Where Penguins Live in the Wild\\n' +\n", + " '\\n' +\n", + " '16 Animals That Are Living Fossils\\n' +\n", + " '\\n' +\n", + " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", + " '\\n' +\n", + " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", + " '\\n' +\n", + " '12 Dinosaur Theme Parks\\n' +\n", + " '\\n' +\n", + " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", + " '\\n' +\n", + " 'Cookies Settings Accept All Cookies',\n", + " timestamp: '2024-07-27T06:29:15',\n", + " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", + " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", + " },\n", + " {\n", + " id: 'web-search_5',\n", + " snippet: 'Skip to main content\\n' +\n", + " '\\n' +\n", + " 'Smithsonian Institution\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean\\n' +\n", + " '\\n' +\n", + " 'Follow us on Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", + " '\\n' +\n", + " 'How Big Do Penguins Get?\\n' +\n", + " '\\n' +\n", + " '(Smithsonian Institution)\\n' +\n", + " '\\n' +\n", + " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", + " '\\n' +\n", + " 'Coasts & Shallow Water\\n' +\n", + " '\\n' +\n", + " 'Census of Marine Life\\n' +\n", + " '\\n' +\n", + " 'Waves, Storms & Tsunamis\\n' +\n", + " '\\n' +\n", + " 'Temperature & Chemistry\\n' +\n", + " '\\n' +\n", + " 'Solutions & Success Stories\\n' +\n", + " '\\n' +\n", + " 'Books, Film & The Arts\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean',\n", + " timestamp: '2024-07-30T03:47:03',\n", + " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", + " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", + " },\n", + " {\n", + " id: 'web-search_4',\n", + " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). 
Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", + " '\\n' +\n", + " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", + " '\\n' +\n", + " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", + " '\\n' +\n", + " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", + " '\\n' +\n", + " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", + " '\\n' +\n", + " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. 
The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", + " '\\n' +\n", + " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", + " '\\n' +\n", + " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", + " '\\n' +\n", + " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. 
Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", + " '\\n' +\n", + " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", + " '\\n' +\n", + " 'Adaptations to pressure and low oxygen\\n' +\n", + " '\\n' +\n", + " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", + " '\\n' +\n", + " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", + " '\\n' +\n", + " 'Distribution and habitat\\n' +\n", + " '\\n' +\n", + " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 
22063 more characters,\n", + " timestamp: '2024-07-31T07:59:36',\n", + " title: 'Emperor penguin - Wikipedia',\n", + " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", + " }\n", + " ],\n", + " searchResults: [\n", + " {\n", + " searchQuery: {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " },\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_3',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ],\n", + " connector: { id: 'web-search' }\n", + " }\n", + " ],\n", + " searchQueries: [\n", + " {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " }\n", + " ]\n", + " },\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: { input_tokens: 11198, output_tokens: 286, total_tokens: 11484 }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const llmWithConnectors = new ChatCohere({\n", + " apiKey: process.env.COHERE_API_KEY, // Default\n", + "});\n", + "\n", + "const connectorsRes = await llmWithConnectors.invoke(\n", + " [new HumanMessage(\"How tall are the largest pengiuns?\")],\n", + " {\n", + " connectors: [{ id: \"web-search\" }],\n", + " }\n", + ");\n", + "console.dir(connectorsRes, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "f90cedf9", + "metadata": {}, + "source": [ + "We can see in the `additional_kwargs` object that the API request did a few things:\n", + "\n", + "- Performed a search query, storing the result data in the `searchQueries` and `searchResults` fields. In the `searchQueries` field we see they rephrased our query for better results.\n", + "- Generated three documents from the search query.\n", + "- Generated a list of citations\n", + "- Generated a final response based on the above actions & content." 
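+        "\n",
+        "As a minimal sketch (relying only on the response shape logged above), the citations can be mapped back to the documents they reference via `response_metadata`:\n",
+        "\n",
+        "```typescript\n",
+        "// Sketch only: field names are taken from the logged response above.\n",
+        "const { citations, documents } = connectorsRes.response_metadata;\n",
+        "for (const citation of citations) {\n",
+        "  const sources = documents\n",
+        "    .filter((doc: any) => citation.documentIds.includes(doc.id))\n",
+        "    .map((doc: any) => doc.url);\n",
+        "  console.log(citation.text, \"->\", sources);\n",
+        "}\n",
+        "```"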
+ ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatCohere features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatCohere } from \"@langchain/cohere\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const llmWithConnectors = new ChatCohere({\n", - " apiKey: process.env.COHERE_API_KEY, // Default\n", - "});\n", - "\n", - "const connectorsRes = await llmWithConnectors.invoke(\n", - " [new HumanMessage(\"How tall are the largest pengiuns?\")],\n", - " {\n", - " connectors: [{ id: \"web-search\" }],\n", - " }\n", - ");\n", - "console.dir(connectorsRes, { depth: null });" - ] - }, - { - "cell_type": "markdown", - "id": "f90cedf9", - "metadata": {}, - "source": [ - "We can see in the `additional_kwargs` object that the API request did a few things:\n", - "\n", - "- Performed a search query, storing the result data in the `searchQueries` and `searchResults` fields. In the `searchQueries` field we see they rephrased our query for better results.\n", - "- Generated three documents from the search query.\n", - "- Generated a list of citations\n", - "- Generated a final response based on the above actions & content." - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatCohere features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/deep_infra.mdx b/docs/core_docs/docs/integrations/chat/deep_infra.mdx index 291e539d0c37..8ff146b26508 100644 --- a/docs/core_docs/docs/integrations/chat/deep_infra.mdx +++ b/docs/core_docs/docs/integrations/chat/deep_infra.mdx @@ -26,5 +26,5 @@ import Example from "@examples/models/chat/integration_deepinfra.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/fake.mdx b/docs/core_docs/docs/integrations/chat/fake.mdx index 6f6470f04b7b..ee26daa3e53a 100644 --- a/docs/core_docs/docs/integrations/chat/fake.mdx +++ b/docs/core_docs/docs/integrations/chat/fake.mdx @@ -11,5 +11,5 @@ import FakeListChatExample from "@examples/models/chat/integration_fake.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model 
[conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/fireworks.ipynb b/docs/core_docs/docs/integrations/chat/fireworks.ipynb index 8b2528768b7f..b3115487d98e 100644 --- a/docs/core_docs/docs/integrations/chat/fireworks.ipynb +++ b/docs/core_docs/docs/integrations/chat/fireworks.ipynb @@ -1,289 +1,289 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Fireworks\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatFireworks\n", - "\n", - "[Fireworks AI](https://fireworks.ai/) is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n", - "\n", - "This guide will help you getting started with `ChatFireworks` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatFireworks` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatFireworks](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", - "\n", - "## Setup\n", - "\n", - "To access `ChatFireworks` models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [the Fireworks website](https://fireworks.ai/login) to sign up to Fireworks and generate an API key. 
Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export FIREWORKS_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `ChatFireworks` integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatFireworks } from \"@langchain/community/chat_models/fireworks\" \n", - "\n", - "const llm = new ChatFireworks({\n", - " model: \"accounts/fireworks/models/llama-v3p1-70b-instruct\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " timeout: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9rBYHbb6QYRrKyr2tMhO9pH4AYXR4\",\n", - " \"content\": \"J'adore la programmation.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 31,\n", - " \"totalTokens\": 39\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 31,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 39\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Fireworks\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatFireworks\n", + "\n", + "[Fireworks AI](https://fireworks.ai/) is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n", + "\n", + "This guide will help you getting started with `ChatFireworks` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatFireworks` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatFireworks](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `ChatFireworks` models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [the Fireworks website](https://fireworks.ai/login) to sign up to Fireworks and generate an API key. 
Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export FIREWORKS_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatFireworks` integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9rBYM3KSIhHOuTXpBvA5oFyk8RSaN\",\n", - " \"content\": \"Ich liebe das Programmieren.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 6,\n", - " \"promptTokens\": 26,\n", - " \"totalTokens\": 32\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 26,\n", - " \"output_tokens\": 6,\n", - " \"total_tokens\": 32\n", - " }\n", - "}\n" - ] + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatFireworks } from \"@langchain/community/chat_models/fireworks\" \n", + "\n", + "const llm = new ChatFireworks({\n", + " model: \"accounts/fireworks/models/llama-v3p1-70b-instruct\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rBYHbb6QYRrKyr2tMhO9pH4AYXR4\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rBYM3KSIhHOuTXpBvA5oFyk8RSaN\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", + "\n", + "- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility).\n", + "- Generation using multiple prompts is not supported." 
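+        "\n",
+        "For the second caveat, a minimal workaround sketch is to fan separate inputs out as individual single-prompt requests with the standard `.batch()` method from the Runnable interface (generic LangChain behaviour, not a Fireworks-specific API):\n",
+        "\n",
+        "```typescript\n",
+        "// Sketch only: each input is sent as its own single-prompt request.\n",
+        "const answers = await llm.batch([\n",
+        "  \"Translate 'I love programming.' into French.\",\n",
+        "  \"Translate 'I love programming.' into German.\",\n",
+        "]);\n",
+        "console.log(answers.map((msg) => msg.content));\n",
+        "```"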
+ ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatFireworks features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", - "metadata": {}, - "source": [ - "Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", - "\n", - "- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility).\n", - "- Generation using multiple prompts is not supported." - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatFireworks features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/friendli.mdx b/docs/core_docs/docs/integrations/chat/friendli.mdx index 16a14debf09d..764e60159358 100644 --- a/docs/core_docs/docs/integrations/chat/friendli.mdx +++ b/docs/core_docs/docs/integrations/chat/friendli.mdx @@ -30,5 +30,5 @@ import Example from "@examples/models/chat/friendli.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb index 56594e736378..39d017da6ddc 100644 --- a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb +++ b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb @@ -1,477 +1,477 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "46f7ac07", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: 
Google GenAI\n", - "keywords: [gemini, gemini-pro, ChatGoogleGenerativeAI]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatGoogleGenerativeAI\n", - "\n", - "[Google AI](https://ai.google.dev/) offers a number of different chat models, including the powerful Gemini series. For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini).\n", - "\n", - "This will help you getting started with `ChatGoogleGenerativeAI` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatGoogleGenerativeAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/google_generative_ai) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatGoogleGenerativeAI](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html) | [@langchain/google-genai](https://api.js.langchain.com/modules/langchain_google_genai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-genai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-genai?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "You can access Google's `gemini` and `gemini-vision` models, as well as other\n", - "generative models in LangChain through `ChatGoogleGenerativeAI` class in the\n", - "`@langchain/google-genai` integration package.\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip\n", - "You can also access Google's `gemini` family of models via the LangChain VertexAI and VertexAI-web integrations.\n", - "\n", - "Click [here](/docs/integrations/chat/google_vertex_ai) to read the docs.\n", - ":::\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Get an API key here: [https://ai.google.dev/tutorials/setup](https://ai.google.dev/tutorials/setup)\n", - "\n", - "Then set the `GOOGLE_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export GOOGLE_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `ChatGoogleGenerativeAI` integration lives in the `@langchain/google-genai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from 
\"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/google-genai @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\"\n", - "\n", - "const llm = new ChatGoogleGenerativeAI({\n", - " model: \"gemini-1.5-pro\",\n", - " temperature: 0,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"J'adore programmer. \\n\",\n", - " \"additional_kwargs\": {\n", - " \"finishReason\": \"STOP\",\n", - " \"index\": 0,\n", - " \"safetyRatings\": [\n", - " {\n", - " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"finishReason\": \"STOP\",\n", - " \"index\": 0,\n", - " \"safetyRatings\": [\n", - " {\n", - " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " }\n", - " ]\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 21,\n", - " \"output_tokens\": 5,\n", - " \"total_tokens\": 26\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "46f7ac07", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Google GenAI\n", + "keywords: [gemini, gemini-pro, ChatGoogleGenerativeAI]\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore programmer. 
\n", - "\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatGoogleGenerativeAI\n", + "\n", + "[Google AI](https://ai.google.dev/) offers a number of different chat models, including the powerful Gemini series. For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini).\n", + "\n", + "This will help you getting started with `ChatGoogleGenerativeAI` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatGoogleGenerativeAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/google_generative_ai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatGoogleGenerativeAI](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html) | [@langchain/google-genai](https://api.js.langchain.com/modules/langchain_google_genai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-genai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-genai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "You can access Google's `gemini` and `gemini-vision` models, as well as other\n", + "generative models in LangChain through `ChatGoogleGenerativeAI` class in the\n", + "`@langchain/google-genai` integration package.\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip\n", + "You can also access Google's `gemini` family of models via the LangChain VertexAI and VertexAI-web integrations.\n", + "\n", + "Click [here](/docs/integrations/chat/google_vertex_ai) to read the docs.\n", + ":::\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Get an API key here: [https://ai.google.dev/tutorials/setup](https://ai.google.dev/tutorials/setup)\n", + "\n", + "Then set the `GOOGLE_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export GOOGLE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + 
"```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatGoogleGenerativeAI` integration lives in the `@langchain/google-genai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/google-genai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"Ich liebe das Programmieren. \\n\",\n", - " \"additional_kwargs\": {\n", - " \"finishReason\": \"STOP\",\n", - " \"index\": 0,\n", - " \"safetyRatings\": [\n", - " {\n", - " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"finishReason\": \"STOP\",\n", - " \"index\": 0,\n", - " \"safetyRatings\": [\n", - " {\n", - " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " },\n", - " {\n", - " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", - " \"probability\": \"NEGLIGIBLE\"\n", - " }\n", - " ]\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 16,\n", - " \"output_tokens\": 7,\n", - " \"total_tokens\": 23\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "6a44de56", - "metadata": {}, - "source": [ - "## Safety Settings\n", - "\n", - "Gemini models have default safety settings that can be overridden. If you are receiving lots of \"Safety Warnings\" from your models, you can try tweaking the safety_settings attribute of the model. 
For example, to turn off safety blocking for dangerous content, you can import enums from the `@google/generative-ai` package, then construct your LLM as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "92db2f25", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", - "import { HarmBlockThreshold, HarmCategory } from \"@google/generative-ai\";\n", - "\n", - "const llmWithSafetySettings = new ChatGoogleGenerativeAI({\n", - " model: \"gemini-1.5-pro\",\n", - " temperature: 0,\n", - " safetySettings: [\n", - " {\n", - " category: HarmCategory.HARM_CATEGORY_HARASSMENT,\n", - " threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,\n", - " },\n", - " ],\n", - " // other params...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", - "metadata": {}, - "source": [ - "## Tool calling\n", - "\n", - "Tool calling with Google AI is mostly the same [as tool calling with other models](/docs/how_to/tool_calling), but has a few restrictions on schema.\n", - "\n", - "The Google AI API does not allow tool schemas to contain an object with unknown properties. For example, the following Zod schemas will throw an error:\n", - "\n", - "`const invalidSchema = z.object({ properties: z.record(z.unknown()) });`\n", - "\n", - "and\n", - "\n", - "`const invalidSchema2 = z.record(z.unknown());`\n", - "\n", - "Instead, you should explicitly define the properties of the object field. Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d6805c40", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'browser_tool',\n", - " args: {\n", - " url: 'https://www.weather.com',\n", - " query: 'weather tonight in new york'\n", - " },\n", - " type: 'tool_call'\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\"\n", + "\n", + "const llm = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-1.5-pro\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"J'adore programmer. 
\\n\",\n", + " \"additional_kwargs\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 21,\n", + " \"output_tokens\": 5,\n", + " \"total_tokens\": 26\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore programmer. \n", + "\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe das Programmieren. 
\\n\",\n", + " \"additional_kwargs\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 7,\n", + " \"total_tokens\": 23\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6a44de56", + "metadata": {}, + "source": [ + "## Safety Settings\n", + "\n", + "Gemini models have default safety settings that can be overridden. If you are receiving lots of \"Safety Warnings\" from your models, you can try tweaking the safety_settings attribute of the model. For example, to turn off safety blocking for dangerous content, you can import enums from the `@google/generative-ai` package, then construct your LLM as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "92db2f25", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", + "import { HarmBlockThreshold, HarmCategory } from \"@google/generative-ai\";\n", + "\n", + "const llmWithSafetySettings = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-1.5-pro\",\n", + " temperature: 0,\n", + " safetySettings: [\n", + " {\n", + " category: HarmCategory.HARM_CATEGORY_HARASSMENT,\n", + " threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,\n", + " },\n", + " ],\n", + " // other params...\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Tool calling with Google AI is mostly the same [as tool calling with other models](/docs/how_to/tool_calling), but has a few restrictions on schema.\n", + "\n", + "The Google AI API does not allow tool schemas to contain an object with unknown properties. 
For example, the following Zod schemas will throw an error:\n", + "\n", + "`const invalidSchema = z.object({ properties: z.record(z.unknown()) });`\n", + "\n", + "and\n", + "\n", + "`const invalidSchema2 = z.record(z.unknown());`\n", + "\n", + "Instead, you should explicitly define the properties of the object field. Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d6805c40", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'browser_tool',\n", + " args: {\n", + " url: 'https://www.weather.com',\n", + " query: 'weather tonight in new york'\n", + " },\n", + " type: 'tool_call'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", + "import { z } from \"zod\";\n", + "\n", + "// Define your tool\n", + "const fakeBrowserTool = tool((_) => {\n", + " return \"The search result is xyz...\"\n", + "}, {\n", + " name: \"browser_tool\",\n", + " description: \"Useful for when you need to find something on the web or summarize a webpage.\",\n", + " schema: z.object({\n", + " url: z.string().describe(\"The URL of the webpage to search.\"),\n", + " query: z.string().optional().describe(\"An optional search query to use.\"),\n", + " }),\n", + "})\n", + "\n", + "const llmWithTool = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-pro\",\n", + "}).bindTools([fakeBrowserTool]) // Bind your tools to the model\n", + "\n", + "const toolRes = await llmWithTool.invoke([\n", + " [\n", + " \"human\",\n", + " \"Search the web and tell me what the weather will be like tonight in new york. use a popular weather website\",\n", + " ],\n", + "]);\n", + "\n", + "console.log(toolRes.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "0c6a950f", + "metadata": {}, + "source": [ + "## Gemini Prompting FAQs\n", + "\n", + "As of the time this doc was written (2023/12/12), Gemini has some restrictions on the types and structure of prompts it accepts. Specifically:\n", + "\n", + "1. When providing multimodal (image) inputs, you are restricted to at most 1 message of \"human\" (user) type. You cannot pass multiple messages (though the single human message may have multiple content entries)\n", + "2. System messages are not natively supported, and will be merged with the first human message if present.\n", + "3. For regular chat conversations, messages must follow the human/ai/human/ai alternating pattern. You may not provide 2 AI or human messages in sequence.\n", + "4. Message may be blocked if they violate the safety checks of the LLM. 
In this case, the model will return an empty response.\n" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { tool } from \"@langchain/core/tools\";\n", - "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", - "import { z } from \"zod\";\n", - "\n", - "// Define your tool\n", - "const fakeBrowserTool = tool((_) => {\n", - " return \"The search result is xyz...\"\n", - "}, {\n", - " name: \"browser_tool\",\n", - " description: \"Useful for when you need to find something on the web or summarize a webpage.\",\n", - " schema: z.object({\n", - " url: z.string().describe(\"The URL of the webpage to search.\"),\n", - " query: z.string().optional().describe(\"An optional search query to use.\"),\n", - " }),\n", - "})\n", - "\n", - "const llmWithTool = new ChatGoogleGenerativeAI({\n", - " model: \"gemini-pro\",\n", - "}).bindTools([fakeBrowserTool]) // Bind your tools to the model\n", - "\n", - "const toolRes = await llmWithTool.invoke([\n", - " [\n", - " \"human\",\n", - " \"Search the web and tell me what the weather will be like tonight in new york. use a popular weather website\",\n", - " ],\n", - "]);\n", - "\n", - "console.log(toolRes.tool_calls);" - ] - }, - { - "cell_type": "markdown", - "id": "0c6a950f", - "metadata": {}, - "source": [ - "## Gemini Prompting FAQs\n", - "\n", - "As of the time this doc was written (2023/12/12), Gemini has some restrictions on the types and structure of prompts it accepts. Specifically:\n", - "\n", - "1. When providing multimodal (image) inputs, you are restricted to at most 1 message of \"human\" (user) type. You cannot pass multiple messages (though the single human message may have multiple content entries)\n", - "2. System messages are not natively supported, and will be merged with the first human message if present.\n", - "3. For regular chat conversations, messages must follow the human/ai/human/ai alternating pattern. You may not provide 2 AI or human messages in sequence.\n", - "4. Message may be blocked if they violate the safety checks of the LLM. 
In this case, the model will return an empty response.\n" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb index 2579e14501d2..158e71453fcb 100644 --- a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb +++ b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb @@ -1,282 +1,282 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Google Vertex AI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatVertexAI\n", - "\n", - "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.\n", - "\n", - "This will help you getting started with `ChatVertexAI` [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all `ChatVertexAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html).\n", - "\n", - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatVertexAI](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html) | [`@langchain/google-vertexai`](https://www.npmjs.com/package/@langchain/google-vertexai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "LangChain.js supports two different authentication methods based on whether\n", - "you're running in a Node.js environment or a web environment.\n", - "\n", - "To access `ChatVertexAI` models you'll need to setup Google VertexAI in your Google Cloud Platform (GCP) account, save the credentials file, and install the `@langchain/google-vertexai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to your [GCP account](https://console.cloud.google.com/) and generate a credentials file. 
Once you've done this set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n", - "\n", - "```bash\n", - "export GOOGLE_APPLICATION_CREDENTIALS=\"path/to/your/credentials.json\"\n", - "```\n", - "\n", - "If running in a web environment, you should set the `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable as a JSON stringified object, and install the `@langchain/google-vertexai-web` package:\n", - "\n", - "```bash\n", - "GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `ChatVertexAI` integration lives in the `@langchain/google-vertexai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/google-vertexai @langchain/core\n", - "\n", - "\n", - "Or if using in a web environment like a [Vercel Edge function](https://vercel.com/blog/edge-functions-generally-available):\n", - "\n", - "\n", - " @langchain/google-vertexai-web @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatVertexAI } from \"@langchain/google-vertexai\"\n", - "// Uncomment the following line if you're running in a web environment:\n", - "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\"\n", - "\n", - "const llm = new ChatVertexAI({\n", - " model: \"gemini-1.5-pro\",\n", - " temperature: 0,\n", - " maxRetries: 2,\n", - " // For web, authOptions.credentials\n", - " // authOptions: { ... }\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessageChunk {\n", - " \"content\": \"J'adore programmer. \\n\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"tool_call_chunks\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 20,\n", - " \"output_tokens\": 7,\n", - " \"total_tokens\": 27\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Google Vertex AI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore programmer. \n", - "\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatVertexAI\n", + "\n", + "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.\n", + "\n", + "This will help you getting started with `ChatVertexAI` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatVertexAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html).\n", + "\n", + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatVertexAI](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html) | [`@langchain/google-vertexai`](https://www.npmjs.com/package/@langchain/google-vertexai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "LangChain.js supports two different authentication methods based on whether\n", + "you're running in a Node.js environment or a web environment.\n", + "\n", + "To access `ChatVertexAI` models you'll need to setup Google VertexAI in your Google Cloud Platform (GCP) account, save the credentials file, and install the `@langchain/google-vertexai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to your [GCP account](https://console.cloud.google.com/) and generate a credentials file. 
Once you've done this set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n", + "\n", + "```bash\n", + "export GOOGLE_APPLICATION_CREDENTIALS=\"path/to/your/credentials.json\"\n", + "```\n", + "\n", + "If running in a web environment, you should set the `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable as a JSON stringified object, and install the `@langchain/google-vertexai-web` package:\n", + "\n", + "```bash\n", + "GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatVertexAI` integration lives in the `@langchain/google-vertexai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/google-vertexai @langchain/core\n", + "\n", + "\n", + "Or if using in a web environment like a [Vercel Edge function](https://vercel.com/blog/edge-functions-generally-available):\n", + "\n", + "\n", + " @langchain/google-vertexai-web @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatVertexAI } from \"@langchain/google-vertexai\"\n", + "// Uncomment the following line if you're running in a web environment:\n", + "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\"\n", + "\n", + "const llm = new ChatVertexAI({\n", + " model: \"gemini-1.5-pro\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // For web, authOptions.credentials\n", + " // authOptions: { ... }\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessageChunk {\n", + " \"content\": \"J'adore programmer. \\n\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"tool_call_chunks\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 20,\n", + " \"output_tokens\": 7,\n", + " \"total_tokens\": 27\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessageChunk {\n", - " \"content\": \"Ich liebe das Programmieren. 
\\n\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"tool_call_chunks\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 15,\n", - " \"output_tokens\": 9,\n", - " \"total_tokens\": 24\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore programmer. \n", + "\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessageChunk {\n", + " \"content\": \"Ich liebe das Programmieren. \\n\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"tool_call_chunks\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 15,\n", + " \"output_tokens\": 9,\n", + " \"total_tokens\": 24\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatVertexAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatVertexAI features and configurations head to the API reference: 
https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/groq.ipynb b/docs/core_docs/docs/integrations/chat/groq.ipynb index b6a58553ad3b..279fa1a74769 100644 --- a/docs/core_docs/docs/integrations/chat/groq.ipynb +++ b/docs/core_docs/docs/integrations/chat/groq.ipynb @@ -1,312 +1,312 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Groq\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatGroq\n", - "\n", - "[Groq](https://groq.com/) is a company that offers fast AI inference, powered by LPU™ AI inference technology which delivers fast, affordable, and energy efficient AI.\n", - "\n", - "This will help you getting started with ChatGroq [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/groq) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatGroq](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html) | [`@langchain/groq`](https://www.npmjs.com/package/@langchain/groq) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/groq?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/groq?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", - "\n", - "## Setup\n", - "\n", - "To access ChatGroq models you'll need to create a Groq account, get an API key, and install the `@langchain/groq` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "In order to use the Groq API you'll need an API key. 
You can sign up for a Groq account and create an API key [here](https://wow.groq.com/).\n", - "Then, you can set the API key as an environment variable in your terminal:\n", - "\n", - "```bash\n", - "export GROQ_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain ChatGroq integration lives in the `@langchain/groq` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/groq @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatGroq } from \"@langchain/groq\" \n", - "\n", - "const llm = new ChatGroq({\n", - " model: \"mixtral-8x7b-32768\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"I enjoy programming. (The French translation is: \\\"J'aime programmer.\\\")\\n\\nNote: I chose to translate \\\"I love programming\\\" as \\\"J'aime programmer\\\" instead of \\\"Je suis amoureux de programmer\\\" because the latter has a romantic connotation that is not present in the original English sentence.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 73,\n", - " \"promptTokens\": 31,\n", - " \"totalTokens\": 104\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " {\n", - " role: \"system\",\n", - " content: \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", - " },\n", - " { role: \"user\", content: \"I love programming.\" },\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Groq\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "I enjoy programming. 
(The French translation is: \"J'aime programmer.\")\n", - "\n", - "Note: I chose to translate \"I love programming\" as \"J'aime programmer\" instead of \"Je suis amoureux de programmer\" because the latter has a romantic connotation that is not present in the original English sentence.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "ce0414fe", - "metadata": {}, - "source": [ - "## Json invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "3f0a7a2a", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatGroq\n", + "\n", + "[Groq](https://groq.com/) is a company that offers fast AI inference, powered by LPU™ AI inference technology which delivers fast, affordable, and energy efficient AI.\n", + "\n", + "This will help you getting started with ChatGroq [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/groq) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatGroq](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html) | [`@langchain/groq`](https://www.npmjs.com/package/@langchain/groq) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/groq?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/groq?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access ChatGroq models you'll need to create a Groq account, get an API key, and install the `@langchain/groq` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "In order to use the Groq API you'll need an API key. 
You can sign up for a Groq account and create an API key [here](https://wow.groq.com/).\n", + "Then, you can set the API key as an environment variable in your terminal:\n", + "\n", + "```bash\n", + "export GROQ_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatGroq integration lives in the `@langchain/groq` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/groq @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " aiInvokeMsgContent: '{\\n\"result\": 6\\n}',\n", - " aiBindMsg: '{\\n\"result\": 6\\n}'\n", - "}\n" - ] - } - ], - "source": [ - "const messages = [\n", - " {\n", - " role: \"system\",\n", - " content: \"You are a math tutor that handles math exercises and makes output in json in format { result: number }.\",\n", - " },\n", - " { role: \"user\", content: \"2 + 2 * 2\" },\n", - "];\n", - "\n", - "const aiInvokeMsg = await llm.invoke(messages, { response_format: { type: \"json_object\" } });\n", - "\n", - "// if you want not to pass response_format in every invoke, you can bind it to the instance\n", - "const llmWithResponseFormat = llm.bind({ response_format: { type: \"json_object\" } });\n", - "const aiBindMsg = await llmWithResponseFormat.invoke(messages);\n", - "\n", - "// they are the same\n", - "console.log({ aiInvokeMsgContent: aiInvokeMsg.content, aiBindMsg: aiBindMsg.content });" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatGroq } from \"@langchain/groq\" \n", + "\n", + "const llm = new ChatGroq({\n", + " model: \"mixtral-8x7b-32768\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"I enjoy programming. 
(The French translation is: \\\"J'aime programmer.\\\")\\n\\nNote: I chose to translate \\\"I love programming\\\" as \\\"J'aime programmer\\\" instead of \\\"Je suis amoureux de programmer\\\" because the latter has a romantic connotation that is not present in the original English sentence.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 73,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 104\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " {\n", + " role: \"system\",\n", + " content: \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " },\n", + " { role: \"user\", content: \"I love programming.\" },\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I enjoy programming. (The French translation is: \"J'aime programmer.\")\n", + "\n", + "Note: I chose to translate \"I love programming\" as \"J'aime programmer\" instead of \"Je suis amoureux de programmer\" because the latter has a romantic connotation that is not present in the original English sentence.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"That's great! I can help you translate English phrases related to programming into German.\\n\\n\\\"I love programming\\\" can be translated to German as \\\"Ich liebe Programmieren\\\".\\n\\nHere are some more programming-related phrases translated into German:\\n\\n* \\\"Programming language\\\" = \\\"Programmiersprache\\\"\\n* \\\"Code\\\" = \\\"Code\\\"\\n* \\\"Variable\\\" = \\\"Variable\\\"\\n* \\\"Function\\\" = \\\"Funktion\\\"\\n* \\\"Array\\\" = \\\"Array\\\"\\n* \\\"Object-oriented programming\\\" = \\\"Objektorientierte Programmierung\\\"\\n* \\\"Algorithm\\\" = \\\"Algorithmus\\\"\\n* \\\"Data structure\\\" = \\\"Datenstruktur\\\"\\n* \\\"Debugging\\\" = \\\"Debuggen\\\"\\n* \\\"Compile\\\" = \\\"Kompilieren\\\"\\n* \\\"Link\\\" = \\\"Verknüpfen\\\"\\n* \\\"Run\\\" = \\\"Ausführen\\\"\\n* \\\"Test\\\" = \\\"Testen\\\"\\n* \\\"Deploy\\\" = \\\"Bereitstellen\\\"\\n* \\\"Version control\\\" = \\\"Versionskontrolle\\\"\\n* \\\"Open source\\\" = \\\"Open Source\\\"\\n* \\\"Software development\\\" = \\\"Softwareentwicklung\\\"\\n* \\\"Agile methodology\\\" = \\\"Agile Methodik\\\"\\n* \\\"DevOps\\\" = \\\"DevOps\\\"\\n* \\\"Cloud computing\\\" = \\\"Cloud Computing\\\"\\n\\nI hope this helps! 
Let me know if you have any other questions or if you need further translations.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 327,\n", - " \"promptTokens\": 25,\n", - " \"totalTokens\": 352\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - "}\n" - ] + "cell_type": "markdown", + "id": "ce0414fe", + "metadata": {}, + "source": [ + "## Json invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3f0a7a2a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " aiInvokeMsgContent: '{\\n\"result\": 6\\n}',\n", + " aiBindMsg: '{\\n\"result\": 6\\n}'\n", + "}\n" + ] + } + ], + "source": [ + "const messages = [\n", + " {\n", + " role: \"system\",\n", + " content: \"You are a math tutor that handles math exercises and makes output in json in format { result: number }.\",\n", + " },\n", + " { role: \"user\", content: \"2 + 2 * 2\" },\n", + "];\n", + "\n", + "const aiInvokeMsg = await llm.invoke(messages, { response_format: { type: \"json_object\" } });\n", + "\n", + "// if you want not to pass response_format in every invoke, you can bind it to the instance\n", + "const llmWithResponseFormat = llm.bind({ response_format: { type: \"json_object\" } });\n", + "const aiBindMsg = await llmWithResponseFormat.invoke(messages);\n", + "\n", + "// they are the same\n", + "console.log({ aiInvokeMsgContent: aiInvokeMsg.content, aiBindMsg: aiBindMsg.content });" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"That's great! I can help you translate English phrases related to programming into German.\\n\\n\\\"I love programming\\\" can be translated to German as \\\"Ich liebe Programmieren\\\".\\n\\nHere are some more programming-related phrases translated into German:\\n\\n* \\\"Programming language\\\" = \\\"Programmiersprache\\\"\\n* \\\"Code\\\" = \\\"Code\\\"\\n* \\\"Variable\\\" = \\\"Variable\\\"\\n* \\\"Function\\\" = \\\"Funktion\\\"\\n* \\\"Array\\\" = \\\"Array\\\"\\n* \\\"Object-oriented programming\\\" = \\\"Objektorientierte Programmierung\\\"\\n* \\\"Algorithm\\\" = \\\"Algorithmus\\\"\\n* \\\"Data structure\\\" = \\\"Datenstruktur\\\"\\n* \\\"Debugging\\\" = \\\"Debuggen\\\"\\n* \\\"Compile\\\" = \\\"Kompilieren\\\"\\n* \\\"Link\\\" = \\\"Verknüpfen\\\"\\n* \\\"Run\\\" = \\\"Ausführen\\\"\\n* \\\"Test\\\" = \\\"Testen\\\"\\n* \\\"Deploy\\\" = \\\"Bereitstellen\\\"\\n* \\\"Version control\\\" = \\\"Versionskontrolle\\\"\\n* \\\"Open source\\\" = \\\"Open Source\\\"\\n* \\\"Software development\\\" = \\\"Softwareentwicklung\\\"\\n* \\\"Agile methodology\\\" = \\\"Agile Methodik\\\"\\n* \\\"DevOps\\\" = \\\"DevOps\\\"\\n* \\\"Cloud computing\\\" = \\\"Cloud Computing\\\"\\n\\nI hope this helps! 
Let me know if you have any other questions or if you need further translations.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 327,\n", + " \"promptTokens\": 25,\n", + " \"totalTokens\": 352\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatGroq features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatGroq features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/ibm.ipynb b/docs/core_docs/docs/integrations/chat/ibm.ipynb index 5c86adad492f..46cb7bf92d74 100644 --- a/docs/core_docs/docs/integrations/chat/ibm.ipynb +++ b/docs/core_docs/docs/integrations/chat/ibm.ipynb @@ -1,592 +1,592 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: IBM watsonx.ai\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - 
"metadata": {}, - "source": [ - "# IBM watsonx.ai\n", - "\n", - "This will help you getting started with IBM watsonx.ai [chat models](/docs/concepts/#chat-models). For detailed documentation of all `IBM watsonx.ai` features and configurations head to the [IBM watsonx.ai](https://api.js.langchain.com/classes/_langchain_community.chat_models_ibm.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/ibm_watsonx/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`ChatWatsonx`](https://api.js.langchain.com/classes/_langchain_community.chat_models_ibm.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "To access IBM watsonx.ai models you'll need to create a/an IBM watsonx.ai account, get an API key, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "\n", - "Head to [IBM Cloud](https://cloud.ibm.com/login) to sign up to IBM watsonx.ai and generate an API key or provide any other authentication form as presented below.\n", - "\n", - "#### IAM authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=iam\n", - "export WATSONX_AI_APIKEY=\n", - "```\n", - "\n", - "#### Bearer token authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=bearertoken\n", - "export WATSONX_AI_BEARER_TOKEN=\n", - "```\n", - "\n", - "#### CP4D authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=cp4d\n", - "export WATSONX_AI_USERNAME=\n", - "export WATSONX_AI_PASSWORD=\n", - "export WATSONX_AI_URL=\n", - "```\n", - "\n", - "Once these are places in your enviromental variables and object is initialized authentication will proceed automatically.\n", - "\n", - "Authentication can also be accomplished by passing these values as parameters to a new instance.\n", - "\n", - "## IAM authentication\n", - "\n", - "```typescript\n", - "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"iam\",\n", - " watsonxAIApikey: \"\",\n", - "};\n", - "const instance = new WatsonxLLM(props);\n", - "```\n", - "\n", - "## Bearer token authentication\n", - "\n", - "```typescript\n", - "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"bearertoken\",\n", - " watsonxAIBearerToken: \"\",\n", - "};\n", - "const instance = new 
WatsonxLLM(props);\n", - "```\n", - "\n", - "### CP4D authentication\n", - "\n", - "```typescript\n", - "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"cp4d\",\n", - " watsonxAIUsername: \"\",\n", - " watsonxAIPassword: \"\",\n", - " watsonxAIUrl: \"\",\n", - "};\n", - "const instance = new WatsonxLLM(props);\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain IBM watsonx.ai integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " __package_name__ @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatWatsonx } from \"@langchain/community/chat_models/ibm\";\n", - "const props = {\n", - " maxTokens: 200,\n", - " temperature: 0.5\n", - "};\n", - "\n", - "const instance = new ChatWatsonx({\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: process.env.API_URL,\n", - " projectId: \"\",\n", - " spaceId: \"\",\n", - " model: \"\",\n", - " ...props\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "30cb3968", - "metadata": {}, - "source": [ - "Note:\n", - "\n", - "- You must provide `spaceId` or `projectId` in order to proceed.\n", - "- Depending on the region of your provisioned service instance, use correct serviceUrl." - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chat-c5341b2062dc42f091e5ae2558e905e3\",\n", - " \"content\": \" J'adore la programmation.\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": []\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completion_tokens\": 10,\n", - " \"prompt_tokens\": 28,\n", - " \"total_tokens\": 38\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 28,\n", - " \"output_tokens\": 10,\n", - " \"total_tokens\": 38\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await instance.invoke([{\n", - " role: \"system\",\n", - " content: \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - "},\n", - "{\n", - " role: \"user\",\n", - " content: \"I love programming.\"\n", - "}]);\n", - "console.log(aiMsg)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: IBM watsonx.ai\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# IBM watsonx.ai\n", + "\n", + "This will help you getting started with IBM watsonx.ai [chat models](/docs/concepts/chat_models). For detailed documentation of all `IBM watsonx.ai` features and configurations head to the [IBM watsonx.ai](https://api.js.langchain.com/classes/_langchain_community.chat_models_ibm.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/ibm_watsonx/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`ChatWatsonx`](https://api.js.langchain.com/classes/_langchain_community.chat_models_ibm.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "To access IBM watsonx.ai models you'll need to create a/an IBM watsonx.ai account, get an API key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "\n", + "Head to [IBM Cloud](https://cloud.ibm.com/login) to sign up to IBM watsonx.ai and generate an API key or provide any other authentication form as presented below.\n", + "\n", + "#### IAM authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=iam\n", + "export WATSONX_AI_APIKEY=\n", + "```\n", + "\n", + "#### Bearer token authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=bearertoken\n", + "export WATSONX_AI_BEARER_TOKEN=\n", + "```\n", + "\n", + "#### CP4D authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=cp4d\n", + "export WATSONX_AI_USERNAME=\n", + "export WATSONX_AI_PASSWORD=\n", + 
"export WATSONX_AI_URL=\n", + "```\n", + "\n", + "Once these are places in your enviromental variables and object is initialized authentication will proceed automatically.\n", + "\n", + "Authentication can also be accomplished by passing these values as parameters to a new instance.\n", + "\n", + "## IAM authentication\n", + "\n", + "```typescript\n", + "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"iam\",\n", + " watsonxAIApikey: \"\",\n", + "};\n", + "const instance = new WatsonxLLM(props);\n", + "```\n", + "\n", + "## Bearer token authentication\n", + "\n", + "```typescript\n", + "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"bearertoken\",\n", + " watsonxAIBearerToken: \"\",\n", + "};\n", + "const instance = new WatsonxLLM(props);\n", + "```\n", + "\n", + "### CP4D authentication\n", + "\n", + "```typescript\n", + "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"cp4d\",\n", + " watsonxAIUsername: \"\",\n", + " watsonxAIPassword: \"\",\n", + " watsonxAIUrl: \"\",\n", + "};\n", + "const instance = new WatsonxLLM(props);\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain IBM watsonx.ai integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " __package_name__ @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chat-c5c2c08d3c984254acc48225c39c6a08\",\n", - " \"content\": \" Ich liebe Programmieren.\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": []\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completion_tokens\": 8,\n", - " \"prompt_tokens\": 22,\n", - " \"total_tokens\": 30\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 22,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 30\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "const chain = prompt.pipe(instance);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - " )" - ] - }, - { - "cell_type": "markdown", - "id": "2896aae5", - 
"metadata": {}, - "source": [ - "## Streaming the Model output" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "cd21e356", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " The\n", - " Moon\n", - " is\n", - " Earth\n", - "'\n", - "s\n", - " only\n", - " natural\n", - " satellite\n", - " and\n", - " the\n", - " fifth\n", - " largest\n", - " satellite\n", - " in\n", - " the\n", - " Solar\n", - " System\n", - ".\n", - " It\n", - " or\n", - "bits\n", - " Earth\n", - " every\n", - " \n", - "2\n", - "7\n", - ".\n", - "3\n", - " days\n", - " and\n", - " rot\n", - "ates\n", - " on\n", - " its\n", - " axis\n", - " in\n", - " the\n", - " same\n", - " amount\n", - " of\n", - " time\n", - ",\n", - " which\n", - " is\n", - " why\n", - " we\n", - " always\n", - " see\n", - " the\n", - " same\n", - " side\n", - " of\n", - " it\n", - ".\n", - " The\n", - " Moon\n", - "'\n", - "s\n", - " phases\n", - " change\n", - " as\n", - " it\n", - " or\n", - "bits\n", - " Earth\n", - ",\n", - " going\n", - " through\n", - " cycles\n", - " of\n", - " new\n", - ",\n", - " c\n", - "res\n", - "cent\n", - ",\n", - " half\n", - ",\n", - " g\n", - "ib\n", - "b\n", - "ous\n", - ",\n", - " and\n", - " full\n", - " phases\n", - ".\n", - " Its\n", - " gravity\n", - " influences\n", - " Earth\n", - "'\n", - "s\n", - " t\n", - "ides\n", - " and\n", - " stabil\n", - "izes\n", - " our\n" - ] - } - ], - "source": [ - "import { HumanMessage, SystemMessage } from \"@langchain/core/messages\";\n", - "\n", - "const messages = [\n", - " new SystemMessage('You are a helpful assistant which telling short-info about provided topic.'),\n", - " new HumanMessage(\"moon\")\n", - "]\n", - "const stream = await instance.stream(messages);\n", - "for await(const chunk of stream){\n", - " console.log(chunk)\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "65ed0609", - "metadata": {}, - "source": [ - "## Tool calling" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "f32f8cb0", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": null, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatWatsonx } from \"@langchain/community/chat_models/ibm\";\n", + "const props = {\n", + " maxTokens: 200,\n", + " temperature: 0.5\n", + "};\n", + "\n", + "const instance = new ChatWatsonx({\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: process.env.API_URL,\n", + " projectId: \"\",\n", + " spaceId: \"\",\n", + " model: \"\",\n", + " ...props\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "30cb3968", + "metadata": {}, + "source": [ + "Note:\n", + "\n", + "- You must provide `spaceId` or `projectId` in order to proceed.\n", + "- Depending on the region of your provisioned service instance, use correct serviceUrl." 
+ ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chat-c5341b2062dc42f091e5ae2558e905e3\",\n", + " \"content\": \" J'adore la programmation.\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": []\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completion_tokens\": 10,\n", + " \"prompt_tokens\": 28,\n", + " \"total_tokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 28,\n", + " \"output_tokens\": 10,\n", + " \"total_tokens\": 38\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await instance.invoke([{\n", + " role: \"system\",\n", + " content: \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + "},\n", + "{\n", + " role: \"user\",\n", + " content: \"I love programming.\"\n", + "}]);\n", + "console.log(aiMsg)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chat-d2214d0bdb794483a213b3211cf0d819\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"chatcmpl-tool-257f3d39532141b89178c2120f81f0cb\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completion_tokens\": 38,\n", - " \"prompt_tokens\": 177,\n", - " \"total_tokens\": 215\n", - " },\n", - " \"finish_reason\": \"tool_calls\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"calculator\",\n", - " \"args\": {\n", - " \"number1\": 3,\n", - " \"number2\": 12,\n", - " \"operation\": \"multiply\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"chatcmpl-tool-257f3d39532141b89178c2120f81f0cb\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 177,\n", - " \"output_tokens\": 38,\n", - " \"total_tokens\": 215\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chat-c5c2c08d3c984254acc48225c39c6a08\",\n", + " \"content\": \" Ich liebe Programmieren.\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": []\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completion_tokens\": 8,\n", + " \"prompt_tokens\": 22,\n", + " \"total_tokens\": 30\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": 
[],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 22,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 30\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "const chain = prompt.pipe(instance);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "2896aae5", + "metadata": {}, + "source": [ + "## Streaming the Model output" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "cd21e356", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The\n", + " Moon\n", + " is\n", + " Earth\n", + "'\n", + "s\n", + " only\n", + " natural\n", + " satellite\n", + " and\n", + " the\n", + " fifth\n", + " largest\n", + " satellite\n", + " in\n", + " the\n", + " Solar\n", + " System\n", + ".\n", + " It\n", + " or\n", + "bits\n", + " Earth\n", + " every\n", + " \n", + "2\n", + "7\n", + ".\n", + "3\n", + " days\n", + " and\n", + " rot\n", + "ates\n", + " on\n", + " its\n", + " axis\n", + " in\n", + " the\n", + " same\n", + " amount\n", + " of\n", + " time\n", + ",\n", + " which\n", + " is\n", + " why\n", + " we\n", + " always\n", + " see\n", + " the\n", + " same\n", + " side\n", + " of\n", + " it\n", + ".\n", + " The\n", + " Moon\n", + "'\n", + "s\n", + " phases\n", + " change\n", + " as\n", + " it\n", + " or\n", + "bits\n", + " Earth\n", + ",\n", + " going\n", + " through\n", + " cycles\n", + " of\n", + " new\n", + ",\n", + " c\n", + "res\n", + "cent\n", + ",\n", + " half\n", + ",\n", + " g\n", + "ib\n", + "b\n", + "ous\n", + ",\n", + " and\n", + " full\n", + " phases\n", + ".\n", + " Its\n", + " gravity\n", + " influences\n", + " Earth\n", + "'\n", + "s\n", + " t\n", + "ides\n", + " and\n", + " stabil\n", + "izes\n", + " our\n" + ] + } + ], + "source": [ + "import { HumanMessage, SystemMessage } from \"@langchain/core/messages\";\n", + "\n", + "const messages = [\n", + " new SystemMessage('You are a helpful assistant which telling short-info about provided topic.'),\n", + " new HumanMessage(\"moon\")\n", + "]\n", + "const stream = await instance.stream(messages);\n", + "for await(const chunk of stream){\n", + " console.log(chunk)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "65ed0609", + "metadata": {}, + "source": [ + "## Tool calling" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f32f8cb0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chat-d2214d0bdb794483a213b3211cf0d819\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"chatcmpl-tool-257f3d39532141b89178c2120f81f0cb\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completion_tokens\": 38,\n", + " \"prompt_tokens\": 177,\n", + " \"total_tokens\": 215\n", + " },\n", + " \"finish_reason\": \"tool_calls\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": 
\"calculator\",\n", + " \"args\": {\n", + " \"number1\": 3,\n", + " \"number2\": 12,\n", + " \"operation\": \"multiply\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"chatcmpl-tool-257f3d39532141b89178c2120f81f0cb\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 177,\n", + " \"output_tokens\": 38,\n", + " \"total_tokens\": 215\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "\n", + "const calculatorSchema = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + " });\n", + " \n", + "const calculatorTool = tool(\n", + "async ({ operation, number1, number2 }) => {\n", + " if (operation === \"add\") {\n", + " return `${number1 + number2}`;\n", + " } else if (operation === \"subtract\") {\n", + " return `${number1 - number2}`;\n", + " } else if (operation === \"multiply\") {\n", + " return `${number1 * number2}`;\n", + " } else if (operation === \"divide\") {\n", + " return `${number1 / number2}`;\n", + " } else {\n", + " throw new Error(\"Invalid operation.\");\n", + " }\n", + "},\n", + "{\n", + " name: \"calculator\",\n", + " description: \"Can perform mathematical operations.\",\n", + " schema: calculatorSchema,\n", + "}\n", + ");\n", + "\n", + "const instanceWithTools = instance.bindTools([calculatorTool]);\n", + "\n", + "const res = await instanceWithTools.invoke(\"What is 3 * 12\");\n", + "console.log(res)" + ] + }, + { + "cell_type": "markdown", + "id": "6339db97", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `IBM watsonx.ai` features and configurations head to the API reference: [API docs](https://api.js.langchain.com/modules/_langchain_community.embeddings_ibm.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "JavaScript (Node.js)", + "language": "javascript", + "name": "javascript" + }, + "language_info": { + "file_extension": ".js", + "mimetype": "application/javascript", + "name": "javascript", + "version": "20.17.0" } - ], - "source": [ - "import { tool } from \"@langchain/core/tools\";\n", - "import { z } from \"zod\";\n", - "\n", - "const calculatorSchema = z.object({\n", - " operation: z\n", - " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", - " .describe(\"The type of operation to execute.\"),\n", - " number1: z.number().describe(\"The first number to operate on.\"),\n", - " number2: z.number().describe(\"The second number to operate on.\"),\n", - " });\n", - " \n", - "const calculatorTool = tool(\n", - "async ({ operation, number1, number2 }) => {\n", - " if (operation === \"add\") {\n", - " return `${number1 + number2}`;\n", - " } else if (operation === \"subtract\") {\n", - " return `${number1 - number2}`;\n", - " } else if (operation === \"multiply\") {\n", - " return `${number1 * number2}`;\n", - " } else if (operation === \"divide\") {\n", - " return `${number1 / number2}`;\n", - " } else {\n", - " throw new Error(\"Invalid operation.\");\n", - " }\n", - "},\n", - "{\n", - " name: \"calculator\",\n", - " description: \"Can perform mathematical operations.\",\n", - " schema: calculatorSchema,\n", - "}\n", - ");\n", - "\n", - "const instanceWithTools = 
instance.bindTools([calculatorTool]);\n", - "\n", - "const res = await instanceWithTools.invoke(\"What is 3 * 12\");\n", - "console.log(res)" - ] - }, - { - "cell_type": "markdown", - "id": "6339db97", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `IBM watsonx.ai` features and configurations head to the API reference: [API docs](https://api.js.langchain.com/modules/_langchain_community.embeddings_ibm.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "JavaScript (Node.js)", - "language": "javascript", - "name": "javascript" }, - "language_info": { - "file_extension": ".js", - "mimetype": "application/javascript", - "name": "javascript", - "version": "20.17.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/index.mdx b/docs/core_docs/docs/integrations/chat/index.mdx index 0b4fa43125a5..a94f1ccd6076 100644 --- a/docs/core_docs/docs/integrations/chat/index.mdx +++ b/docs/core_docs/docs/integrations/chat/index.mdx @@ -6,7 +6,7 @@ hide_table_of_contents: true # Chat models -[Chat models](/docs/concepts/#chat-models) are language models that use a sequence of [messages](/docs/concepts/#messages) as inputs and return messages as outputs (as opposed to using plain text). These are generally newer models. +[Chat models](/docs/concepts/chat_models) are language models that use a sequence of [messages](/docs/concepts/messages) as inputs and return messages as outputs (as opposed to using plain text). These are generally newer models. :::info If you'd like to write your own chat model, see [this how-to](/docs/how_to/custom_chat). If you'd like to contribute an integration, see [Contributing integrations](/docs/contributing). 
diff --git a/docs/core_docs/docs/integrations/chat/llama_cpp.mdx b/docs/core_docs/docs/integrations/chat/llama_cpp.mdx index cd71225ef640..c0190f3041f6 100644 --- a/docs/core_docs/docs/integrations/chat/llama_cpp.mdx +++ b/docs/core_docs/docs/integrations/chat/llama_cpp.mdx @@ -79,5 +79,5 @@ import StreamInvokeExample from "@examples/models/chat/integration_llama_cpp_str ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/minimax.mdx b/docs/core_docs/docs/integrations/chat/minimax.mdx index 8f2cc3224722..658ba25d2f9a 100644 --- a/docs/core_docs/docs/integrations/chat/minimax.mdx +++ b/docs/core_docs/docs/integrations/chat/minimax.mdx @@ -76,5 +76,5 @@ import MinimaxPlugins from "@examples/models/chat/minimax_plugins.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/mistral.ipynb b/docs/core_docs/docs/integrations/chat/mistral.ipynb index 422f80205fb8..ccb2c6590f90 100644 --- a/docs/core_docs/docs/integrations/chat/mistral.ipynb +++ b/docs/core_docs/docs/integrations/chat/mistral.ipynb @@ -1,357 +1,357 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: MistralAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatMistralAI\n", - "\n", - "[Mistral AI](https://mistral.ai/) is a platform that offers hosting for their powerful [open source models](https://docs.mistral.ai/getting-started/models/).\n", - "\n", - "This will help you getting started with ChatMistralAI [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all ChatMistralAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/mistralai) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatMistralAI](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html) | [`@langchain/mistralai`](https://www.npmjs.com/package/@langchain/mistralai) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "To access Mistral AI models you'll need to create a Mistral AI account, get an API key, and install the `@langchain/mistralai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head [here](https://console.mistral.ai/) to sign up to Mistral AI and generate an API key. 
Once you've done this set the `MISTRAL_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export MISTRAL_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain ChatMistralAI integration lives in the `@langchain/mistralai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - "@langchain/mistralai @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatMistralAI } from \"@langchain/mistralai\" \n", - "\n", - "const llm = new ChatMistralAI({\n", - " model: \"mistral-large-latest\",\n", - " temperature: 0,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation\n", - "\n", - "When sending chat messages to mistral, there are a few requirements to follow:\n", - "\n", - "- The first message can _*not*_ be an assistant (ai) message.\n", - "- Messages _*must*_ alternate between user and assistant (ai) messages.\n", - "- Messages can _*not*_ end with an assistant (ai) or system message." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"J'adore la programmation.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 9,\n", - " \"promptTokens\": 27,\n", - " \"totalTokens\": 36\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 27,\n", - " \"output_tokens\": 9,\n", - " \"total_tokens\": 36\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: MistralAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatMistralAI\n", + "\n", + "[Mistral AI](https://mistral.ai/) is a platform that offers hosting for their powerful [open source models](https://docs.mistral.ai/getting-started/models/).\n", + "\n", + "This will help you getting started with ChatMistralAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatMistralAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/mistralai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatMistralAI](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html) | [`@langchain/mistralai`](https://www.npmjs.com/package/@langchain/mistralai) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "To access Mistral AI models you'll need to create a Mistral AI account, get an API key, and install the `@langchain/mistralai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head [here](https://console.mistral.ai/) to sign up to Mistral AI and generate an API key. 
Once you've done this set the `MISTRAL_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export MISTRAL_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatMistralAI integration lives in the `@langchain/mistralai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + "@langchain/mistralai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"Ich liebe Programmieren.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 7,\n", - " \"promptTokens\": 21,\n", - " \"totalTokens\": 28\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 21,\n", - " \"output_tokens\": 7,\n", - " \"total_tokens\": 28\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", - "metadata": {}, - "source": [ - "## Tool calling\n", - "\n", - "Mistral's API supports [tool calling](/docs/concepts#functiontool-calling) for a subset of their models. 
You can see which models support tool calling [on this page](https://docs.mistral.ai/capabilities/function_calling/).\n", - "\n", - "The examples below demonstrates how to use it:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "98d9034c", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatMistralAI } from \"@langchain/mistralai\" \n", + "\n", + "const llm = new ChatMistralAI({\n", + " model: \"mistral-large-latest\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "When sending chat messages to mistral, there are a few requirements to follow:\n", + "\n", + "- The first message can _*not*_ be an assistant (ai) message.\n", + "- Messages _*must*_ alternate between user and assistant (ai) messages.\n", + "- Messages can _*not*_ end with an assistant (ai) or system message." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 9,\n", + " \"promptTokens\": 27,\n", + " \"totalTokens\": 36\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 27,\n", + " \"output_tokens\": 9,\n", + " \"total_tokens\": 36\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'calculator',\n", - " args: { operation: 'add', number1: 2, number2: 2 },\n", - " type: 'tool_call',\n", - " id: 'DD9diCL1W'\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 7,\n", + " \"promptTokens\": 21,\n", + " \"totalTokens\": 28\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 21,\n", + " \"output_tokens\": 7,\n", + " \"total_tokens\": 28\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Mistral's API supports [tool calling](/docs/concepts/tool_calling) for a subset of their models. 
You can see which models support tool calling [on this page](https://docs.mistral.ai/capabilities/function_calling/).\n",
+ "\n",
+ "The examples below demonstrate how to use it:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "98d9034c",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[\n",
+ " {\n",
+ " name: 'calculator',\n",
+ " args: { operation: 'add', number1: 2, number2: 2 },\n",
+ " type: 'tool_call',\n",
+ " id: 'DD9diCL1W'\n",
+ " }\n",
+ "]\n"
+ ]
+ }
+ ],
+ "source": [
+ "import { ChatMistralAI } from \"@langchain/mistralai\";\n",
+ "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
+ "import { z } from \"zod\";\n",
+ "import { tool } from \"@langchain/core/tools\";\n",
+ "\n",
+ "const calculatorSchema = z.object({\n",
+ " operation: z\n",
+ " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n",
+ " .describe(\"The type of operation to execute.\"),\n",
+ " number1: z.number().describe(\"The first number to operate on.\"),\n",
+ " number2: z.number().describe(\"The second number to operate on.\"),\n",
+ "});\n",
+ "\n",
+ "const calculatorTool = tool((input) => {\n",
+ " return JSON.stringify(input);\n",
+ "}, {\n",
+ " name: \"calculator\",\n",
+ " description: \"A simple calculator 
tool\",\n", - " schema: calculatorSchema,\n", - "});\n", - "\n", - "// Bind the tool to the model\n", - "const modelWithTool = new ChatMistralAI({\n", - " model: \"mistral-large-latest\",\n", - "}).bindTools([calculatorTool]);\n", - "\n", - "\n", - "const calcToolPrompt = ChatPromptTemplate.fromMessages([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant who always needs to use a calculator.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "// Chain your prompt, model, and output parser together\n", - "const chainWithCalcTool = calcToolPrompt.pipe(modelWithTool);\n", - "\n", - "const calcToolRes = await chainWithCalcTool.invoke({\n", - " input: \"What is 2 + 2?\",\n", - "});\n", - "console.log(calcToolRes.tool_calls);" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatMistralAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/moonshot.mdx b/docs/core_docs/docs/integrations/chat/moonshot.mdx index 1fb914893f4c..7f1b5e399244 100644 --- a/docs/core_docs/docs/integrations/chat/moonshot.mdx +++ b/docs/core_docs/docs/integrations/chat/moonshot.mdx @@ -36,5 +36,5 @@ import Moonshot from "@examples/models/chat/integration_moonshot.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx b/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx index 3ce1be4ff745..7fa0b16af923 100644 --- a/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx +++ b/docs/core_docs/docs/integrations/chat/ni_bittensor.mdx @@ -30,5 +30,5 @@ console.log({ res }); ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/ollama.ipynb b/docs/core_docs/docs/integrations/chat/ollama.ipynb index 6a1fdb36f40d..12ea4b4e35c8 100644 --- a/docs/core_docs/docs/integrations/chat/ollama.ipynb +++ b/docs/core_docs/docs/integrations/chat/ollama.ipynb @@ -1,563 +1,563 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Ollama\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatOllama\n", - "\n", - "[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 3.1, locally.\n", - "\n", - "Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. 
It optimizes setup and configuration details, including GPU usage.\n", - "\n", - "This guide will help you getting started with `ChatOllama` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatOllama` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "Ollama allows you to use a wide range of models with different capabilities. Some of the fields in the details table below only apply to a subset of models that Ollama offers.\n", - "\n", - "For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.com/search) and search by tag.\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/ollama) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatOllama](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html) | [`@langchain/ollama`](https://www.npmjs.com/package/@langchain/ollama) | ✅ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", - "\n", - "## Setup\n", - "\n", - "Follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance. 
Then, download the `@langchain/ollama` package.\n", - "\n", - "### Credentials\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain ChatOllama integration lives in the `@langchain/ollama` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/ollama @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOllama } from \"@langchain/ollama\"\n", - "\n", - "const llm = new ChatOllama({\n", - " model: \"llama3\",\n", - " temperature: 0,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"Je adore le programmation.\\n\\n(Note: \\\"programmation\\\" is the feminine form of the noun in French, but if you want to use the masculine form, it would be \\\"le programme\\\" instead.)\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"model\": \"llama3\",\n", - " \"created_at\": \"2024-08-01T16:59:17.359302Z\",\n", - " \"done_reason\": \"stop\",\n", - " \"done\": true,\n", - " \"total_duration\": 6399311167,\n", - " \"load_duration\": 5575776417,\n", - " \"prompt_eval_count\": 35,\n", - " \"prompt_eval_duration\": 110053000,\n", - " \"eval_count\": 43,\n", - " \"eval_duration\": 711744000\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 35,\n", - " \"output_tokens\": 43,\n", - " \"total_tokens\": 78\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Ollama\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Je adore le programmation.\n", - "\n", - "(Note: \"programmation\" is the feminine form of the noun in French, but if you want to use the masculine form, it would be \"le programme\" instead.)\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatOllama\n", + "\n", + "[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 3.1, locally.\n", + "\n", + "Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage.\n", + "\n", + "This guide will help you getting started with `ChatOllama` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatOllama` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "Ollama allows you to use a wide range of models with different capabilities. 
Some of the fields in the details table below only apply to a subset of models that Ollama offers.\n", + "\n", + "For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.com/search) and search by tag.\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/ollama) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatOllama](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html) | [`@langchain/ollama`](https://www.npmjs.com/package/@langchain/ollama) | ✅ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "Follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance. Then, download the `@langchain/ollama` package.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatOllama integration lives in the `@langchain/ollama` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/ollama @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"Ich liebe Programmieren!\\n\\n(Note: \\\"Ich liebe\\\" means \\\"I love\\\", \\\"Programmieren\\\" is the verb for \\\"programming\\\")\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"model\": \"llama3\",\n", - " \"created_at\": \"2024-08-01T16:59:18.088423Z\",\n", - " \"done_reason\": \"stop\",\n", - " \"done\": true,\n", - " \"total_duration\": 585146125,\n", - " \"load_duration\": 27557166,\n", - " \"prompt_eval_count\": 30,\n", - " \"prompt_eval_duration\": 74241000,\n", - " \"eval_count\": 29,\n", - " \"eval_duration\": 481195000\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 30,\n", - " \"output_tokens\": 29,\n", - " \"total_tokens\": 59\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates 
{input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", - "metadata": {}, - "source": [ - "## Tools\n", - "\n", - "Ollama now offers support for native tool calling [for a subset of their available models](https://ollama.com/search?c=tools). The example below demonstrates how you can invoke a tool from an Ollama model." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "d2502c0d", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"model\": \"llama3-groq-tool-use\",\n", - " \"created_at\": \"2024-08-01T18:43:13.2181Z\",\n", - " \"done_reason\": \"stop\",\n", - " \"done\": true,\n", - " \"total_duration\": 2311023875,\n", - " \"load_duration\": 1560670292,\n", - " \"prompt_eval_count\": 177,\n", - " \"prompt_eval_duration\": 263603000,\n", - " \"eval_count\": 30,\n", - " \"eval_duration\": 485582000\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"get_current_weather\",\n", - " \"args\": {\n", - " \"location\": \"San Francisco, CA\"\n", - " },\n", - " \"id\": \"c7a9d590-99ad-42af-9996-41b90efcf827\",\n", - " \"type\": \"tool_call\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 177,\n", - " \"output_tokens\": 30,\n", - " \"total_tokens\": 207\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { tool } from \"@langchain/core/tools\";\n", - "import { ChatOllama } from \"@langchain/ollama\";\n", - "import { z } from \"zod\";\n", - "\n", - "const weatherTool = tool((_) => \"Da weather is weatherin\", {\n", - " name: \"get_current_weather\",\n", - " description: \"Get the current weather in a given location\",\n", - " schema: z.object({\n", - " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", - " }),\n", - "});\n", - "\n", - "// Define the model\n", - "const llmForTool = new ChatOllama({\n", - " model: \"llama3-groq-tool-use\",\n", - "});\n", - "\n", - "// Bind the tool to the model\n", - "const llmWithTools = llmForTool.bindTools([weatherTool]);\n", - "\n", - "const resultFromTool = await llmWithTools.invoke(\n", - " \"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool.\"\n", - ");\n", - "\n", - "console.log(resultFromTool);" - ] - }, - { - "cell_type": "markdown", - "id": "47faa093", - "metadata": {}, - "source": [ - "### `.withStructuredOutput`\n", - "\n", - "For [models that support tool calling](https://ollama.com/search?c=tools), you can also call `.withStructuredOutput()` to get a structured output from the tool." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "759924f6", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\"\n", + "\n", + "const llm = new ChatOllama({\n", + " model: \"llama3\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ location: 'San Francisco, CA' }\n" - ] - } - ], - "source": [ - "import { ChatOllama } from \"@langchain/ollama\";\n", - "import { z } from \"zod\";\n", - "\n", - "// Define the model\n", - "const llmForWSO = new ChatOllama({\n", - " model: \"llama3-groq-tool-use\",\n", - "});\n", - "\n", - "// Define the tool schema you'd like the model to use.\n", - "const schemaForWSO = z.object({\n", - " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", - "});\n", - "\n", - "// Pass the schema to the withStructuredOutput method to bind it to the model.\n", - "const llmWithStructuredOutput = llmForWSO.withStructuredOutput(schemaForWSO, {\n", - " name: \"get_current_weather\",\n", - "});\n", - "\n", - "const resultFromWSO = await llmWithStructuredOutput.invoke(\n", - " \"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool.\"\n", - ");\n", - "console.log(resultFromWSO);" - ] - }, - { - "cell_type": "markdown", - "id": "cb1377af", - "metadata": {}, - "source": [ - "### JSON mode\n", - "\n", - "Ollama also supports a JSON mode for all chat models that coerces model outputs to only return JSON. Here's an example of how this can be useful for extraction:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "de94282b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \"{\\n\\\"original\\\": \\\"I love programming\\\",\\n\\\"translated\\\": \\\"Ich liebe Programmierung\\\"\\n}\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"model\": \"llama3\",\n", - " \"created_at\": \"2024-08-01T17:24:54.35568Z\",\n", - " \"done_reason\": \"stop\",\n", - " \"done\": true,\n", - " \"total_duration\": 1754811583,\n", - " \"load_duration\": 1297200208,\n", - " \"prompt_eval_count\": 47,\n", - " \"prompt_eval_duration\": 128532000,\n", - " \"eval_count\": 20,\n", - " \"eval_duration\": 318519000\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 47,\n", - " \"output_tokens\": 20,\n", - " \"total_tokens\": 67\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatOllama } from \"@langchain/ollama\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const promptForJsonMode = ChatPromptTemplate.fromMessages([\n", - " [\n", - " \"system\",\n", - " `You are an expert translator. 
Format all responses as JSON objects with two keys: \"original\" and \"translated\".`,\n", - " ],\n", - " [\"human\", `Translate \"{input}\" into {language}.`],\n", - "]);\n", - "\n", - "const llmJsonMode = new ChatOllama({\n", - " baseUrl: \"http://localhost:11434\", // Default value\n", - " model: \"llama3\",\n", - " format: \"json\",\n", - "});\n", - "\n", - "const chainForJsonMode = promptForJsonMode.pipe(llmJsonMode);\n", - "\n", - "const resultFromJsonMode = await chainForJsonMode.invoke({\n", - " input: \"I love programming\",\n", - " language: \"German\",\n", - "});\n", - "\n", - "console.log(resultFromJsonMode);" - ] - }, - { - "cell_type": "markdown", - "id": "9881d422", - "metadata": {}, - "source": [ - "## Multimodal models\n", - "\n", - "Ollama supports open source multimodal models like [LLaVA](https://ollama.ai/library/llava) in versions 0.1.15 and up.\n", - "You can pass images as part of a message's `content` field to [multimodal-capable](/docs/how_to/multimodal_inputs/) models like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "958171d7", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Je adore le programmation.\\n\\n(Note: \\\"programmation\\\" is the feminine form of the noun in French, but if you want to use the masculine form, it would be \\\"le programme\\\" instead.)\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llama3\",\n", + " \"created_at\": \"2024-08-01T16:59:17.359302Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 6399311167,\n", + " \"load_duration\": 5575776417,\n", + " \"prompt_eval_count\": 35,\n", + " \"prompt_eval_duration\": 110053000,\n", + " \"eval_count\": 43,\n", + " \"eval_duration\": 711744000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 35,\n", + " \"output_tokens\": 43,\n", + " \"total_tokens\": 78\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Je adore le programmation.\n", + "\n", + "(Note: \"programmation\" is the feminine form of the noun in French, but if you want to use the masculine form, it would be \"le programme\" instead.)\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe Programmieren!\\n\\n(Note: \\\"Ich liebe\\\" means \\\"I love\\\", \\\"Programmieren\\\" is the verb for \\\"programming\\\")\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llama3\",\n", + " \"created_at\": \"2024-08-01T16:59:18.088423Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 585146125,\n", + " \"load_duration\": 27557166,\n", + " \"prompt_eval_count\": 30,\n", + " \"prompt_eval_duration\": 74241000,\n", + " \"eval_count\": 29,\n", + " \"eval_duration\": 481195000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 30,\n", + " \"output_tokens\": 29,\n", + " \"total_tokens\": 59\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "Ollama now offers support for native tool calling [for a subset of their available models](https://ollama.com/search?c=tools). The example below demonstrates how you can invoke a tool from an Ollama model." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"content\": \" The image shows a hot dog in a bun, which appears to be a footlong. It has been cooked or grilled to the point where it's browned and possibly has some blackened edges, indicating it might be slightly overcooked. Accompanying the hot dog is a bun that looks toasted as well. There are visible char marks on both the hot dog and the bun, suggesting they have been cooked directly over a source of heat, such as a grill or broiler. The background is white, which puts the focus entirely on the hot dog and its bun. 
\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"model\": \"llava\",\n", - " \"created_at\": \"2024-08-01T17:25:02.169957Z\",\n", - " \"done_reason\": \"stop\",\n", - " \"done\": true,\n", - " \"total_duration\": 5700249458,\n", - " \"load_duration\": 2543040666,\n", - " \"prompt_eval_count\": 1,\n", - " \"prompt_eval_duration\": 1032591000,\n", - " \"eval_count\": 127,\n", - " \"eval_duration\": 2114201000\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 1,\n", - " \"output_tokens\": 127,\n", - " \"total_tokens\": 128\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 1, + "id": "d2502c0d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llama3-groq-tool-use\",\n", + " \"created_at\": \"2024-08-01T18:43:13.2181Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 2311023875,\n", + " \"load_duration\": 1560670292,\n", + " \"prompt_eval_count\": 177,\n", + " \"prompt_eval_duration\": 263603000,\n", + " \"eval_count\": 30,\n", + " \"eval_duration\": 485582000\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"get_current_weather\",\n", + " \"args\": {\n", + " \"location\": \"San Francisco, CA\"\n", + " },\n", + " \"id\": \"c7a9d590-99ad-42af-9996-41b90efcf827\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 177,\n", + " \"output_tokens\": 30,\n", + " \"total_tokens\": 207\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { z } from \"zod\";\n", + "\n", + "const weatherTool = tool((_) => \"Da weather is weatherin\", {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather in a given location\",\n", + " schema: z.object({\n", + " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", + " }),\n", + "});\n", + "\n", + "// Define the model\n", + "const llmForTool = new ChatOllama({\n", + " model: \"llama3-groq-tool-use\",\n", + "});\n", + "\n", + "// Bind the tool to the model\n", + "const llmWithTools = llmForTool.bindTools([weatherTool]);\n", + "\n", + "const resultFromTool = await llmWithTools.invoke(\n", + " \"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool.\"\n", + ");\n", + "\n", + "console.log(resultFromTool);" + ] + }, + { + "cell_type": "markdown", + "id": "47faa093", + "metadata": {}, + "source": [ + "### `.withStructuredOutput`\n", + "\n", + "For [models that support tool calling](https://ollama.com/search?c=tools), you can also call `.withStructuredOutput()` to get a structured output from the tool." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "759924f6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ location: 'San Francisco, CA' }\n" + ] + } + ], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { z } from \"zod\";\n", + "\n", + "// Define the model\n", + "const llmForWSO = new ChatOllama({\n", + " model: \"llama3-groq-tool-use\",\n", + "});\n", + "\n", + "// Define the tool schema you'd like the model to use.\n", + "const schemaForWSO = z.object({\n", + " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", + "});\n", + "\n", + "// Pass the schema to the withStructuredOutput method to bind it to the model.\n", + "const llmWithStructuredOutput = llmForWSO.withStructuredOutput(schemaForWSO, {\n", + " name: \"get_current_weather\",\n", + "});\n", + "\n", + "const resultFromWSO = await llmWithStructuredOutput.invoke(\n", + " \"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool.\"\n", + ");\n", + "console.log(resultFromWSO);" + ] + }, + { + "cell_type": "markdown", + "id": "cb1377af", + "metadata": {}, + "source": [ + "### JSON mode\n", + "\n", + "Ollama also supports a JSON mode for all chat models that coerces model outputs to only return JSON. Here's an example of how this can be useful for extraction:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "de94282b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"{\\n\\\"original\\\": \\\"I love programming\\\",\\n\\\"translated\\\": \\\"Ich liebe Programmierung\\\"\\n}\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llama3\",\n", + " \"created_at\": \"2024-08-01T17:24:54.35568Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 1754811583,\n", + " \"load_duration\": 1297200208,\n", + " \"prompt_eval_count\": 47,\n", + " \"prompt_eval_duration\": 128532000,\n", + " \"eval_count\": 20,\n", + " \"eval_duration\": 318519000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 47,\n", + " \"output_tokens\": 20,\n", + " \"total_tokens\": 67\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const promptForJsonMode = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " `You are an expert translator. 
Format all responses as JSON objects with two keys: \"original\" and \"translated\".`,\n", + " ],\n", + " [\"human\", `Translate \"{input}\" into {language}.`],\n", + "]);\n", + "\n", + "const llmJsonMode = new ChatOllama({\n", + " baseUrl: \"http://localhost:11434\", // Default value\n", + " model: \"llama3\",\n", + " format: \"json\",\n", + "});\n", + "\n", + "const chainForJsonMode = promptForJsonMode.pipe(llmJsonMode);\n", + "\n", + "const resultFromJsonMode = await chainForJsonMode.invoke({\n", + " input: \"I love programming\",\n", + " language: \"German\",\n", + "});\n", + "\n", + "console.log(resultFromJsonMode);" + ] + }, + { + "cell_type": "markdown", + "id": "9881d422", + "metadata": {}, + "source": [ + "## Multimodal models\n", + "\n", + "Ollama supports open source multimodal models like [LLaVA](https://ollama.ai/library/llava) in versions 0.1.15 and up.\n", + "You can pass images as part of a message's `content` field to [multimodal-capable](/docs/how_to/multimodal_inputs/) models like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "958171d7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \" The image shows a hot dog in a bun, which appears to be a footlong. It has been cooked or grilled to the point where it's browned and possibly has some blackened edges, indicating it might be slightly overcooked. Accompanying the hot dog is a bun that looks toasted as well. There are visible char marks on both the hot dog and the bun, suggesting they have been cooked directly over a source of heat, such as a grill or broiler. The background is white, which puts the focus entirely on the hot dog and its bun. \",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llava\",\n", + " \"created_at\": \"2024-08-01T17:25:02.169957Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 5700249458,\n", + " \"load_duration\": 2543040666,\n", + " \"prompt_eval_count\": 1,\n", + " \"prompt_eval_duration\": 1032591000,\n", + " \"eval_count\": 127,\n", + " \"eval_duration\": 2114201000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 1,\n", + " \"output_tokens\": 127,\n", + " \"total_tokens\": 128\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import * as fs from \"node:fs/promises\";\n", + "\n", + "const imageData = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n", + "const llmForMultiModal = new ChatOllama({\n", + " model: \"llava\",\n", + " baseUrl: \"http://127.0.0.1:11434\",\n", + "});\n", + "const multiModalRes = await llmForMultiModal.invoke([\n", + " new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"What is in this image?\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: `data:image/jpeg;base64,${imageData.toString(\"base64\")}`,\n", + " },\n", + " ],\n", + " }),\n", + "]);\n", + "console.log(multiModalRes);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatOllama features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html" + ] + 
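The features table above lists token-level streaming as supported. A minimal streaming sketch, assuming the `llm` instance created in the instantiation cell earlier in this notebook is still in scope:

```typescript
// Stream the reply token by token instead of waiting for the full message.
const stream = await llm.stream("Why is the sky blue? Answer in one sentence.");

const pieces: string[] = [];
for await (const chunk of stream) {
  // Each chunk is an AIMessageChunk; content is a plain string for this model.
  pieces.push(typeof chunk.content === "string" ? chunk.content : "");
}
console.log(pieces.join(""));
```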
} + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatOllama } from \"@langchain/ollama\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "import * as fs from \"node:fs/promises\";\n", - "\n", - "const imageData = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n", - "const llmForMultiModal = new ChatOllama({\n", - " model: \"llava\",\n", - " baseUrl: \"http://127.0.0.1:11434\",\n", - "});\n", - "const multiModalRes = await llmForMultiModal.invoke([\n", - " new HumanMessage({\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: \"What is in this image?\",\n", - " },\n", - " {\n", - " type: \"image_url\",\n", - " image_url: `data:image/jpeg;base64,${imageData.toString(\"base64\")}`,\n", - " },\n", - " ],\n", - " }),\n", - "]);\n", - "console.log(multiModalRes);" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatOllama features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/ollama_functions.mdx b/docs/core_docs/docs/integrations/chat/ollama_functions.mdx index 3c009b3539be..fbcedcf4a6bc 100644 --- a/docs/core_docs/docs/integrations/chat/ollama_functions.mdx +++ b/docs/core_docs/docs/integrations/chat/ollama_functions.mdx @@ -71,5 +71,5 @@ import OllamaFunctionsCustomPrompt from "@examples/models/chat/ollama_functions/ ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/openai.ipynb b/docs/core_docs/docs/integrations/chat/openai.ipynb index 083e64b88c27..e70ca09d5c43 100644 --- a/docs/core_docs/docs/integrations/chat/openai.ipynb +++ b/docs/core_docs/docs/integrations/chat/openai.ipynb @@ -1,1215 +1,1215 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: OpenAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatOpenAI\n", - "\n", - "[OpenAI](https://en.wikipedia.org/wiki/OpenAI) is an artificial intelligence (AI) research laboratory.\n", - "\n", - "This guide will help you getting started with ChatOpenAI [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/openai) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html) | [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", - "\n", - "## Setup\n", - "\n", - "To access OpenAI chat models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [OpenAI's website](https://platform.openai.com/) to sign up for OpenAI and generate an API key. 
Once you've done this set the `OPENAI_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export OPENAI_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `ChatOpenAI` integration lives in the `@langchain/openai` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\" \n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ADItECqSPuuEuBHHPjeCkh9wIO1H5\",\n", - " \"content\": \"J'adore la programmation.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 5,\n", - " \"promptTokens\": 31,\n", - " \"totalTokens\": 36\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_5796ac6771\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 31,\n", - " \"output_tokens\": 5,\n", - " \"total_tokens\": 36\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " {\n", - " role: \"system\",\n", - " content: \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " },\n", - " {\n", - " role: \"user\",\n", - " content: \"I love programming.\"\n", - " },\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: OpenAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatOpenAI\n", + "\n", + "[OpenAI](https://en.wikipedia.org/wiki/OpenAI) is an artificial intelligence (AI) research laboratory.\n", + "\n", + "This guide will help you getting started with ChatOpenAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/openai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html) | [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access OpenAI chat models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [OpenAI's website](https://platform.openai.com/) to sign up for OpenAI and generate an API key. 
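If you prefer not to rely on environment variables, the key can also be passed directly via the `apiKey` constructor field. A minimal sketch, where `"your-api-key"` is a placeholder for however you load secrets:

```typescript
import { ChatOpenAI } from "@langchain/openai";

// An explicitly passed key is used instead of the OPENAI_API_KEY environment variable.
const llmWithExplicitKey = new ChatOpenAI({
  model: "gpt-4o-mini",
  apiKey: "your-api-key", // placeholder: load from your own secrets management
});
```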
Once you've done this set the `OPENAI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatOpenAI` integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ADItFaWFNqkSjSmlxeGk6HxcBHzVN\",\n", - " \"content\": \"Ich liebe Programmieren.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 5,\n", - " \"promptTokens\": 26,\n", - " \"totalTokens\": 31\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_5796ac6771\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 26,\n", - " \"output_tokens\": 5,\n", - " \"total_tokens\": 31\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "06ffc86b", - "metadata": {}, - "source": [ - "## Custom URLs\n", - "\n", - "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19a092b9", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llmWithCustomURL = new ChatOpenAI({\n", - " temperature: 0.9,\n", - " configuration: {\n", - " baseURL: \"https://your_custom_url.com\",\n", - " },\n", - "});\n", - "\n", - "await llmWithCustomURL.invoke(\"Hi there!\");" - ] - }, - { - "cell_type": "markdown", - "id": "20b60ccb", - "metadata": {}, - "source": [ - "The `configuration` field also accepts other `ClientOptions` parameters accepted by the official SDK.\n", - "\n", - "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/chat/azure).\n", - "\n", - "## Custom headers\n", - "\n", - "You can specify custom headers in the same `configuration` field:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cd612609", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llmWithCustomHeaders = new ChatOpenAI({\n", - " temperature: 0.9,\n", - " configuration: {\n", - " defaultHeaders: {\n", - " 
\"Authorization\": `Bearer SOME_CUSTOM_VALUE`,\n", - " },\n", - " },\n", - "});\n", - "\n", - "await llmWithCustomHeaders.invoke(\"Hi there!\");" - ] - }, - { - "cell_type": "markdown", - "id": "7af61d1d", - "metadata": {}, - "source": [ - "## Disabling streaming usage metadata\n", - "\n", - "Some proxies or third-party providers present largely the same API interface as OpenAI, but don't support the more recently added `stream_options` parameter to return streaming usage. You can use `ChatOpenAI` to access these providers by disabling streaming usage like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0ff40bd7", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llmWithoutStreamUsage = new ChatOpenAI({\n", - " temperature: 0.9,\n", - " streamUsage: false,\n", - " configuration: {\n", - " baseURL: \"https://proxy.com\",\n", - " },\n", - "});\n", - "\n", - "await llmWithoutStreamUsage.invoke(\"Hi there!\");" - ] - }, - { - "cell_type": "markdown", - "id": "013b6300", - "metadata": {}, - "source": [ - "## Calling fine-tuned models\n", - "\n", - "You can call fine-tuned OpenAI models by passing in your corresponding `modelName` parameter.\n", - "\n", - "This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7448f6a9", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const fineTunedLlm = new ChatOpenAI({\n", - " temperature: 0.9,\n", - " model: \"ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}\",\n", - "});\n", - "\n", - "await fineTunedLlm.invoke(\"Hi there!\");" - ] - }, - { - "cell_type": "markdown", - "id": "a2270901", - "metadata": {}, - "source": [ - "## Generation metadata\n", - "\n", - "If you need additional information like logprobs or token usage, these will be returned directly in the `.invoke` response within the `response_metadata` field on the message.\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip\n", - "Requires `@langchain/core` version >=0.1.48.\n", - ":::\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "2b675330", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " content: [\n", - " {\n", - " token: 'Hello',\n", - " logprob: -0.0004740447,\n", - " bytes: [ 72, 101, 108, 108, 111 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: '!',\n", - " logprob: -0.00004334534,\n", - " bytes: [ 33 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: ' How',\n", - " logprob: -0.000030113732,\n", - " bytes: [ 32, 72, 111, 119 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: ' can',\n", - " logprob: -0.0004797665,\n", - " bytes: [ 32, 99, 97, 110 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: ' I',\n", - " logprob: -7.89631e-7,\n", - " bytes: [ 32, 73 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: ' assist',\n", - " logprob: -0.114006,\n", - " bytes: [\n", - " 32, 97, 115,\n", - " 115, 105, 115,\n", - " 116\n", - " ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: ' you',\n", - " logprob: -4.3202e-7,\n", - " 
bytes: [ 32, 121, 111, 117 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: ' today',\n", - " logprob: -0.00004501419,\n", - " bytes: [ 32, 116, 111, 100, 97, 121 ],\n", - " top_logprobs: []\n", - " },\n", - " {\n", - " token: '?',\n", - " logprob: -0.000010206721,\n", - " bytes: [ 63 ],\n", - " top_logprobs: []\n", - " }\n", - " ],\n", - " refusal: null\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "// See https://cookbook.openai.com/examples/using_logprobs for details\n", - "const llmWithLogprobs = new ChatOpenAI({\n", - " logprobs: true,\n", - " // topLogprobs: 5,\n", - "});\n", - "\n", - "const responseMessageWithLogprobs = await llmWithLogprobs.invoke(\"Hi there!\");\n", - "console.dir(responseMessageWithLogprobs.response_metadata.logprobs, { depth: null });" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## Tool calling\n", - "\n", - "Tool calling with OpenAI models works in a similar to [other models](/docs/how_to/tool_calling). Additionally, the following guides have some information especially relevant to OpenAI:\n", - "\n", - "- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel/)\n", - "- [How to: force a tool call](/docs/how_to/tool_choice/)\n", - "- [How to: bind model-specific tool formats to a model](/docs/how_to/tool_calling#binding-model-specific-formats-advanced)." - ] - }, - { - "cell_type": "markdown", - "id": "3392390e", - "metadata": {}, - "source": [ - "## ``strict: true``\n", - "\n", - "As of Aug 6, 2024, OpenAI supports a `strict` argument when calling tools that will enforce that the tool argument schema is respected by the model. See more here: https://platform.openai.com/docs/guides/function-calling.\n", - "\n", - "```{=mdx}\n", - "\n", - ":::info Requires ``@langchain/openai >= 0.2.6``\n", - "\n", - "**Note**: If ``strict: true`` the tool definition will also be validated, and a subset of JSON schema are accepted. Crucially, schema cannot have optional args (those with default values). Read the full docs on what types of schema are supported here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas. \n", - ":::\n", - "\n", - "\n", - "```\n", - "\n", - "Here's an example with tool calling. 
Passing an extra `strict: true` argument to `.bindTools` will pass the param through to all tool definitions:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "90f0d465", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\" \n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + " // other params...\n", + "})" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'get_current_weather',\n", - " args: { location: 'current' },\n", - " type: 'tool_call',\n", - " id: 'call_hVFyYNRwc6CoTgr9AQFQVjm9'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "import { z } from \"zod\";\n", - "\n", - "const weatherTool = tool((_) => \"no-op\", {\n", - " name: \"get_current_weather\",\n", - " description: \"Get the current weather\",\n", - " schema: z.object({\n", - " location: z.string(),\n", - " }),\n", - "})\n", - "\n", - "const llmWithStrictTrue = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - "}).bindTools([weatherTool], {\n", - " strict: true,\n", - " tool_choice: weatherTool.name,\n", - "});\n", - "\n", - "// Although the question is not about the weather, it will call the tool with the correct arguments\n", - "// because we passed `tool_choice` and `strict: true`.\n", - "const strictTrueResult = await llmWithStrictTrue.invoke(\"What is 127862 times 12898 divided by 2?\");\n", - "\n", - "console.dir(strictTrueResult.tool_calls, { depth: null });" - ] - }, - { - "cell_type": "markdown", - "id": "6c46a668", - "metadata": {}, - "source": [ - "If you only want to apply this parameter to a select number of tools, you can also pass OpenAI formatted tool schemas directly:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "e2da9ead", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'get_current_weather',\n", - " args: { location: 'London' },\n", - " type: 'tool_call',\n", - " id: 'call_EOSejtax8aYtqpchY8n8O82l'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", - "\n", - "const toolSchema = {\n", - " type: \"function\",\n", - " function: {\n", - " name: \"get_current_weather\",\n", - " description: \"Get the current weather\",\n", - " strict: true,\n", - " parameters: zodToJsonSchema(\n", - " z.object({\n", - " location: z.string(),\n", - " })\n", - " ),\n", - " },\n", - "};\n", - "\n", - "const llmWithStrictTrueTools = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - "}).bindTools([toolSchema], {\n", - " strict: true,\n", - "});\n", - "\n", - "const weatherToolResult = await llmWithStrictTrueTools.invoke([{\n", - " role: \"user\",\n", - " content: \"What is the current weather in London?\"\n", - "}])\n", - "\n", - "weatherToolResult.tool_calls;" - ] - }, - { - "cell_type": "markdown", - "id": "045668fe", - "metadata": {}, - "source": [ - "### Structured output\n", - "\n", - "We can also pass `strict: true` to the [`.withStructuredOutput()`](https://js.langchain.com/docs/how_to/structured_output/#the-.withstructuredoutput-method). 
Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "8e8171a5", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ADItECqSPuuEuBHHPjeCkh9wIO1H5\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 5,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 36\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_5796ac6771\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 5,\n", + " \"total_tokens\": 36\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " {\n", + " role: \"system\",\n", + " content: \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " },\n", + " {\n", + " role: \"user\",\n", + " content: \"I love programming.\"\n", + " },\n", + "])\n", + "aiMsg" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ traits: [ `6'5\" tall`, 'love fruit' ] }\n" - ] - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const traitSchema = z.object({\n", - " traits: z.array(z.string()).describe(\"A list of traits contained in the input\"),\n", - "});\n", - "\n", - "const structuredLlm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - "}).withStructuredOutput(traitSchema, {\n", - " name: \"extract_traits\",\n", - " strict: true,\n", - "});\n", - "\n", - "await structuredLlm.invoke([{\n", - " role: \"user\",\n", - " content: `I am 6'5\" tall and love fruit.`\n", - "}]);" - ] - }, - { - "cell_type": "markdown", - "id": "af20e756", - "metadata": {}, - "source": [ - "## Prompt caching\n", - "\n", - "Newer OpenAI models will automatically [cache parts of your prompt](https://openai.com/index/api-prompt-caching/) if your inputs are above a certain size (1024 tokens at the time of writing) in order to reduce costs for use-cases that require long context.\n", - "\n", - "**Note:** The number of tokens cached for a given query is not yet standardized in `AIMessage.usage_metadata`, and is instead contained in the `AIMessage.response_metadata` field.\n", - "\n", - "Here's an example" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb4e4fd0", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "const CACHED_TEXT = `## Components\n", - "\n", - "LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs.\n", - "Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.\n", - "\n", - "### Chat models\n", - "\n", - "\n", - "\n", - "Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).\n", - "These are generally newer models (older models are generally \\`LLMs\\`, see below).\n", - "Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.\n", - "\n", - "Although the underlying models are 
messages in, message out, the LangChain wrappers also allow these models to take a string as input.\n", - "This gives them the same interface as LLMs (and simpler to use).\n", - "When a string is passed in as input, it will be converted to a \\`HumanMessage\\` under the hood before being passed to the underlying model.\n", - "\n", - "LangChain does not host any Chat Models, rather we rely on third party integrations.\n", - "\n", - "We have some standardized parameters when constructing ChatModels:\n", - "\n", - "- \\`model\\`: the name of the model\n", - "\n", - "Chat Models also accept other parameters that are specific to that integration.\n", - "\n", - ":::important\n", - "Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it.\n", - "Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.\n", - "Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.\n", - ":::\n", - "\n", - "For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).\n", - "\n", - "#### Multimodality\n", - "\n", - "Some chat models are multimodal, accepting images, audio and even video as inputs.\n", - "These are still less common, meaning model providers haven't standardized on the \"best\" way to define the API.\n", - "Multimodal outputs are even less common. As such, we've kept our multimodal abstractions fairly light weight\n", - "and plan to further solidify the multimodal APIs and interaction patterns as the field matures.\n", - "\n", - "In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format.\n", - "So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.\n", - "\n", - "For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).\n", - "\n", - "### LLMs\n", - "\n", - "\n", - "\n", - ":::caution\n", - "Pure text-in/text-out LLMs tend to be older or lower-level. 
Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),\n", - "even for non-chat use cases.\n", - "\n", - "You are probably looking for [the section above instead](/docs/concepts/#chat-models).\n", - ":::\n", - "\n", - "Language models that takes a string as input and returns a string.\n", - "These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).\n", - "\n", - "Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.\n", - "This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).\n", - "When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.\n", - "\n", - "LangChain does not host any LLMs, rather we rely on third party integrations.\n", - "\n", - "For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).\n", - "\n", - "### Message types\n", - "\n", - "Some language models take an array of messages as input and return a message.\n", - "There are a few different types of messages.\n", - "All messages have a \\`role\\`, \\`content\\`, and \\`response_metadata\\` property.\n", - "\n", - "The \\`role\\` describes WHO is saying the message.\n", - "LangChain has different message classes for different roles.\n", - "\n", - "The \\`content\\` property describes the content of the message.\n", - "This can be a few different things:\n", - "\n", - "- A string (most models deal this type of content)\n", - "- A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location)\n", - "\n", - "#### HumanMessage\n", - "\n", - "This represents a message from the user.\n", - "\n", - "#### AIMessage\n", - "\n", - "This represents a message from the model. In addition to the \\`content\\` property, these messages also have:\n", - "\n", - "**\\`response_metadata\\`**\n", - "\n", - "The \\`response_metadata\\` property contains additional metadata about the response. The data here is often specific to each model provider.\n", - "This is where information like log-probs and token usage may be stored.\n", - "\n", - "**\\`tool_calls\\`**\n", - "\n", - "These represent a decision from an language model to call a tool. They are included as part of an \\`AIMessage\\` output.\n", - "They can be accessed from there with the \\`.tool_calls\\` property.\n", - "\n", - "This property returns a list of \\`ToolCall\\`s. A \\`ToolCall\\` is an object with the following arguments:\n", - "\n", - "- \\`name\\`: The name of the tool that should be called.\n", - "- \\`args\\`: The arguments to that tool.\n", - "- \\`id\\`: The id of that tool call.\n", - "\n", - "#### SystemMessage\n", - "\n", - "This represents a system message, which tells the model how to behave. Not every model provider supports this.\n", - "\n", - "#### ToolMessage\n", - "\n", - "This represents the result of a tool call. 
In addition to \\`role\\` and \\`content\\`, this message has:\n", - "\n", - "- a \\`tool_call_id\\` field which conveys the id of the call to the tool that was called to produce this result.\n", - "- an \\`artifact\\` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.\n", - "\n", - "#### (Legacy) FunctionMessage\n", - "\n", - "This is a legacy message type, corresponding to OpenAI's legacy function-calling API. \\`ToolMessage\\` should be used instead to correspond to the updated tool-calling API.\n", - "\n", - "This represents the result of a function call. In addition to \\`role\\` and \\`content\\`, this message has a \\`name\\` parameter which conveys the name of the function that was called to produce this result.\n", - "\n", - "### Prompt templates\n", - "\n", - "\n", - "\n", - "Prompt templates help to translate user input and parameters into instructions for a language model.\n", - "This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.\n", - "\n", - "Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in.\n", - "\n", - "Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages.\n", - "The reason this PromptValue exists is to make it easy to switch between strings and messages.\n", - "\n", - "There are a few different types of prompt templates:\n", - "\n", - "#### String PromptTemplates\n", - "\n", - "These prompt templates are used to format a single string, and generally are used for simpler inputs.\n", - "For example, a common way to construct and use a PromptTemplate is as follows:\n", - "\n", - "\\`\\`\\`typescript\n", - "import { PromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const promptTemplate = PromptTemplate.fromTemplate(\n", - " \"Tell me a joke about {topic}\"\n", - ");\n", - "\n", - "await promptTemplate.invoke({ topic: \"cats\" });\n", - "\\`\\`\\`\n", - "\n", - "#### ChatPromptTemplates\n", - "\n", - "These prompt templates are used to format an array of messages. 
These \"templates\" consist of an array of templates themselves.\n", - "For example, a common way to construct and use a ChatPromptTemplate is as follows:\n", - "\n", - "\\`\\`\\`typescript\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const promptTemplate = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " [\"user\", \"Tell me a joke about {topic}\"],\n", - "]);\n", - "\n", - "await promptTemplate.invoke({ topic: \"cats\" });\n", - "\\`\\`\\`\n", - "\n", - "In the above example, this ChatPromptTemplate will construct two messages when called.\n", - "The first is a system message, that has no variables to format.\n", - "The second is a HumanMessage, and will be formatted by the \\`topic\\` variable the user passes in.\n", - "\n", - "#### MessagesPlaceholder\n", - "\n", - "\n", - "\n", - "This prompt template is responsible for adding an array of messages in a particular place.\n", - "In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.\n", - "But what if we wanted the user to pass in an array of messages that we would slot into a particular spot?\n", - "This is how you use MessagesPlaceholder.\n", - "\n", - "\\`\\`\\`typescript\n", - "import {\n", - " ChatPromptTemplate,\n", - " MessagesPlaceholder,\n", - "} from \"@langchain/core/prompts\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const promptTemplate = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " new MessagesPlaceholder(\"msgs\"),\n", - "]);\n", - "\n", - "promptTemplate.invoke({ msgs: [new HumanMessage({ content: \"hi!\" })] });\n", - "\\`\\`\\`\n", - "\n", - "This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in.\n", - "If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in).\n", - "This is useful for letting an array of messages be slotted into a particular spot.\n", - "\n", - "An alternative way to accomplish the same thing without using the \\`MessagesPlaceholder\\` class explicitly is:\n", - "\n", - "\\`\\`\\`typescript\n", - "const promptTemplate = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant\"],\n", - " [\"placeholder\", \"{msgs}\"], // <-- This is the changed part\n", - "]);\n", - "\\`\\`\\`\n", - "\n", - "For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).\n", - "\n", - "### Example Selectors\n", - "\n", - "One common prompting technique for achieving better performance is to include examples as part of the prompt.\n", - "This gives the language model concrete examples of how it should behave.\n", - "Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.\n", - "Example Selectors are classes responsible for selecting and then formatting examples into prompts.\n", - "\n", - "For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).\n", - "\n", - "### Output parsers\n", - "\n", - "\n", - "\n", - ":::note\n", - "\n", - "The information here refers to parsers that take a text output from a model try to parse it into a more structured representation.\n", - "More and more models are supporting function (or tool) calling, which handles 
this automatically.\n", - "It is recommended to use function/tool calling rather than output parsing.\n", - "See documentation for that [here](/docs/concepts/#function-tool-calling).\n", - "\n", - ":::\n", - "\n", - "Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks.\n", - "Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.\n", - "\n", - "There are two main methods an output parser must implement:\n", - "\n", - "- \"Get format instructions\": A method which returns a string containing instructions for how the output of a language model should be formatted.\n", - "- \"Parse\": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.\n", - "\n", - "And then one optional one:\n", - "\n", - "- \"Parse with prompt\": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.\n", - "\n", - "Output parsers accept a string or \\`BaseMessage\\` as input and can return an arbitrary type.\n", - "\n", - "LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information:\n", - "\n", - "**Name**: The name of the output parser\n", - "\n", - "**Supports Streaming**: Whether the output parser supports streaming.\n", - "\n", - "**Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments.\n", - "\n", - "**Output Type**: The output type of the object returned by the parser.\n", - "\n", - "**Description**: Our commentary on this output parser and when to use it.\n", - "\n", - "The current date is ${new Date().toISOString()}`;\n", - "\n", - "// Noop statement to hide output\n", - "void 0;" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "7a43595c", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "USAGE: {\n", - " prompt_tokens: 2624,\n", - " completion_tokens: 263,\n", - " total_tokens: 2887,\n", - " prompt_tokens_details: { cached_tokens: 0 },\n", - " completion_tokens_details: { reasoning_tokens: 0 }\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const modelWithCaching = new ChatOpenAI({\n", - " model: \"gpt-4o-mini-2024-07-18\",\n", - "});\n", - "\n", - "// CACHED_TEXT is some string longer than 1024 tokens\n", - "const LONG_TEXT = `You are a pirate. 
Always respond in pirate dialect.\n", - "\n", - "Use the following as context when answering questions:\n", - "\n", - "${CACHED_TEXT}`;\n", - "\n", - "const longMessages = [\n", - " {\n", - " role: \"system\",\n", - " content: LONG_TEXT,\n", - " },\n", - " {\n", - " role: \"user\",\n", - " content: \"What types of messages are supported in LangChain?\",\n", - " },\n", - "];\n", - "\n", - "const originalRes = await modelWithCaching.invoke(longMessages);\n", - "\n", - "console.log(\"USAGE:\", originalRes.response_metadata.usage);" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "76c8005e", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "USAGE: {\n", - " prompt_tokens: 2624,\n", - " completion_tokens: 272,\n", - " total_tokens: 2896,\n", - " prompt_tokens_details: { cached_tokens: 2432 },\n", - " completion_tokens_details: { reasoning_tokens: 0 }\n", - "}\n" - ] - } - ], - "source": [ - "const resWitCaching = await modelWithCaching.invoke(longMessages);\n", - "\n", - "console.log(\"USAGE:\", resWitCaching.response_metadata.usage);" - ] - }, - { - "cell_type": "markdown", - "id": "cc8b3c94", - "metadata": {}, - "source": [ - "## Audio output\n", - "\n", - "Some OpenAI models (such as `gpt-4o-audio-preview`) support generating audio output. This example shows how to use that feature:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "b4d579b7", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ADItFaWFNqkSjSmlxeGk6HxcBHzVN\",\n", + " \"content\": \"Ich liebe Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 5,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 31\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_5796ac6771\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 5,\n", + " \"total_tokens\": 31\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " id: 'audio_67129e9466f48190be70372922464162',\n", - " data: 'UklGRgZ4BABXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGHA',\n", - " expires_at: 1729277092,\n", - " transcript: \"Why did the cat sit on the computer's keyboard? 
Because it wanted to keep an eye on the mouse!\"\n", - "}\n" - ] - } - ], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const modelWithAudioOutput = new ChatOpenAI({\n", - " model: \"gpt-4o-audio-preview\",\n", - " // You may also pass these fields to `.bind` as a call argument.\n", - " modalities: [\"text\", \"audio\"], // Specifies that the model should output audio.\n", - " audio: {\n", - " voice: \"alloy\",\n", - " format: \"wav\",\n", - " },\n", - "});\n", - "\n", - "const audioOutputResult = await modelWithAudioOutput.invoke(\"Tell me a joke about cats.\");\n", - "const castAudioContent = audioOutputResult.additional_kwargs.audio as Record;\n", - "\n", - "console.log({\n", - " ...castAudioContent,\n", - " data: castAudioContent.data.slice(0, 100) // Sliced for brevity\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "bfea3608", - "metadata": {}, - "source": [ - "We see that the audio data is returned inside the `data` field. We are also provided an `expires_at` date field. This field represents the date the audio response will no longer be accessible on the server for use in multi-turn conversations.\n", - "\n", - "### Streaming Audio Output\n", - "\n", - "OpenAI also supports streaming audio output. Here's an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "0fa68183", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "06ffc86b", + "metadata": {}, + "source": [ + "## Custom URLs\n", + "\n", + "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " id: 'audio_67129e976ce081908103ba4947399a3eaudio_67129e976ce081908103ba4947399a3e',\n", - " transcript: 'Why was the cat sitting on the computer? Because it wanted to keep an eye on the mouse!',\n", - " index: 0,\n", - " data: 'CgAGAAIADAAAAA0AAwAJAAcACQAJAAQABQABAAgABQAPAAAACAADAAUAAwD8/wUA+f8MAPv/CAD7/wUA///8/wUA/f8DAPj/AgD6',\n", - " expires_at: 1729277096\n", - "}\n" - ] - } - ], - "source": [ - "import { AIMessageChunk } from \"@langchain/core/messages\";\n", - "import { concat } from \"@langchain/core/utils/stream\"\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const modelWithStreamingAudioOutput = new ChatOpenAI({\n", - " model: \"gpt-4o-audio-preview\",\n", - " modalities: [\"text\", \"audio\"],\n", - " audio: {\n", - " voice: \"alloy\",\n", - " format: \"pcm16\", // Format must be `pcm16` for streaming\n", - " },\n", - "});\n", - "\n", - "const audioOutputStream = await modelWithStreamingAudioOutput.stream(\"Tell me a joke about cats.\");\n", - "let finalAudioOutputMsg: AIMessageChunk | undefined;\n", - "for await (const chunk of audioOutputStream) {\n", - " finalAudioOutputMsg = finalAudioOutputMsg ? concat(finalAudioOutputMsg, chunk) : chunk;\n", - "}\n", - "const castStreamedAudioContent = finalAudioOutputMsg?.additional_kwargs.audio as Record;\n", - "\n", - "console.log({\n", - " ...castStreamedAudioContent,\n", - " data: castStreamedAudioContent.data.slice(0, 100) // Sliced for brevity\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "e8b84aac", - "metadata": {}, - "source": [ - "### Audio input\n", - "\n", - "These models also support passing audio as input. 
For this, you must specify `input_audio` fields as seen below:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "1a69dad8", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": null, + "id": "19a092b9", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmWithCustomURL = new ChatOpenAI({\n", + " temperature: 0.9,\n", + " configuration: {\n", + " baseURL: \"https://your_custom_url.com\",\n", + " },\n", + "});\n", + "\n", + "await llmWithCustomURL.invoke(\"Hi there!\");" + ] + }, + { + "cell_type": "markdown", + "id": "20b60ccb", + "metadata": {}, + "source": [ + "The `configuration` field also accepts other `ClientOptions` parameters accepted by the official SDK.\n", + "\n", + "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/chat/azure).\n", + "\n", + "## Custom headers\n", + "\n", + "You can specify custom headers in the same `configuration` field:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd612609", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmWithCustomHeaders = new ChatOpenAI({\n", + " temperature: 0.9,\n", + " configuration: {\n", + " defaultHeaders: {\n", + " \"Authorization\": `Bearer SOME_CUSTOM_VALUE`,\n", + " },\n", + " },\n", + "});\n", + "\n", + "await llmWithCustomHeaders.invoke(\"Hi there!\");" + ] + }, + { + "cell_type": "markdown", + "id": "7af61d1d", + "metadata": {}, + "source": [ + "## Disabling streaming usage metadata\n", + "\n", + "Some proxies or third-party providers present largely the same API interface as OpenAI, but don't support the more recently added `stream_options` parameter to return streaming usage. You can use `ChatOpenAI` to access these providers by disabling streaming usage like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0ff40bd7", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmWithoutStreamUsage = new ChatOpenAI({\n", + " temperature: 0.9,\n", + " streamUsage: false,\n", + " configuration: {\n", + " baseURL: \"https://proxy.com\",\n", + " },\n", + "});\n", + "\n", + "await llmWithoutStreamUsage.invoke(\"Hi there!\");" + ] + }, + { + "cell_type": "markdown", + "id": "013b6300", + "metadata": {}, + "source": [ + "## Calling fine-tuned models\n", + "\n", + "You can call fine-tuned OpenAI models by passing in your corresponding `modelName` parameter.\n", + "\n", + "This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. 
For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7448f6a9", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const fineTunedLlm = new ChatOpenAI({\n", + " temperature: 0.9,\n", + " model: \"ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}\",\n", + "});\n", + "\n", + "await fineTunedLlm.invoke(\"Hi there!\");" + ] + }, + { + "cell_type": "markdown", + "id": "a2270901", + "metadata": {}, + "source": [ + "## Generation metadata\n", + "\n", + "If you need additional information like logprobs or token usage, these will be returned directly in the `.invoke` response within the `response_metadata` field on the message.\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip\n", + "Requires `@langchain/core` version >=0.1.48.\n", + ":::\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "2b675330", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " content: [\n", + " {\n", + " token: 'Hello',\n", + " logprob: -0.0004740447,\n", + " bytes: [ 72, 101, 108, 108, 111 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '!',\n", + " logprob: -0.00004334534,\n", + " bytes: [ 33 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' How',\n", + " logprob: -0.000030113732,\n", + " bytes: [ 32, 72, 111, 119 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' can',\n", + " logprob: -0.0004797665,\n", + " bytes: [ 32, 99, 97, 110 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' I',\n", + " logprob: -7.89631e-7,\n", + " bytes: [ 32, 73 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' assist',\n", + " logprob: -0.114006,\n", + " bytes: [\n", + " 32, 97, 115,\n", + " 115, 105, 115,\n", + " 116\n", + " ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' you',\n", + " logprob: -4.3202e-7,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' today',\n", + " logprob: -0.00004501419,\n", + " bytes: [ 32, 116, 111, 100, 97, 121 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '?',\n", + " logprob: -0.000010206721,\n", + " bytes: [ 63 ],\n", + " top_logprobs: []\n", + " }\n", + " ],\n", + " refusal: null\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "// See https://cookbook.openai.com/examples/using_logprobs for details\n", + "const llmWithLogprobs = new ChatOpenAI({\n", + " logprobs: true,\n", + " // topLogprobs: 5,\n", + "});\n", + "\n", + "const responseMessageWithLogprobs = await llmWithLogprobs.invoke(\"Hi there!\");\n", + "console.dir(responseMessageWithLogprobs.response_metadata.logprobs, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Tool calling with OpenAI models works in a similar to [other models](/docs/how_to/tool_calling). Additionally, the following guides have some information especially relevant to OpenAI:\n", + "\n", + "- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel/)\n", + "- [How to: force a tool call](/docs/how_to/tool_choice/)\n", + "- [How to: bind model-specific tool formats to a model](/docs/how_to/tool_calling#binding-model-specific-formats-advanced)." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "That's a great joke! 
It's always fun to imagine why cats do the funny things they do. Keeping an eye on the \"mouse\" is a creatively punny way to describe it!\n" - ] + "cell_type": "markdown", + "id": "3392390e", + "metadata": {}, + "source": [ + "## ``strict: true``\n", + "\n", + "As of Aug 6, 2024, OpenAI supports a `strict` argument when calling tools that will enforce that the tool argument schema is respected by the model. See more here: https://platform.openai.com/docs/guides/function-calling.\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info Requires ``@langchain/openai >= 0.2.6``\n", + "\n", + "**Note**: If ``strict: true`` the tool definition will also be validated, and a subset of JSON schema are accepted. Crucially, schema cannot have optional args (those with default values). Read the full docs on what types of schema are supported here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas. \n", + ":::\n", + "\n", + "\n", + "```\n", + "\n", + "Here's an example with tool calling. Passing an extra `strict: true` argument to `.bindTools` will pass the param through to all tool definitions:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "90f0d465", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'get_current_weather',\n", + " args: { location: 'current' },\n", + " type: 'tool_call',\n", + " id: 'call_hVFyYNRwc6CoTgr9AQFQVjm9'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { z } from \"zod\";\n", + "\n", + "const weatherTool = tool((_) => \"no-op\", {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather\",\n", + " schema: z.object({\n", + " location: z.string(),\n", + " }),\n", + "})\n", + "\n", + "const llmWithStrictTrue = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + "}).bindTools([weatherTool], {\n", + " strict: true,\n", + " tool_choice: weatherTool.name,\n", + "});\n", + "\n", + "// Although the question is not about the weather, it will call the tool with the correct arguments\n", + "// because we passed `tool_choice` and `strict: true`.\n", + "const strictTrueResult = await llmWithStrictTrue.invoke(\"What is 127862 times 12898 divided by 2?\");\n", + "\n", + "console.dir(strictTrueResult.tool_calls, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "6c46a668", + "metadata": {}, + "source": [ + "If you only want to apply this parameter to a select number of tools, you can also pass OpenAI formatted tool schemas directly:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "e2da9ead", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'get_current_weather',\n", + " args: { location: 'London' },\n", + " type: 'tool_call',\n", + " id: 'call_EOSejtax8aYtqpchY8n8O82l'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", + "\n", + "const toolSchema = {\n", + " type: \"function\",\n", + " function: {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather\",\n", + " strict: true,\n", + " parameters: zodToJsonSchema(\n", + " z.object({\n", + " location: z.string(),\n", + " })\n", + " ),\n", + " },\n", + "};\n", + "\n", + "const llmWithStrictTrueTools = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + "}).bindTools([toolSchema], 
{\n", + " strict: true,\n", + "});\n", + "\n", + "const weatherToolResult = await llmWithStrictTrueTools.invoke([{\n", + " role: \"user\",\n", + " content: \"What is the current weather in London?\"\n", + "}])\n", + "\n", + "weatherToolResult.tool_calls;" + ] + }, + { + "cell_type": "markdown", + "id": "045668fe", + "metadata": {}, + "source": [ + "### Structured output\n", + "\n", + "We can also pass `strict: true` to the [`.withStructuredOutput()`](https://js.langchain.com/docs/how_to/structured_output/#the-.withstructuredoutput-method). Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "8e8171a5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ traits: [ `6'5\" tall`, 'love fruit' ] }\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const traitSchema = z.object({\n", + " traits: z.array(z.string()).describe(\"A list of traits contained in the input\"),\n", + "});\n", + "\n", + "const structuredLlm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + "}).withStructuredOutput(traitSchema, {\n", + " name: \"extract_traits\",\n", + " strict: true,\n", + "});\n", + "\n", + "await structuredLlm.invoke([{\n", + " role: \"user\",\n", + " content: `I am 6'5\" tall and love fruit.`\n", + "}]);" + ] + }, + { + "cell_type": "markdown", + "id": "af20e756", + "metadata": {}, + "source": [ + "## Prompt caching\n", + "\n", + "Newer OpenAI models will automatically [cache parts of your prompt](https://openai.com/index/api-prompt-caching/) if your inputs are above a certain size (1024 tokens at the time of writing) in order to reduce costs for use-cases that require long context.\n", + "\n", + "**Note:** The number of tokens cached for a given query is not yet standardized in `AIMessage.usage_metadata`, and is instead contained in the `AIMessage.response_metadata` field.\n", + "\n", + "Here's an example" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb4e4fd0", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "const CACHED_TEXT = `## Components\n", + "\n", + "LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs.\n", + "Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.\n", + "\n", + "### Chat models\n", + "\n", + "\n", + "\n", + "Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).\n", + "These are generally newer models (older models are generally \\`LLMs\\`, see below).\n", + "Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.\n", + "\n", + "Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input.\n", + "This gives them the same interface as LLMs (and simpler to use).\n", + "When a string is passed in as input, it will be converted to a \\`HumanMessage\\` under the hood before being passed to the underlying model.\n", + "\n", + "LangChain does not host any Chat Models, rather we rely on third party integrations.\n", + "\n", + "We have some standardized parameters when constructing ChatModels:\n", + "\n", + "- \\`model\\`: the name of the model\n", + "\n", + "Chat Models also accept other 
parameters that are specific to that integration.\n", + "\n", + ":::important\n", + "Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it.\n", + "Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.\n", + "Please see the [tool calling section](/docs/concepts/tool_calling) for more information.\n", + ":::\n", + "\n", + "For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).\n", + "\n", + "#### Multimodality\n", + "\n", + "Some chat models are multimodal, accepting images, audio and even video as inputs.\n", + "These are still less common, meaning model providers haven't standardized on the \"best\" way to define the API.\n", + "Multimodal outputs are even less common. As such, we've kept our multimodal abstractions fairly light weight\n", + "and plan to further solidify the multimodal APIs and interaction patterns as the field matures.\n", + "\n", + "In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format.\n", + "So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.\n", + "\n", + "For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).\n", + "\n", + "### LLMs\n", + "\n", + "\n", + "\n", + ":::caution\n", + "Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/chat_models),\n", + "even for non-chat use cases.\n", + "\n", + "You are probably looking for [the section above instead](/docs/concepts/chat_models).\n", + ":::\n", + "\n", + "Language models that takes a string as input and returns a string.\n", + "These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/chat_models), see above).\n", + "\n", + "Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.\n", + "This gives them the same interface as [Chat Models](/docs/concepts/chat_models).\n", + "When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.\n", + "\n", + "LangChain does not host any LLMs, rather we rely on third party integrations.\n", + "\n", + "For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).\n", + "\n", + "### Message types\n", + "\n", + "Some language models take an array of messages as input and return a message.\n", + "There are a few different types of messages.\n", + "All messages have a \\`role\\`, \\`content\\`, and \\`response_metadata\\` property.\n", + "\n", + "The \\`role\\` describes WHO is saying the message.\n", + "LangChain has different message classes for different roles.\n", + "\n", + "The \\`content\\` property describes the content of the message.\n", + "This can be a few different things:\n", + "\n", + "- A string (most models deal this type of content)\n", + "- A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location)\n", + "\n", + "#### HumanMessage\n", + "\n", + "This represents a message from the user.\n", + "\n", + "#### AIMessage\n", + "\n", + "This represents a message from the model. 
In addition to the \\`content\\` property, these messages also have:\n", + "\n", + "**\\`response_metadata\\`**\n", + "\n", + "The \\`response_metadata\\` property contains additional metadata about the response. The data here is often specific to each model provider.\n", + "This is where information like log-probs and token usage may be stored.\n", + "\n", + "**\\`tool_calls\\`**\n", + "\n", + "These represent a decision from an language model to call a tool. They are included as part of an \\`AIMessage\\` output.\n", + "They can be accessed from there with the \\`.tool_calls\\` property.\n", + "\n", + "This property returns a list of \\`ToolCall\\`s. A \\`ToolCall\\` is an object with the following arguments:\n", + "\n", + "- \\`name\\`: The name of the tool that should be called.\n", + "- \\`args\\`: The arguments to that tool.\n", + "- \\`id\\`: The id of that tool call.\n", + "\n", + "#### SystemMessage\n", + "\n", + "This represents a system message, which tells the model how to behave. Not every model provider supports this.\n", + "\n", + "#### ToolMessage\n", + "\n", + "This represents the result of a tool call. In addition to \\`role\\` and \\`content\\`, this message has:\n", + "\n", + "- a \\`tool_call_id\\` field which conveys the id of the call to the tool that was called to produce this result.\n", + "- an \\`artifact\\` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.\n", + "\n", + "#### (Legacy) FunctionMessage\n", + "\n", + "This is a legacy message type, corresponding to OpenAI's legacy function-calling API. \\`ToolMessage\\` should be used instead to correspond to the updated tool-calling API.\n", + "\n", + "This represents the result of a function call. In addition to \\`role\\` and \\`content\\`, this message has a \\`name\\` parameter which conveys the name of the function that was called to produce this result.\n", + "\n", + "### Prompt templates\n", + "\n", + "\n", + "\n", + "Prompt templates help to translate user input and parameters into instructions for a language model.\n", + "This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.\n", + "\n", + "Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in.\n", + "\n", + "Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages.\n", + "The reason this PromptValue exists is to make it easy to switch between strings and messages.\n", + "\n", + "There are a few different types of prompt templates:\n", + "\n", + "#### String PromptTemplates\n", + "\n", + "These prompt templates are used to format a single string, and generally are used for simpler inputs.\n", + "For example, a common way to construct and use a PromptTemplate is as follows:\n", + "\n", + "\\`\\`\\`typescript\n", + "import { PromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const promptTemplate = PromptTemplate.fromTemplate(\n", + " \"Tell me a joke about {topic}\"\n", + ");\n", + "\n", + "await promptTemplate.invoke({ topic: \"cats\" });\n", + "\\`\\`\\`\n", + "\n", + "#### ChatPromptTemplates\n", + "\n", + "These prompt templates are used to format an array of messages. 
These \"templates\" consist of an array of templates themselves.\n", + "For example, a common way to construct and use a ChatPromptTemplate is as follows:\n", + "\n", + "\\`\\`\\`typescript\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const promptTemplate = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"user\", \"Tell me a joke about {topic}\"],\n", + "]);\n", + "\n", + "await promptTemplate.invoke({ topic: \"cats\" });\n", + "\\`\\`\\`\n", + "\n", + "In the above example, this ChatPromptTemplate will construct two messages when called.\n", + "The first is a system message, that has no variables to format.\n", + "The second is a HumanMessage, and will be formatted by the \\`topic\\` variable the user passes in.\n", + "\n", + "#### MessagesPlaceholder\n", + "\n", + "\n", + "\n", + "This prompt template is responsible for adding an array of messages in a particular place.\n", + "In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.\n", + "But what if we wanted the user to pass in an array of messages that we would slot into a particular spot?\n", + "This is how you use MessagesPlaceholder.\n", + "\n", + "\\`\\`\\`typescript\n", + "import {\n", + " ChatPromptTemplate,\n", + " MessagesPlaceholder,\n", + "} from \"@langchain/core/prompts\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const promptTemplate = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " new MessagesPlaceholder(\"msgs\"),\n", + "]);\n", + "\n", + "promptTemplate.invoke({ msgs: [new HumanMessage({ content: \"hi!\" })] });\n", + "\\`\\`\\`\n", + "\n", + "This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in.\n", + "If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in).\n", + "This is useful for letting an array of messages be slotted into a particular spot.\n", + "\n", + "An alternative way to accomplish the same thing without using the \\`MessagesPlaceholder\\` class explicitly is:\n", + "\n", + "\\`\\`\\`typescript\n", + "const promptTemplate = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"placeholder\", \"{msgs}\"], // <-- This is the changed part\n", + "]);\n", + "\\`\\`\\`\n", + "\n", + "For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).\n", + "\n", + "### Example Selectors\n", + "\n", + "One common prompting technique for achieving better performance is to include examples as part of the prompt.\n", + "This gives the language model concrete examples of how it should behave.\n", + "Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.\n", + "Example Selectors are classes responsible for selecting and then formatting examples into prompts.\n", + "\n", + "For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).\n", + "\n", + "### Output parsers\n", + "\n", + "\n", + "\n", + ":::note\n", + "\n", + "The information here refers to parsers that take a text output from a model try to parse it into a more structured representation.\n", + "More and more models are supporting function (or tool) calling, which handles 
this automatically.\n", + "It is recommended to use function/tool calling rather than output parsing.\n", + "See documentation for that [here](/docs/concepts/tool_calling).\n", + "\n", + ":::\n", + "\n", + "Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks.\n", + "Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.\n", + "\n", + "There are two main methods an output parser must implement:\n", + "\n", + "- \"Get format instructions\": A method which returns a string containing instructions for how the output of a language model should be formatted.\n", + "- \"Parse\": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.\n", + "\n", + "And then one optional one:\n", + "\n", + "- \"Parse with prompt\": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.\n", + "\n", + "Output parsers accept a string or \\`BaseMessage\\` as input and can return an arbitrary type.\n", + "\n", + "LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information:\n", + "\n", + "**Name**: The name of the output parser\n", + "\n", + "**Supports Streaming**: Whether the output parser supports streaming.\n", + "\n", + "**Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments.\n", + "\n", + "**Output Type**: The output type of the object returned by the parser.\n", + "\n", + "**Description**: Our commentary on this output parser and when to use it.\n", + "\n", + "The current date is ${new Date().toISOString()}`;\n", + "\n", + "// Noop statement to hide output\n", + "void 0;" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "7a43595c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "USAGE: {\n", + " prompt_tokens: 2624,\n", + " completion_tokens: 263,\n", + " total_tokens: 2887,\n", + " prompt_tokens_details: { cached_tokens: 0 },\n", + " completion_tokens_details: { reasoning_tokens: 0 }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const modelWithCaching = new ChatOpenAI({\n", + " model: \"gpt-4o-mini-2024-07-18\",\n", + "});\n", + "\n", + "// CACHED_TEXT is some string longer than 1024 tokens\n", + "const LONG_TEXT = `You are a pirate. 
Always respond in pirate dialect.\n", + "\n", + "Use the following as context when answering questions:\n", + "\n", + "${CACHED_TEXT}`;\n", + "\n", + "const longMessages = [\n", + " {\n", + " role: \"system\",\n", + " content: LONG_TEXT,\n", + " },\n", + " {\n", + " role: \"user\",\n", + " content: \"What types of messages are supported in LangChain?\",\n", + " },\n", + "];\n", + "\n", + "const originalRes = await modelWithCaching.invoke(longMessages);\n", + "\n", + "console.log(\"USAGE:\", originalRes.response_metadata.usage);" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "76c8005e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "USAGE: {\n", + " prompt_tokens: 2624,\n", + " completion_tokens: 272,\n", + " total_tokens: 2896,\n", + " prompt_tokens_details: { cached_tokens: 2432 },\n", + " completion_tokens_details: { reasoning_tokens: 0 }\n", + "}\n" + ] + } + ], + "source": [ + "const resWitCaching = await modelWithCaching.invoke(longMessages);\n", + "\n", + "console.log(\"USAGE:\", resWitCaching.response_metadata.usage);" + ] + }, + { + "cell_type": "markdown", + "id": "cc8b3c94", + "metadata": {}, + "source": [ + "## Audio output\n", + "\n", + "Some OpenAI models (such as `gpt-4o-audio-preview`) support generating audio output. This example shows how to use that feature:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "b4d579b7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " id: 'audio_67129e9466f48190be70372922464162',\n", + " data: 'UklGRgZ4BABXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAATElTVBoAAABJTkZPSVNGVA4AAABMYXZmNTguMjkuMTAwAGRhdGHA',\n", + " expires_at: 1729277092,\n", + " transcript: \"Why did the cat sit on the computer's keyboard? Because it wanted to keep an eye on the mouse!\"\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const modelWithAudioOutput = new ChatOpenAI({\n", + " model: \"gpt-4o-audio-preview\",\n", + " // You may also pass these fields to `.bind` as a call argument.\n", + " modalities: [\"text\", \"audio\"], // Specifies that the model should output audio.\n", + " audio: {\n", + " voice: \"alloy\",\n", + " format: \"wav\",\n", + " },\n", + "});\n", + "\n", + "const audioOutputResult = await modelWithAudioOutput.invoke(\"Tell me a joke about cats.\");\n", + "const castAudioContent = audioOutputResult.additional_kwargs.audio as Record;\n", + "\n", + "console.log({\n", + " ...castAudioContent,\n", + " data: castAudioContent.data.slice(0, 100) // Sliced for brevity\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "bfea3608", + "metadata": {}, + "source": [ + "We see that the audio data is returned inside the `data` field. We are also provided an `expires_at` date field. This field represents the date the audio response will no longer be accessible on the server for use in multi-turn conversations.\n", + "\n", + "### Streaming Audio Output\n", + "\n", + "OpenAI also supports streaming audio output. Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "0fa68183", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " id: 'audio_67129e976ce081908103ba4947399a3eaudio_67129e976ce081908103ba4947399a3e',\n", + " transcript: 'Why was the cat sitting on the computer? 
Because it wanted to keep an eye on the mouse!',\n", + " index: 0,\n", + " data: 'CgAGAAIADAAAAA0AAwAJAAcACQAJAAQABQABAAgABQAPAAAACAADAAUAAwD8/wUA+f8MAPv/CAD7/wUA///8/wUA/f8DAPj/AgD6',\n", + " expires_at: 1729277096\n", + "}\n" + ] + } + ], + "source": [ + "import { AIMessageChunk } from \"@langchain/core/messages\";\n", + "import { concat } from \"@langchain/core/utils/stream\"\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const modelWithStreamingAudioOutput = new ChatOpenAI({\n", + " model: \"gpt-4o-audio-preview\",\n", + " modalities: [\"text\", \"audio\"],\n", + " audio: {\n", + " voice: \"alloy\",\n", + " format: \"pcm16\", // Format must be `pcm16` for streaming\n", + " },\n", + "});\n", + "\n", + "const audioOutputStream = await modelWithStreamingAudioOutput.stream(\"Tell me a joke about cats.\");\n", + "let finalAudioOutputMsg: AIMessageChunk | undefined;\n", + "for await (const chunk of audioOutputStream) {\n", + " finalAudioOutputMsg = finalAudioOutputMsg ? concat(finalAudioOutputMsg, chunk) : chunk;\n", + "}\n", + "const castStreamedAudioContent = finalAudioOutputMsg?.additional_kwargs.audio as Record;\n", + "\n", + "console.log({\n", + " ...castStreamedAudioContent,\n", + " data: castStreamedAudioContent.data.slice(0, 100) // Sliced for brevity\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "e8b84aac", + "metadata": {}, + "source": [ + "### Audio input\n", + "\n", + "These models also support passing audio as input. For this, you must specify `input_audio` fields as seen below:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1a69dad8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "That's a great joke! It's always fun to imagine why cats do the funny things they do. 
Keeping an eye on the \"mouse\" is a creatively punny way to describe it!\n" + ] + } + ], + "source": [ + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const userInput = new HumanMessage({\n", + " content: [{\n", + " type: \"input_audio\",\n", + " input_audio: {\n", + " data: castAudioContent.data, // Re-use the base64 data from the first example\n", + " format: \"wav\",\n", + " },\n", + " }]\n", + "})\n", + "\n", + "// Re-use the same model instance\n", + "const userInputAudioRes = await modelWithAudioOutput.invoke([userInput]);\n", + "\n", + "console.log((userInputAudioRes.additional_kwargs.audio as Record).transcript);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const userInput = new HumanMessage({\n", - " content: [{\n", - " type: \"input_audio\",\n", - " input_audio: {\n", - " data: castAudioContent.data, // Re-use the base64 data from the first example\n", - " format: \"wav\",\n", - " },\n", - " }]\n", - "})\n", - "\n", - "// Re-use the same model instance\n", - "const userInputAudioRes = await modelWithAudioOutput.invoke([userInput]);\n", - "\n", - "console.log((userInputAudioRes.additional_kwargs.audio as Record).transcript);" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/premai.mdx b/docs/core_docs/docs/integrations/chat/premai.mdx index 0f1af8aa7906..e933cd3a78dc 100644 --- a/docs/core_docs/docs/integrations/chat/premai.mdx +++ b/docs/core_docs/docs/integrations/chat/premai.mdx @@ -31,5 +31,5 @@ import PremAI from "@examples/models/chat/integration_premai.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx b/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx index 21a92fda13b5..8b9b163c60ff 100644 --- a/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx +++ 
b/docs/core_docs/docs/integrations/chat/prompt_layer_openai.mdx @@ -54,5 +54,5 @@ console.log(JSON.stringify(respA, null, 3)); ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx b/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx index cb09e71a4164..07e3e5c35121 100644 --- a/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx +++ b/docs/core_docs/docs/integrations/chat/tencent_hunyuan.mdx @@ -42,5 +42,5 @@ import TencentHunyuan from "@examples/models/chat/integration_tencent_hunyuan.ts ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/togetherai.ipynb b/docs/core_docs/docs/integrations/chat/togetherai.ipynb index 9c23fd5dc10a..09a0271059d7 100644 --- a/docs/core_docs/docs/integrations/chat/togetherai.ipynb +++ b/docs/core_docs/docs/integrations/chat/togetherai.ipynb @@ -1,276 +1,276 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Together\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ChatTogetherAI\n", - "\n", - "[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.\n", - "\n", - "This guide will help you getting started with `ChatTogetherAI` [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all `ChatTogetherAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/togetherai) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatTogetherAI](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "### Model features\n", - "\n", - "See the links in the table headers below for guides on how to use specific features.\n", - "\n", - "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | \n", - "\n", - "## Setup\n", - "\n", - "To access `ChatTogetherAI` models you'll need to create a Together account, get an API key [here](https://api.together.xyz/), and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [api.together.ai](https://api.together.ai/) to sign up to TogetherAI and generate an API key. 
Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain ChatTogetherAI integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatTogetherAI } from \"@langchain/community/chat_models/togetherai\"\n", - "\n", - "const llm = new ChatTogetherAI({\n", - " model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n", - " temperature: 0,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2b4f3e15", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62e0dbc3", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9rT9qEDPZ6iLCk6jt3XTzVDDH6pcI\",\n", - " \"content\": \"J'adore la programmation.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 8,\n", - " \"promptTokens\": 31,\n", - " \"totalTokens\": 39\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 31,\n", - " \"output_tokens\": 8,\n", - " \"total_tokens\": 39\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const aiMsg = await llm.invoke([\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", - " ],\n", - " [\"human\", \"I love programming.\"],\n", - "])\n", - "aiMsg" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Together\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "J'adore la programmation.\n" - ] - } - ], - "source": [ - "console.log(aiMsg.content)" - ] - }, - { - "cell_type": "markdown", - "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatTogetherAI\n", + "\n", + "[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.\n", + "\n", + "This guide will help you getting started with `ChatTogetherAI` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatTogetherAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/togetherai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatTogetherAI](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `ChatTogetherAI` models you'll need to create a Together account, get an API key [here](https://api.together.xyz/), and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [api.together.ai](https://api.together.ai/) to sign up to TogetherAI and generate an API key. 
Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatTogetherAI integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatTogetherAI } from \"@langchain/community/chat_models/togetherai\"\n", + "\n", + "const llm = new ChatTogetherAI({\n", + " model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n", + " temperature: 0,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rT9qEDPZ6iLCk6jt3XTzVDDH6pcI\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-9rT9wolZWfJ3xovORxnkdf1rcPbbY\",\n", - " \"content\": \"Ich liebe das Programmieren.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 6,\n", - " \"promptTokens\": 26,\n", - " \"totalTokens\": 32\n", - " },\n", - " \"finish_reason\": \"stop\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 26,\n", - " \"output_tokens\": 6,\n", - " \"total_tokens\": 32\n", - " }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rT9wolZWfJ3xovORxnkdf1rcPbbY\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "Behind the scenes, TogetherAI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", + "\n", + "## API reference\n", + "\n", + "For detailed documentation of all ChatTogetherAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - 
"\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", - " ],\n", - " [\"human\", \"{input}\"],\n", - " ]\n", - ")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " input_language: \"English\",\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "Behind the scenes, TogetherAI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", - "\n", - "## API reference\n", - "\n", - "For detailed documentation of all ChatTogetherAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/web_llm.mdx b/docs/core_docs/docs/integrations/chat/web_llm.mdx index 9e6720576a68..ccbf7ef37876 100644 --- a/docs/core_docs/docs/integrations/chat/web_llm.mdx +++ b/docs/core_docs/docs/integrations/chat/web_llm.mdx @@ -43,5 +43,5 @@ For a full end-to-end example, check out [this project](https://github.com/jacob ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/yandex.mdx b/docs/core_docs/docs/integrations/chat/yandex.mdx index 481515454ead..f91d6bd44eb6 100644 --- a/docs/core_docs/docs/integrations/chat/yandex.mdx +++ b/docs/core_docs/docs/integrations/chat/yandex.mdx @@ -34,5 +34,5 @@ import YandexGPTChatExample from "@examples/models/chat/integration_yandex.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/chat/zhipuai.mdx b/docs/core_docs/docs/integrations/chat/zhipuai.mdx index 2a3dca4a4d5a..c831a8aa6c6e 100644 --- a/docs/core_docs/docs/integrations/chat/zhipuai.mdx +++ b/docs/core_docs/docs/integrations/chat/zhipuai.mdx @@ -36,5 +36,5 @@ import ZhipuAI from "@examples/models/chat/integration_zhipuai.ts"; ## Related -- Chat model [conceptual guide](/docs/concepts/#chat-models) +- Chat model [conceptual guide](/docs/concepts/chat_models) - Chat model [how-to guides](/docs/how_to/#chat-models) diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/csv.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/csv.ipynb index b50de026a839..0026061919ad 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/csv.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/file_loaders/csv.ipynb @@ -1,226 +1,226 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - 
"source": [ - "---\n", - "sidebar_label: CSV\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# CSVLoader\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "\n", - "Only available on Node.js.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "This notebook provides a quick overview for getting started with `CSVLoader` [document loaders](/docs/concepts/#document-loaders). For detailed documentation of all `CSVLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_csv.CSVLoader.html).\n", - "\n", - "This example goes over how to load data from CSV files. The second argument is the `column` name to extract from the CSV file. One document will be created for each row in the CSV file. When `column` is not specified, each row is converted into a key/value pair with each key/value pair outputted to a new line in the document's `pageContent`. When `column` is specified, one document is created for each row, and the value of the specified column is used as the document's `pageContent`.\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Compatibility | Local | [PY support](https://python.langchain.com/docs/integrations/document_loaders/csv)| \n", - "| :--- | :--- | :---: | :---: | :---: |\n", - "| [CSVLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_csv.CSVLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_fs_csv.html) | Node-only | ✅ | ✅ |\n", - "\n", - "## Setup\n", - "\n", - "To access `CSVLoader` document loader you'll need to install the `@langchain/community` integration, along with the `d3-dsv@2` peer dependency.\n", - "\n", - "### Installation\n", - "\n", - "The LangChain CSVLoader integration lives in the `@langchain/community` integration package.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core d3-dsv@2\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and load documents:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "import { CSVLoader } from \"@langchain/community/document_loaders/fs/csv\"\n", - "\n", - "const exampleCsvPath = \"../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv\";\n", - "\n", - "const loader = new CSVLoader(exampleCsvPath)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: 'id|html: 1|\"Corruption discovered at the core of the Banking Clan!\"',\n", - " metadata: {\n", - " source: '../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv',\n", - " line: 1\n", - " },\n", - " id: undefined\n", - "}\n" - ] - } - ], - "source": [ - "const docs = await loader.load()\n", - "docs[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ + 
"cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: CSV\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " source: '../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv',\n", - " line: 1\n", - "}\n" - ] - } - ], - "source": [ - "console.log(docs[0].metadata)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Usage, extracting a single column\n", - "\n", - "Example CSV file:\n", - "\n", - "```csv\n", - "id|html\n", - "1|\"Corruption discovered at the core of the Banking Clan!\"\n", - "2|\"Reunited, Rush Clovis and Senator Amidala\"\n", - "3|\"discover the full extent of the deception.\"\n", - "4|\"Anakin Skywalker is sent to the rescue!\"\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CSVLoader\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This notebook provides a quick overview for getting started with `CSVLoader` [document loaders](/docs/concepts/document_loaders). For detailed documentation of all `CSVLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_csv.CSVLoader.html).\n", + "\n", + "This example goes over how to load data from CSV files. The second argument is the `column` name to extract from the CSV file. One document will be created for each row in the CSV file. When `column` is not specified, each row is converted into a key/value pair with each key/value pair outputted to a new line in the document's `pageContent`. 
When `column` is specified, one document is created for each row, and the value of the specified column is used as the document's `pageContent`.\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Compatibility | Local | [PY support](https://python.langchain.com/docs/integrations/document_loaders/csv)| \n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [CSVLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_csv.CSVLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_fs_csv.html) | Node-only | ✅ | ✅ |\n", + "\n", + "## Setup\n", + "\n", + "To access `CSVLoader` document loader you'll need to install the `@langchain/community` integration, along with the `d3-dsv@2` peer dependency.\n", + "\n", + "### Installation\n", + "\n", + "The LangChain CSVLoader integration lives in the `@langchain/community` integration package.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core d3-dsv@2\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: 'Corruption discovered at the core of the Banking Clan!',\n", - " metadata: {\n", - " source: '../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv',\n", - " line: 1\n", - " },\n", - " id: undefined\n", - "}\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "import { CSVLoader } from \"@langchain/community/document_loaders/fs/csv\"\n", + "\n", + "const exampleCsvPath = \"../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv\";\n", + "\n", + "const loader = new CSVLoader(exampleCsvPath)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: 'id|html: 1|\"Corruption discovered at the core of the Banking Clan!\"',\n", + " metadata: {\n", + " source: '../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv',\n", + " line: 1\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " source: '../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv',\n", + " line: 1\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage, extracting a single column\n", + "\n", + "Example CSV file:\n", + "\n", + "```csv\n", + "id|html\n", + "1|\"Corruption discovered at the core of the Banking Clan!\"\n", + "2|\"Reunited, Rush Clovis and Senator Amidala\"\n", + "3|\"discover the full extent of the deception.\"\n", + "4|\"Anakin Skywalker is sent to the rescue!\"\n", + "```" + ] + 
}, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: 'Corruption discovered at the core of the Banking Clan!',\n", + " metadata: {\n", + " source: '../../../../../../langchain/src/document_loaders/tests/example_data/example_separator.csv',\n", + " line: 1\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "import { CSVLoader } from \"@langchain/community/document_loaders/fs/csv\";\n", + "\n", + "const singleColumnLoader = new CSVLoader(\n", + " exampleCsvPath,\n", + " {\n", + " column: \"html\",\n", + " separator:\"|\"\n", + " }\n", + ");\n", + "\n", + "const singleColumnDocs = await singleColumnLoader.load();\n", + "console.log(singleColumnDocs[0]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all CSVLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_csv.CSVLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { CSVLoader } from \"@langchain/community/document_loaders/fs/csv\";\n", - "\n", - "const singleColumnLoader = new CSVLoader(\n", - " exampleCsvPath,\n", - " {\n", - " column: \"html\",\n", - " separator:\"|\"\n", - " }\n", - ");\n", - "\n", - "const singleColumnDocs = await singleColumnLoader.load();\n", - "console.log(singleColumnDocs[0]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all CSVLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_csv.CSVLoader.html" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb index c6c50036c112..3bbe514844d9 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/file_loaders/directory.ipynb @@ -1,192 +1,192 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: DirectoryLoader\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# DirectoryLoader\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "\n", - "Only available on Node.js.\n", - "\n", - ":::\n", - 
"\n", - "```\n", - "\n", - "This notebook provides a quick overview for getting started with `DirectoryLoader` [document loaders](/docs/concepts/#document-loaders). For detailed documentation of all `DirectoryLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html).\n", - "\n", - "This example goes over how to load data from folders with multiple files. The second argument is a map of file extensions to loader factories. Each file will be passed to the matching loader, and the resulting documents will be concatenated together.\n", - "\n", - "Example folder:\n", - "\n", - "```text\n", - "src/document_loaders/example_data/example/\n", - "├── example.json\n", - "├── example.jsonl\n", - "├── example.txt\n", - "└── example.csv\n", - "```\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Compatibility | Local | PY support | \n", - "| :--- | :--- | :---: | :---: | :---: |\n", - "| [DirectoryLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain.document_loaders_fs_directory.html) | Node-only | ✅ | ✅ |\n", - "\n", - "## Setup\n", - "\n", - "To access `DirectoryLoader` document loader you'll need to install the `langchain` package.\n", - "\n", - "### Installation\n", - "\n", - "The LangChain DirectoryLoader integration lives in the `langchain` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " langchain @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and load documents:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { DirectoryLoader } from \"langchain/document_loaders/fs/directory\";\n", - "import {\n", - " JSONLoader,\n", - " JSONLinesLoader,\n", - "} from \"langchain/document_loaders/fs/json\";\n", - "import { TextLoader } from \"langchain/document_loaders/fs/text\";\n", - "import { CSVLoader } from \"@langchain/community/document_loaders/fs/csv\";\n", - "\n", - "const loader = new DirectoryLoader(\n", - " \"../../../../../../examples/src/document_loaders/example_data\",\n", - " {\n", - " \".json\": (path) => new JSONLoader(path, \"/texts\"),\n", - " \".jsonl\": (path) => new JSONLinesLoader(path, \"/html\"),\n", - " \".txt\": (path) => new TextLoader(path),\n", - " \".csv\": (path) => new CSVLoader(path, \"text\"),\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: 'Foo\\nBar\\nBaz\\n\\n',\n", - " metadata: {\n", - " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/example.txt'\n", - " },\n", - " id: undefined\n", - "}\n" - ] - } - ], - "source": [ - "const docs = await loader.load()\n", - "// disable console.warn calls\n", - "console.warn = () => {}\n", - "docs[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": 
[ + "cell_type": "raw", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: DirectoryLoader\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DirectoryLoader\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This notebook provides a quick overview for getting started with `DirectoryLoader` [document loaders](/docs/concepts/document_loaders). For detailed documentation of all `DirectoryLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html).\n", + "\n", + "This example goes over how to load data from folders with multiple files. The second argument is a map of file extensions to loader factories. Each file will be passed to the matching loader, and the resulting documents will be concatenated together.\n", + "\n", + "Example folder:\n", + "\n", + "```text\n", + "src/document_loaders/example_data/example/\n", + "├── example.json\n", + "├── example.jsonl\n", + "├── example.txt\n", + "└── example.csv\n", + "```\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Compatibility | Local | PY support | \n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [DirectoryLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain.document_loaders_fs_directory.html) | Node-only | ✅ | ✅ |\n", + "\n", + "## Setup\n", + "\n", + "To access `DirectoryLoader` document loader you'll need to install the `langchain` package.\n", + "\n", + "### Installation\n", + "\n", + "The LangChain DirectoryLoader integration lives in the `langchain` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " langchain @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/example.txt'\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { DirectoryLoader } from \"langchain/document_loaders/fs/directory\";\n", + "import {\n", + " JSONLoader,\n", + " JSONLinesLoader,\n", + "} from \"langchain/document_loaders/fs/json\";\n", + "import { TextLoader } from \"langchain/document_loaders/fs/text\";\n", + "import { CSVLoader } from \"@langchain/community/document_loaders/fs/csv\";\n", + "\n", + "const loader = new DirectoryLoader(\n", + " \"../../../../../../examples/src/document_loaders/example_data\",\n", + " {\n", + " \".json\": (path) => new JSONLoader(path, \"/texts\"),\n", + " \".jsonl\": (path) => new JSONLinesLoader(path, \"/html\"),\n", + " \".txt\": (path) => new TextLoader(path),\n", + " \".csv\": (path) => new CSVLoader(path, \"text\"),\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + 
"outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: 'Foo\\nBar\\nBaz\\n\\n',\n", + " metadata: {\n", + " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/example.txt'\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "// disable console.warn calls\n", + "console.warn = () => {}\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/example.txt'\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all DirectoryLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "console.log(docs[0].metadata)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all DirectoryLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain.document_loaders_fs_directory.DirectoryLoader.html" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/pdf.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/pdf.ipynb index 1a4e0ec67788..9ef008dd93b1 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/pdf.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/file_loaders/pdf.ipynb @@ -1,502 +1,502 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: PDFLoader\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# PDFLoader\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "\n", - "Only available on Node.js.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "This notebook provides a quick overview for getting started with `PDFLoader` [document loaders](/docs/concepts/#document-loaders). 
For detailed documentation of all `PDFLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_pdf.PDFLoader.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Compatibility | Local | PY support | \n", - "| :--- | :--- | :---: | :---: | :---: |\n", - "| [PDFLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_pdf.PDFLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_fs_pdf.html) | Node-only | ✅ | 🟠 (See note below) |\n", - "\n", - "> The Python package has many PDF loaders to choose from. See [this link](https://python.langchain.com/docs/integrations/document_loaders/) for a full list of Python document loaders.\n", - "\n", - "## Setup\n", - "\n", - "To access `PDFLoader` document loader you'll need to install the `@langchain/community` integration, along with the `pdf-parse` package.\n", - "\n", - "### Credentials\n", - "\n", - "### Installation\n", - "\n", - "The LangChain PDFLoader integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core pdf-parse\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and load documents:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\"\n", - "\n", - "const nike10kPdfPath = \"../../../../data/nke-10k-2023.pdf\"\n", - "\n", - "const loader = new PDFLoader(nike10kPdfPath)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: 'Table of Contents\\n' +\n", - " 'UNITED STATES\\n' +\n", - " 'SECURITIES AND EXCHANGE COMMISSION\\n' +\n", - " 'Washington, D.C. 20549\\n' +\n", - " 'FORM 10-K\\n' +\n", - " '(Mark One)\\n' +\n", - " '☑ ANNUAL REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\\n' +\n", - " 'FOR THE FISCAL YEAR ENDED MAY 31, 2023\\n' +\n", - " 'OR\\n' +\n", - " '☐ TRANSITION REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\\n' +\n", - " 'FOR THE TRANSITION PERIOD FROM TO .\\n' +\n", - " 'Commission File No. 
1-10635\\n' +\n", - " 'NIKE, Inc.\\n' +\n", - " '(Exact name of Registrant as specified in its charter)\\n' +\n", - " 'Oregon93-0584541\\n' +\n", - " '(State or other jurisdiction of incorporation)(IRS Employer Identification No.)\\n' +\n", - " 'One Bowerman Drive, Beaverton, Oregon 97005-6453\\n' +\n", - " '(Address of principal executive offices and zip code)\\n' +\n", - " '(503) 671-6453\\n' +\n", - " \"(Registrant's telephone number, including area code)\\n\" +\n", - " 'SECURITIES REGISTERED PURSUANT TO SECTION 12(B) OF THE ACT:\\n' +\n", - " 'Class B Common StockNKENew York Stock Exchange\\n' +\n", - " '(Title of each class)(Trading symbol)(Name of each exchange on which registered)\\n' +\n", - " 'SECURITIES REGISTERED PURSUANT TO SECTION 12(G) OF THE ACT:\\n' +\n", - " 'NONE\\n' +\n", - " 'Indicate by check mark:YESNO\\n' +\n", - " '•if the registrant is a well-known seasoned issuer, as defined in Rule 405 of the Securities Act.þ ̈\\n' +\n", - " '•if the registrant is not required to file reports pursuant to Section 13 or Section 15(d) of the Act. ̈þ\\n' +\n", - " '•whether the registrant (1) has filed all reports required to be filed by Section 13 or 15(d) of the Securities Exchange Act of 1934 during the preceding\\n' +\n", - " '12 months (or for such shorter period that the registrant was required to file such reports), and (2) has been subject to such filing requirements for the\\n' +\n", - " 'past 90 days.\\n' +\n", - " 'þ ̈\\n' +\n", - " '•whether the registrant has submitted electronically every Interactive Data File required to be submitted pursuant to Rule 405 of Regulation S-T\\n' +\n", - " '(§232.405 of this chapter) during the preceding 12 months (or for such shorter period that the registrant was required to submit such files).\\n' +\n", - " 'þ ̈\\n' +\n", - " '•whether the registrant is a large accelerated filer, an accelerated filer, a non-accelerated filer, a smaller reporting company or an emerging growth company. See the definitions of “large accelerated filer,”\\n' +\n", - " '“accelerated filer,” “smaller reporting company,” and “emerging growth company” in Rule 12b-2 of the Exchange Act.\\n' +\n", - " 'Large accelerated filerþAccelerated filer☐Non-accelerated filer☐Smaller reporting company☐Emerging growth company☐\\n' +\n", - " '•if an emerging growth company, if the registrant has elected not to use the extended transition period for complying with any new or revised financial\\n' +\n", - " 'accounting standards provided pursuant to Section 13(a) of the Exchange Act.\\n' +\n", - " ' ̈\\n' +\n", - " \"•whether the registrant has filed a report on and attestation to its management's assessment of the effectiveness of its internal control over financial\\n\" +\n", - " 'reporting under Section 404(b) of the Sarbanes-Oxley Act (15 U.S.C. 
7262(b)) by the registered public accounting firm that prepared or issued its audit\\n' +\n", - " 'report.\\n' +\n", - " 'þ\\n' +\n", - " '•if securities are registered pursuant to Section 12(b) of the Act, whether the financial statements of the registrant included in the filing reflect the\\n' +\n", - " 'correction of an error to previously issued financial statements.\\n' +\n", - " ' ̈\\n' +\n", - " '•whether any of those error corrections are restatements that required a recovery analysis of incentive-based compensation received by any of the\\n' +\n", - " \"registrant's executive officers during the relevant recovery period pursuant to § 240.10D-1(b).\\n\" +\n", - " ' ̈\\n' +\n", - " '•\\n' +\n", - " 'whether the registrant is a shell company (as defined in Rule 12b-2 of the Act).☐þ\\n' +\n", - " \"As of November 30, 2022, the aggregate market values of the Registrant's Common Stock held by non-affiliates were:\\n\" +\n", - " 'Class A$7,831,564,572 \\n' +\n", - " 'Class B136,467,702,472 \\n' +\n", - " '$144,299,267,044 ',\n", - " metadata: {\n", - " source: '../../../../data/nke-10k-2023.pdf',\n", - " pdf: {\n", - " version: '1.10.100',\n", - " info: [Object],\n", - " metadata: null,\n", - " totalPages: 107\n", - " },\n", - " loc: { pageNumber: 1 }\n", - " },\n", - " id: undefined\n", - "}\n" - ] - } - ], - "source": [ - "const docs = await loader.load()\n", - "docs[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: PDFLoader\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " source: '../../../../data/nke-10k-2023.pdf',\n", - " pdf: {\n", - " version: '1.10.100',\n", - " info: {\n", - " PDFFormatVersion: '1.4',\n", - " IsAcroFormPresent: false,\n", - " IsXFAPresent: false,\n", - " Title: '0000320187-23-000039',\n", - " Author: 'EDGAR Online, a division of Donnelley Financial Solutions',\n", - " Subject: 'Form 10-K filed on 2023-07-20 for the period ending 2023-05-31',\n", - " Keywords: '0000320187-23-000039; ; 10-K',\n", - " Creator: 'EDGAR Filing HTML Converter',\n", - " Producer: 'EDGRpdf Service w/ EO.Pdf 22.0.40.0',\n", - " CreationDate: \"D:20230720162200-04'00'\",\n", - " ModDate: \"D:20230720162208-04'00'\"\n", - " },\n", - " metadata: null,\n", - " totalPages: 107\n", - " },\n", - " loc: { pageNumber: 1 }\n", - "}\n" - ] - } - ], - "source": [ - "console.log(docs[0].metadata)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Usage, one document per file" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# PDFLoader\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This notebook provides a quick overview for getting started with `PDFLoader` [document loaders](/docs/concepts/document_loaders). 
For detailed documentation of all `PDFLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_pdf.PDFLoader.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Compatibility | Local | PY support | \n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [PDFLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_pdf.PDFLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_fs_pdf.html) | Node-only | ✅ | 🟠 (See note below) |\n", + "\n", + "> The Python package has many PDF loaders to choose from. See [this link](https://python.langchain.com/docs/integrations/document_loaders/) for a full list of Python document loaders.\n", + "\n", + "## Setup\n", + "\n", + "To access `PDFLoader` document loader you'll need to install the `@langchain/community` integration, along with the `pdf-parse` package.\n", + "\n", + "### Credentials\n", + "\n", + "### Installation\n", + "\n", + "The LangChain PDFLoader integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core pdf-parse\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Table of Contents\n", - "UNITED STATES\n", - "SECURITIES AND EXCHANGE COMMISSION\n", - "Washington, D.C. 20549\n", - "FORM 10-K\n", - "\n" - ] - } - ], - "source": [ - "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", - "\n", - "const singleDocPerFileLoader = new PDFLoader(nike10kPdfPath, {\n", - " splitPages: false,\n", - "});\n", - "\n", - "const singleDoc = await singleDocPerFileLoader.load();\n", - "console.log(singleDoc[0].pageContent.slice(0, 100))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Usage, custom `pdfjs` build\n", - "\n", - "By default we use the `pdfjs` build bundled with `pdf-parse`, which is compatible with most environments, including Node.js and modern browsers. If you want to use a more recent version of `pdfjs-dist` or if you want to use a custom build of `pdfjs-dist`, you can do so by providing a custom `pdfjs` function that returns a promise that resolves to the `PDFJS` object.\n", - "\n", - "In the following example we use the \"legacy\" (see [pdfjs docs](https://github.com/mozilla/pdf.js/wiki/Frequently-Asked-Questions#which-browsersenvironments-are-supported)) build of `pdfjs-dist`, which includes several polyfills not included in the default build.\n", - "\n", - "```{=mdx}\n", - "\n", - " pdfjs-dist\n", - "\n", - "\n", - "```\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", - "\n", - "const customBuildLoader = new PDFLoader(nike10kPdfPath, {\n", - " // you may need to add `.then(m => m.default)` to the end of the import\n", - " // @lc-ts-ignore\n", - " pdfjs: () => import(\"pdfjs-dist/legacy/build/pdf.js\"),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Eliminating extra spaces\n", - "\n", - "PDFs come in many varieties, which makes reading them a challenge. 
The loader parses individual text elements and joins them together with a space by default, but\n", - "if you are seeing excessive spaces, this may not be the desired behavior. In that case, you can override the separator with an empty string like this:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "(Mark One)\n", - "☑ ANNUAL REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\n", - "FOR THE FISCAL YEAR ENDED MAY 31, 2023\n", - "OR\n", - "☐ TRANSITI\n" - ] - } - ], - "source": [ - "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", - "\n", - "const noExtraSpacesLoader = new PDFLoader(nike10kPdfPath, {\n", - " parsedItemSeparator: \"\",\n", - "});\n", - "\n", - "const noExtraSpacesDocs = await noExtraSpacesLoader.load();\n", - "console.log(noExtraSpacesDocs[0].pageContent.slice(100, 250))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Loading directories" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\"\n", + "\n", + "const nike10kPdfPath = \"../../../../data/nke-10k-2023.pdf\"\n", + "\n", + "const loader = new PDFLoader(nike10kPdfPath)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, { - "name": "stderr", - "output_type": "stream", - "text": [ - "Unknown file type: Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt\n", - "Unknown file type: example.txt\n", - "Unknown file type: notion.md\n", - "Unknown file type: bad_frontmatter.md\n", - "Unknown file type: frontmatter.md\n", - "Unknown file type: no_frontmatter.md\n", - "Unknown file type: no_metadata.md\n", - "Unknown file type: tags_and_frontmatter.md\n", - "Unknown file type: test.mp3\n" - ] + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: 'Table of Contents\\n' +\n", + " 'UNITED STATES\\n' +\n", + " 'SECURITIES AND EXCHANGE COMMISSION\\n' +\n", + " 'Washington, D.C. 20549\\n' +\n", + " 'FORM 10-K\\n' +\n", + " '(Mark One)\\n' +\n", + " '☑ ANNUAL REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\\n' +\n", + " 'FOR THE FISCAL YEAR ENDED MAY 31, 2023\\n' +\n", + " 'OR\\n' +\n", + " '☐ TRANSITION REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\\n' +\n", + " 'FOR THE TRANSITION PERIOD FROM TO .\\n' +\n", + " 'Commission File No. 
1-10635\\n' +\n", + " 'NIKE, Inc.\\n' +\n", + " '(Exact name of Registrant as specified in its charter)\\n' +\n", + " 'Oregon93-0584541\\n' +\n", + " '(State or other jurisdiction of incorporation)(IRS Employer Identification No.)\\n' +\n", + " 'One Bowerman Drive, Beaverton, Oregon 97005-6453\\n' +\n", + " '(Address of principal executive offices and zip code)\\n' +\n", + " '(503) 671-6453\\n' +\n", + " \"(Registrant's telephone number, including area code)\\n\" +\n", + " 'SECURITIES REGISTERED PURSUANT TO SECTION 12(B) OF THE ACT:\\n' +\n", + " 'Class B Common StockNKENew York Stock Exchange\\n' +\n", + " '(Title of each class)(Trading symbol)(Name of each exchange on which registered)\\n' +\n", + " 'SECURITIES REGISTERED PURSUANT TO SECTION 12(G) OF THE ACT:\\n' +\n", + " 'NONE\\n' +\n", + " 'Indicate by check mark:YESNO\\n' +\n", + " '•if the registrant is a well-known seasoned issuer, as defined in Rule 405 of the Securities Act.þ ̈\\n' +\n", + " '•if the registrant is not required to file reports pursuant to Section 13 or Section 15(d) of the Act. ̈þ\\n' +\n", + " '•whether the registrant (1) has filed all reports required to be filed by Section 13 or 15(d) of the Securities Exchange Act of 1934 during the preceding\\n' +\n", + " '12 months (or for such shorter period that the registrant was required to file such reports), and (2) has been subject to such filing requirements for the\\n' +\n", + " 'past 90 days.\\n' +\n", + " 'þ ̈\\n' +\n", + " '•whether the registrant has submitted electronically every Interactive Data File required to be submitted pursuant to Rule 405 of Regulation S-T\\n' +\n", + " '(§232.405 of this chapter) during the preceding 12 months (or for such shorter period that the registrant was required to submit such files).\\n' +\n", + " 'þ ̈\\n' +\n", + " '•whether the registrant is a large accelerated filer, an accelerated filer, a non-accelerated filer, a smaller reporting company or an emerging growth company. See the definitions of “large accelerated filer,”\\n' +\n", + " '“accelerated filer,” “smaller reporting company,” and “emerging growth company” in Rule 12b-2 of the Exchange Act.\\n' +\n", + " 'Large accelerated filerþAccelerated filer☐Non-accelerated filer☐Smaller reporting company☐Emerging growth company☐\\n' +\n", + " '•if an emerging growth company, if the registrant has elected not to use the extended transition period for complying with any new or revised financial\\n' +\n", + " 'accounting standards provided pursuant to Section 13(a) of the Exchange Act.\\n' +\n", + " ' ̈\\n' +\n", + " \"•whether the registrant has filed a report on and attestation to its management's assessment of the effectiveness of its internal control over financial\\n\" +\n", + " 'reporting under Section 404(b) of the Sarbanes-Oxley Act (15 U.S.C. 
7262(b)) by the registered public accounting firm that prepared or issued its audit\\n' +\n", + " 'report.\\n' +\n", + " 'þ\\n' +\n", + " '•if securities are registered pursuant to Section 12(b) of the Act, whether the financial statements of the registrant included in the filing reflect the\\n' +\n", + " 'correction of an error to previously issued financial statements.\\n' +\n", + " ' ̈\\n' +\n", + " '•whether any of those error corrections are restatements that required a recovery analysis of incentive-based compensation received by any of the\\n' +\n", + " \"registrant's executive officers during the relevant recovery period pursuant to § 240.10D-1(b).\\n\" +\n", + " ' ̈\\n' +\n", + " '•\\n' +\n", + " 'whether the registrant is a shell company (as defined in Rule 12b-2 of the Act).☐þ\\n' +\n", + " \"As of November 30, 2022, the aggregate market values of the Registrant's Common Stock held by non-affiliates were:\\n\" +\n", + " 'Class A$7,831,564,572 \\n' +\n", + " 'Class B136,467,702,472 \\n' +\n", + " '$144,299,267,044 ',\n", + " metadata: {\n", + " source: '../../../../data/nke-10k-2023.pdf',\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: [Object],\n", + " metadata: null,\n", + " totalPages: 107\n", + " },\n", + " loc: { pageNumber: 1 }\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: 'Bitcoin: A Peer-to-Peer Electronic Cash System\\n' +\n", - " 'Satoshi Nakamoto\\n' +\n", - " 'satoshin@gmx.com\\n' +\n", - " 'www.bitcoin.org\\n' +\n", - " 'Abstract. A purely peer-to-peer version of electronic cash would allow online \\n' +\n", - " 'payments to be sent directly from one party to another without going through a \\n' +\n", - " 'financial institution. Digital signatures provide part of the solution, but the main \\n' +\n", - " 'benefits are lost if a trusted third party is still required to prevent double-spending. \\n' +\n", - " 'We propose a solution to the double-spending problem using a peer-to-peer network. \\n' +\n", - " 'The network timestamps transactions by hashing them into an ongoing chain of \\n' +\n", - " 'hash-based proof-of-work, forming a record that cannot be changed without redoing \\n' +\n", - " 'the proof-of-work. The longest chain not only serves as proof of the sequence of \\n' +\n", - " 'events witnessed, but proof that it came from the largest pool of CPU power. As \\n' +\n", - " 'long as a majority of CPU power is controlled by nodes that are not cooperating to \\n' +\n", - " \"attack the network, they'll generate the longest chain and outpace attackers. The \\n\" +\n", - " 'network itself requires minimal structure. Messages are broadcast on a best effort \\n' +\n", - " 'basis, and nodes can leave and rejoin the network at will, accepting the longest \\n' +\n", - " 'proof-of-work chain as proof of what happened while they were gone.\\n' +\n", - " '1.Introduction\\n' +\n", - " 'Commerce on the Internet has come to rely almost exclusively on financial institutions serving as \\n' +\n", - " 'trusted third parties to process electronic payments. While the system works well enough for \\n' +\n", - " 'most transactions, it still suffers from the inherent weaknesses of the trust based model. \\n' +\n", - " 'Completely non-reversible transactions are not really possible, since financial institutions cannot \\n' +\n", - " 'avoid mediating disputes. 
The cost of mediation increases transaction costs, limiting the \\n' +\n", - " 'minimum practical transaction size and cutting off the possibility for small casual transactions, \\n' +\n", - " 'and there is a broader cost in the loss of ability to make non-reversible payments for non-\\n' +\n", - " 'reversible services. With the possibility of reversal, the need for trust spreads. Merchants must \\n' +\n", - " 'be wary of their customers, hassling them for more information than they would otherwise need. \\n' +\n", - " 'A certain percentage of fraud is accepted as unavoidable. These costs and payment uncertainties \\n' +\n", - " 'can be avoided in person by using physical currency, but no mechanism exists to make payments \\n' +\n", - " 'over a communications channel without a trusted party.\\n' +\n", - " 'What is needed is an electronic payment system based on cryptographic proof instead of trust, \\n' +\n", - " 'allowing any two willing parties to transact directly with each other without the need for a trusted \\n' +\n", - " 'third party. Transactions that are computationally impractical to reverse would protect sellers \\n' +\n", - " 'from fraud, and routine escrow mechanisms could easily be implemented to protect buyers. In \\n' +\n", - " 'this paper, we propose a solution to the double-spending problem using a peer-to-peer distributed \\n' +\n", - " 'timestamp server to generate computational proof of the chronological order of transactions. The \\n' +\n", - " 'system is secure as long as honest nodes collectively control more CPU power than any \\n' +\n", - " 'cooperating group of attacker nodes.\\n' +\n", - " '1',\n", - " metadata: {\n", - " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/bitcoin.pdf',\n", - " pdf: {\n", - " version: '1.10.100',\n", - " info: [Object],\n", - " metadata: null,\n", - " totalPages: 9\n", - " },\n", - " loc: { pageNumber: 1 }\n", - " },\n", - " id: undefined\n", - "}\n", - "Document {\n", - " pageContent: 'Bitcoin: A Peer-to-Peer Electronic Cash System\\n' +\n", - " 'Satoshi Nakamoto\\n' +\n", - " 'satoshin@gmx.com\\n' +\n", - " 'www.bitcoin.org\\n' +\n", - " 'Abstract. A purely peer-to-peer version of electronic cash would allow online \\n' +\n", - " 'payments to be sent directly from one party to another without going through a \\n' +\n", - " 'financial institution. Digital signatures provide part of the solution, but the main \\n' +\n", - " 'benefits are lost if a trusted third party is still required to prevent double-spending. \\n' +\n", - " 'We propose a solution to the double-spending problem using a peer-to-peer network. \\n' +\n", - " 'The network timestamps transactions by hashing them into an ongoing chain of \\n' +\n", - " 'hash-based proof-of-work, forming a record that cannot be changed without redoing \\n' +\n", - " 'the proof-of-work. The longest chain not only serves as proof of the sequence of \\n' +\n", - " 'events witnessed, but proof that it came from the largest pool of CPU power. 
As \\n' +\n", - " 'long as a majority of CPU power is controlled by nodes that are not cooperating to',\n", - " metadata: {\n", - " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/bitcoin.pdf',\n", - " pdf: {\n", - " version: '1.10.100',\n", - " info: [Object],\n", - " metadata: null,\n", - " totalPages: 9\n", - " },\n", - " loc: { pageNumber: 1, lines: [Object] }\n", - " },\n", - " id: undefined\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " source: '../../../../data/nke-10k-2023.pdf',\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: {\n", + " PDFFormatVersion: '1.4',\n", + " IsAcroFormPresent: false,\n", + " IsXFAPresent: false,\n", + " Title: '0000320187-23-000039',\n", + " Author: 'EDGAR Online, a division of Donnelley Financial Solutions',\n", + " Subject: 'Form 10-K filed on 2023-07-20 for the period ending 2023-05-31',\n", + " Keywords: '0000320187-23-000039; ; 10-K',\n", + " Creator: 'EDGAR Filing HTML Converter',\n", + " Producer: 'EDGRpdf Service w/ EO.Pdf 22.0.40.0',\n", + " CreationDate: \"D:20230720162200-04'00'\",\n", + " ModDate: \"D:20230720162208-04'00'\"\n", + " },\n", + " metadata: null,\n", + " totalPages: 107\n", + " },\n", + " loc: { pageNumber: 1 }\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage, one document per file" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Table of Contents\n", + "UNITED STATES\n", + "SECURITIES AND EXCHANGE COMMISSION\n", + "Washington, D.C. 20549\n", + "FORM 10-K\n", + "\n" + ] + } + ], + "source": [ + "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", + "\n", + "const singleDocPerFileLoader = new PDFLoader(nike10kPdfPath, {\n", + " splitPages: false,\n", + "});\n", + "\n", + "const singleDoc = await singleDocPerFileLoader.load();\n", + "console.log(singleDoc[0].pageContent.slice(0, 100))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage, custom `pdfjs` build\n", + "\n", + "By default we use the `pdfjs` build bundled with `pdf-parse`, which is compatible with most environments, including Node.js and modern browsers. 
If you want to use a more recent version of `pdfjs-dist` or if you want to use a custom build of `pdfjs-dist`, you can do so by providing a custom `pdfjs` function that returns a promise that resolves to the `PDFJS` object.\n", + "\n", + "In the following example we use the \"legacy\" (see [pdfjs docs](https://github.com/mozilla/pdf.js/wiki/Frequently-Asked-Questions#which-browsersenvironments-are-supported)) build of `pdfjs-dist`, which includes several polyfills not included in the default build.\n", + "\n", + "```{=mdx}\n", + "\n", + " pdfjs-dist\n", + "\n", + "\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", + "\n", + "const customBuildLoader = new PDFLoader(nike10kPdfPath, {\n", + " // you may need to add `.then(m => m.default)` to the end of the import\n", + " // @lc-ts-ignore\n", + " pdfjs: () => import(\"pdfjs-dist/legacy/build/pdf.js\"),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Eliminating extra spaces\n", + "\n", + "PDFs come in many varieties, which makes reading them a challenge. The loader parses individual text elements and joins them together with a space by default, but\n", + "if you are seeing excessive spaces, this may not be the desired behavior. In that case, you can override the separator with an empty string like this:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(Mark One)\n", + "☑ ANNUAL REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\n", + "FOR THE FISCAL YEAR ENDED MAY 31, 2023\n", + "OR\n", + "☐ TRANSITI\n" + ] + } + ], + "source": [ + "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", + "\n", + "const noExtraSpacesLoader = new PDFLoader(nike10kPdfPath, {\n", + " parsedItemSeparator: \"\",\n", + "});\n", + "\n", + "const noExtraSpacesDocs = await noExtraSpacesLoader.load();\n", + "console.log(noExtraSpacesDocs[0].pageContent.slice(100, 250))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Loading directories" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Unknown file type: Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt\n", + "Unknown file type: example.txt\n", + "Unknown file type: notion.md\n", + "Unknown file type: bad_frontmatter.md\n", + "Unknown file type: frontmatter.md\n", + "Unknown file type: no_frontmatter.md\n", + "Unknown file type: no_metadata.md\n", + "Unknown file type: tags_and_frontmatter.md\n", + "Unknown file type: test.mp3\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: 'Bitcoin: A Peer-to-Peer Electronic Cash System\\n' +\n", + " 'Satoshi Nakamoto\\n' +\n", + " 'satoshin@gmx.com\\n' +\n", + " 'www.bitcoin.org\\n' +\n", + " 'Abstract. A purely peer-to-peer version of electronic cash would allow online \\n' +\n", + " 'payments to be sent directly from one party to another without going through a \\n' +\n", + " 'financial institution. Digital signatures provide part of the solution, but the main \\n' +\n", + " 'benefits are lost if a trusted third party is still required to prevent double-spending. 
\\n' +\n", + " 'We propose a solution to the double-spending problem using a peer-to-peer network. \\n' +\n", + " 'The network timestamps transactions by hashing them into an ongoing chain of \\n' +\n", + " 'hash-based proof-of-work, forming a record that cannot be changed without redoing \\n' +\n", + " 'the proof-of-work. The longest chain not only serves as proof of the sequence of \\n' +\n", + " 'events witnessed, but proof that it came from the largest pool of CPU power. As \\n' +\n", + " 'long as a majority of CPU power is controlled by nodes that are not cooperating to \\n' +\n", + " \"attack the network, they'll generate the longest chain and outpace attackers. The \\n\" +\n", + " 'network itself requires minimal structure. Messages are broadcast on a best effort \\n' +\n", + " 'basis, and nodes can leave and rejoin the network at will, accepting the longest \\n' +\n", + " 'proof-of-work chain as proof of what happened while they were gone.\\n' +\n", + " '1.Introduction\\n' +\n", + " 'Commerce on the Internet has come to rely almost exclusively on financial institutions serving as \\n' +\n", + " 'trusted third parties to process electronic payments. While the system works well enough for \\n' +\n", + " 'most transactions, it still suffers from the inherent weaknesses of the trust based model. \\n' +\n", + " 'Completely non-reversible transactions are not really possible, since financial institutions cannot \\n' +\n", + " 'avoid mediating disputes. The cost of mediation increases transaction costs, limiting the \\n' +\n", + " 'minimum practical transaction size and cutting off the possibility for small casual transactions, \\n' +\n", + " 'and there is a broader cost in the loss of ability to make non-reversible payments for non-\\n' +\n", + " 'reversible services. With the possibility of reversal, the need for trust spreads. Merchants must \\n' +\n", + " 'be wary of their customers, hassling them for more information than they would otherwise need. \\n' +\n", + " 'A certain percentage of fraud is accepted as unavoidable. These costs and payment uncertainties \\n' +\n", + " 'can be avoided in person by using physical currency, but no mechanism exists to make payments \\n' +\n", + " 'over a communications channel without a trusted party.\\n' +\n", + " 'What is needed is an electronic payment system based on cryptographic proof instead of trust, \\n' +\n", + " 'allowing any two willing parties to transact directly with each other without the need for a trusted \\n' +\n", + " 'third party. Transactions that are computationally impractical to reverse would protect sellers \\n' +\n", + " 'from fraud, and routine escrow mechanisms could easily be implemented to protect buyers. In \\n' +\n", + " 'this paper, we propose a solution to the double-spending problem using a peer-to-peer distributed \\n' +\n", + " 'timestamp server to generate computational proof of the chronological order of transactions. 
The \\n' +\n", + " 'system is secure as long as honest nodes collectively control more CPU power than any \\n' +\n", + " 'cooperating group of attacker nodes.\\n' +\n", + " '1',\n", + " metadata: {\n", + " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/bitcoin.pdf',\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: [Object],\n", + " metadata: null,\n", + " totalPages: 9\n", + " },\n", + " loc: { pageNumber: 1 }\n", + " },\n", + " id: undefined\n", + "}\n", + "Document {\n", + " pageContent: 'Bitcoin: A Peer-to-Peer Electronic Cash System\\n' +\n", + " 'Satoshi Nakamoto\\n' +\n", + " 'satoshin@gmx.com\\n' +\n", + " 'www.bitcoin.org\\n' +\n", + " 'Abstract. A purely peer-to-peer version of electronic cash would allow online \\n' +\n", + " 'payments to be sent directly from one party to another without going through a \\n' +\n", + " 'financial institution. Digital signatures provide part of the solution, but the main \\n' +\n", + " 'benefits are lost if a trusted third party is still required to prevent double-spending. \\n' +\n", + " 'We propose a solution to the double-spending problem using a peer-to-peer network. \\n' +\n", + " 'The network timestamps transactions by hashing them into an ongoing chain of \\n' +\n", + " 'hash-based proof-of-work, forming a record that cannot be changed without redoing \\n' +\n", + " 'the proof-of-work. The longest chain not only serves as proof of the sequence of \\n' +\n", + " 'events witnessed, but proof that it came from the largest pool of CPU power. As \\n' +\n", + " 'long as a majority of CPU power is controlled by nodes that are not cooperating to',\n", + " metadata: {\n", + " source: '/Users/bracesproul/code/lang-chain-ai/langchainjs/examples/src/document_loaders/example_data/bitcoin.pdf',\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: [Object],\n", + " metadata: null,\n", + " totalPages: 9\n", + " },\n", + " loc: { pageNumber: 1, lines: [Object] }\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "import { DirectoryLoader } from \"langchain/document_loaders/fs/directory\";\n", + "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", + "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", + "\n", + "const exampleDataPath = \"../../../../../../examples/src/document_loaders/example_data/\";\n", + "\n", + "/* Load all PDFs within the specified directory */\n", + "const directoryLoader = new DirectoryLoader(\n", + " exampleDataPath,\n", + " {\n", + " \".pdf\": (path: string) => new PDFLoader(path),\n", + " }\n", + ");\n", + "\n", + "const directoryDocs = await directoryLoader.load();\n", + "\n", + "console.log(directoryDocs[0]);\n", + "\n", + "/* Additional steps : Split text into chunks with any TextSplitter. You can then use it as context or save it to memory afterwards. 
*/\n", + "const textSplitter = new RecursiveCharacterTextSplitter({\n", + " chunkSize: 1000,\n", + " chunkOverlap: 200,\n", + "});\n", + "\n", + "const splitDocs = await textSplitter.splitDocuments(directoryDocs);\n", + "console.log(splitDocs[0]);\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all PDFLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_pdf.PDFLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { DirectoryLoader } from \"langchain/document_loaders/fs/directory\";\n", - "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", - "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", - "\n", - "const exampleDataPath = \"../../../../../../examples/src/document_loaders/example_data/\";\n", - "\n", - "/* Load all PDFs within the specified directory */\n", - "const directoryLoader = new DirectoryLoader(\n", - " exampleDataPath,\n", - " {\n", - " \".pdf\": (path: string) => new PDFLoader(path),\n", - " }\n", - ");\n", - "\n", - "const directoryDocs = await directoryLoader.load();\n", - "\n", - "console.log(directoryDocs[0]);\n", - "\n", - "/* Additional steps : Split text into chunks with any TextSplitter. You can then use it as context or save it to memory afterwards. 
*/\n", - "const textSplitter = new RecursiveCharacterTextSplitter({\n", - " chunkSize: 1000,\n", - " chunkOverlap: 200,\n", - "});\n", - "\n", - "const splitDocs = await textSplitter.splitDocuments(directoryDocs);\n", - "console.log(splitDocs[0]);\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all PDFLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_pdf.PDFLoader.html" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb index aea4ff33148c..e7ce49894db0 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/file_loaders/text.ipynb @@ -1,164 +1,164 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: TextLoader\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# TextLoader\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "\n", - "Only available on Node.js.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "This notebook provides a quick overview for getting started with `TextLoader` [document loaders](/docs/concepts/#document-loaders). 
For detailed documentation of all `TextLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Compatibility | Local | PY support | \n", - "| :--- | :--- | :---: | :---: | :---: |\n", - "| [TextLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain.document_loaders_fs_text.html) | Node-only | ✅ | ❌ |\n", - "\n", - "## Setup\n", - "\n", - "To access `TextLoader` document loader you'll need to install the `langchain` package.\n", - "\n", - "### Installation\n", - "\n", - "The LangChain TextLoader integration lives in the `langchain` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " langchain\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and load documents:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "import { TextLoader } from \"langchain/document_loaders/fs/text\"\n", - "\n", - "const loader = new TextLoader(\"../../../../../../examples/src/document_loaders/example_data/example.txt\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: 'Foo\\nBar\\nBaz\\n\\n',\n", - " metadata: {\n", - " source: '../../../../../../examples/src/document_loaders/example_data/example.txt'\n", - " },\n", - " id: undefined\n", - "}\n" - ] - } - ], - "source": [ - "const docs = await loader.load()\n", - "docs[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: TextLoader\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# TextLoader\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This notebook provides a quick overview for getting started with `TextLoader` [document loaders](/docs/concepts/document_loaders). 
For detailed documentation of all `TextLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Compatibility | Local | PY support | \n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [TextLoader](https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html) | [langchain](https://api.js.langchain.com/modules/langchain.document_loaders_fs_text.html) | Node-only | ✅ | ❌ |\n", + "\n", + "## Setup\n", + "\n", + "To access `TextLoader` document loader you'll need to install the `langchain` package.\n", + "\n", + "### Installation\n", + "\n", + "The LangChain TextLoader integration lives in the `langchain` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " langchain\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " source: '../../../../../../examples/src/document_loaders/example_data/example.txt'\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import { TextLoader } from \"langchain/document_loaders/fs/text\"\n", + "\n", + "const loader = new TextLoader(\"../../../../../../examples/src/document_loaders/example_data/example.txt\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: 'Foo\\nBar\\nBaz\\n\\n',\n", + " metadata: {\n", + " source: '../../../../../../examples/src/document_loaders/example_data/example.txt'\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " source: '../../../../../../examples/src/document_loaders/example_data/example.txt'\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all TextLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "console.log(docs[0].metadata)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all TextLoader features and configurations head to the 
API reference: https://api.js.langchain.com/classes/langchain.document_loaders_fs_text.TextLoader.html" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb b/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb index 37c47a4cb761..56621b4cfc3a 100644 --- a/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb +++ b/docs/core_docs/docs/integrations/document_loaders/file_loaders/unstructured.ipynb @@ -1,243 +1,243 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Unstructured\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# UnstructuredLoader\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "\n", - "Only available on Node.js.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "This notebook provides a quick overview for getting started with `UnstructuredLoader` [document loaders](/docs/concepts/#document-loaders). For detailed documentation of all `UnstructuredLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_unstructured.UnstructuredLoader.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Compatibility | Local | [PY support](https://python.langchain.com/docs/integrations/document_loaders/unstructured_file) | \n", - "| :--- | :--- | :---: | :---: | :---: |\n", - "| [UnstructuredLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_unstructured.UnstructuredLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_fs_unstructured.html) | Node-only | ✅ | ✅ |\n", - "\n", - "## Setup\n", - "\n", - "To access `UnstructuredLoader` document loader you'll need to install the `@langchain/community` integration package, and create an Unstructured account and get an API key.\n", - "\n", - "### Local\n", - "\n", - "You can run Unstructured locally in your computer using Docker. To do so, you need to have Docker installed. You can find the instructions to install Docker [here](https://docs.docker.com/get-docker/).\n", - "\n", - "```bash\n", - "docker run -p 8000:8000 -d --rm --name unstructured-api downloads.unstructured.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Head to [unstructured.io](https://unstructured.io/api-key-hosted) to sign up to Unstructured and generate an API key. 
Once you've done this set the `UNSTRUCTURED_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export UNSTRUCTURED_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain UnstructuredLoader integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and load documents:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { UnstructuredLoader } from \"@langchain/community/document_loaders/fs/unstructured\"\n", - "\n", - "const loader = new UnstructuredLoader(\"../../../../../../examples/src/document_loaders/example_data/notion.md\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Document {\n", - " pageContent: '# Testing the notion markdownloader',\n", - " metadata: {\n", - " filename: 'notion.md',\n", - " languages: [ 'eng' ],\n", - " filetype: 'text/plain',\n", - " category: 'NarrativeText'\n", - " },\n", - " id: undefined\n", - "}\n" - ] - } - ], - "source": [ - "const docs = await loader.load()\n", - "docs[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Unstructured\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " filename: 'notion.md',\n", - " languages: [ 'eng' ],\n", - " filetype: 'text/plain',\n", - " category: 'NarrativeText'\n", - "}\n" - ] - } - ], - "source": [ - "console.log(docs[0].metadata)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Directories\n", - "\n", - "You can also load all of the files in the directory using [`UnstructuredDirectoryLoader`](https://api.js.langchain.com/classes/langchain.document_loaders_fs_unstructured.UnstructuredDirectoryLoader.html), which inherits from [`DirectoryLoader`](/docs/integrations/document_loaders/file_loaders/directory):\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# UnstructuredLoader\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This notebook provides a quick overview for getting started with `UnstructuredLoader` [document loaders](/docs/concepts/document_loaders). 
For detailed documentation of all `UnstructuredLoader` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_unstructured.UnstructuredLoader.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Compatibility | Local | [PY support](https://python.langchain.com/docs/integrations/document_loaders/unstructured_file) | \n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [UnstructuredLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_unstructured.UnstructuredLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_fs_unstructured.html) | Node-only | ✅ | ✅ |\n", + "\n", + "## Setup\n", + "\n", + "To access `UnstructuredLoader` document loader you'll need to install the `@langchain/community` integration package, and create an Unstructured account and get an API key.\n", + "\n", + "### Local\n", + "\n", + "You can run Unstructured locally in your computer using Docker. To do so, you need to have Docker installed. You can find the instructions to install Docker [here](https://docs.docker.com/get-docker/).\n", + "\n", + "```bash\n", + "docker run -p 8000:8000 -d --rm --name unstructured-api downloads.unstructured.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Head to [unstructured.io](https://unstructured.io/api-key-hosted) to sign up to Unstructured and generate an API key. Once you've done this set the `UNSTRUCTURED_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export UNSTRUCTURED_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain UnstructuredLoader integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, { - "name": "stderr", - "output_type": "stream", - "text": [ - "Unknown file type: Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt\n", - "Unknown file type: test.mp3\n" - ] + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { UnstructuredLoader } from \"@langchain/community/document_loaders/fs/unstructured\"\n", + "\n", + "const loader = new UnstructuredLoader(\"../../../../../../examples/src/document_loaders/example_data/notion.md\")" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "directoryDocs.length: 247\n", - "Document {\n", - " pageContent: 'Bitcoin: A Peer-to-Peer Electronic Cash System',\n", - " metadata: {\n", - " filetype: 'application/pdf',\n", - " languages: [ 'eng' ],\n", - " page_number: 1,\n", - " filename: 'bitcoin.pdf',\n", - " category: 'Title'\n", - " },\n", - " id: undefined\n", - "}\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: '# Testing the notion markdownloader',\n", + " 
metadata: {\n", + " filename: 'notion.md',\n", + " languages: [ 'eng' ],\n", + " filetype: 'text/plain',\n", + " category: 'NarrativeText'\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " filename: 'notion.md',\n", + " languages: [ 'eng' ],\n", + " filetype: 'text/plain',\n", + " category: 'NarrativeText'\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Directories\n", + "\n", + "You can also load all of the files in the directory using [`UnstructuredDirectoryLoader`](https://api.js.langchain.com/classes/langchain.document_loaders_fs_unstructured.UnstructuredDirectoryLoader.html), which inherits from [`DirectoryLoader`](/docs/integrations/document_loaders/file_loaders/directory):\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Unknown file type: Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt\n", + "Unknown file type: test.mp3\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "directoryDocs.length: 247\n", + "Document {\n", + " pageContent: 'Bitcoin: A Peer-to-Peer Electronic Cash System',\n", + " metadata: {\n", + " filetype: 'application/pdf',\n", + " languages: [ 'eng' ],\n", + " page_number: 1,\n", + " filename: 'bitcoin.pdf',\n", + " category: 'Title'\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "import { UnstructuredDirectoryLoader } from \"@langchain/community/document_loaders/fs/unstructured\";\n", + "\n", + "const directoryLoader = new UnstructuredDirectoryLoader(\n", + " \"../../../../../../examples/src/document_loaders/example_data/\",\n", + " {}\n", + ");\n", + "const directoryDocs = await directoryLoader.load();\n", + "console.log(\"directoryDocs.length: \", directoryDocs.length);\n", + "console.log(directoryDocs[0])\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all UnstructuredLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_unstructured.UnstructuredLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { UnstructuredDirectoryLoader } from \"@langchain/community/document_loaders/fs/unstructured\";\n", - "\n", - "const directoryLoader = new UnstructuredDirectoryLoader(\n", - " \"../../../../../../examples/src/document_loaders/example_data/\",\n", - " {}\n", - ");\n", - "const directoryDocs = await directoryLoader.load();\n", - "console.log(\"directoryDocs.length: \", directoryDocs.length);\n", - "console.log(directoryDocs[0])\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all 
UnstructuredLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_fs_unstructured.UnstructuredLoader.html" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/document_loaders/index.mdx b/docs/core_docs/docs/integrations/document_loaders/index.mdx index 44770ad5b1ad..0f8dc042422f 100644 --- a/docs/core_docs/docs/integrations/document_loaders/index.mdx +++ b/docs/core_docs/docs/integrations/document_loaders/index.mdx @@ -4,7 +4,7 @@ sidebar_position: 0 # Document loaders -[Document loaders](/docs/concepts#document-loaders) load data into LangChain's expected format for use-cases such as [retrieval-augmented generation (RAG)](/docs/tutorials/rag). +[Document loaders](/docs/concepts/document_loaders) load data into LangChain's expected format for use-cases such as [retrieval-augmented generation (RAG)](/docs/tutorials/rag). LangChain.js categorizes document loaders in two different ways: diff --git a/docs/core_docs/docs/integrations/llms/ai21.mdx b/docs/core_docs/docs/integrations/llms/ai21.mdx index 4a8cabaef115..4f614818b440 100644 --- a/docs/core_docs/docs/integrations/llms/ai21.mdx +++ b/docs/core_docs/docs/integrations/llms/ai21.mdx @@ -19,5 +19,5 @@ import AI21Example from "@examples/models/llm/ai21.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx b/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx index bd58660b3cb0..a5788b063760 100644 --- a/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx +++ b/docs/core_docs/docs/integrations/llms/aleph_alpha.mdx @@ -19,5 +19,5 @@ import AlephAlphaExample from "@examples/models/llm/aleph_alpha.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx b/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx index 48abd663727c..12597cd3e763 100644 --- a/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx +++ b/docs/core_docs/docs/integrations/llms/aws_sagemaker.mdx @@ -27,5 +27,5 @@ import SageMakerEndpointExample from "@examples/models/llm/sagemaker_endpoint.ts ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/azure.ipynb b/docs/core_docs/docs/integrations/llms/azure.ipynb index b8b3a0e60c2b..3ebc20765204 100644 --- a/docs/core_docs/docs/integrations/llms/azure.ipynb +++ b/docs/core_docs/docs/integrations/llms/azure.ipynb @@ -1,344 +1,344 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Azure OpenAI\n", - "---" - ] - }, - { - 
"cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# AzureOpenAI\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Azure OpenAI [text completion models](/docs/concepts/#llms). The latest and most popular Azure OpenAI models are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/azure/).\n", - ":::\n", - "\n", - ":::info\n", - "\n", - "Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which allows to access the latest OpenAI models and features the same day they are released, and allows seemless transition between the OpenAI API and Azure OpenAI.\n", - "\n", - "If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "[Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/) is a Microsoft Azure service that provides powerful language models from OpenAI.\n", - "\n", - "This will help you get started with AzureOpenAI completion models (LLMs) using LangChain. For detailed documentation on `AzureOpenAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/azure_openai) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [AzureOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access AzureOpenAI models you'll need to create an Azure account, get an API key, and install the `@langchain/openai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [azure.microsoft.com](https://azure.microsoft.com/) to sign up to AzureOpenAI and generate an API key. \n", - "\n", - "You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", - "\n", - "Once you have your instance running, make sure you have the name of your instance and key. 
You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance.\n", - "\n", - "If you're using Node.js, you can define the following environment variables to use the service:\n", - "\n", - "```bash\n", - "AZURE_OPENAI_API_INSTANCE_NAME=\n", - "AZURE_OPENAI_API_DEPLOYMENT_NAME=\n", - "AZURE_OPENAI_API_KEY=\n", - "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", - "```\n", - "\n", - "Alternatively, you can pass the values directly to the `AzureOpenAI` constructor.\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain AzureOpenAI integration lives in the `@langchain/openai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureOpenAI } from \"@langchain/openai\"\n", - "\n", - "const llm = new AzureOpenAI({\n", - " model: \"gpt-3.5-turbo-instruct\",\n", - " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", - " azureOpenAIApiInstanceName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", - " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", - " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " timeout: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "provides AI solutions to businesses. They offer a range of services including natural language processing, computer vision, and machine learning. Their solutions are designed to help businesses automate processes, gain insights from data, and improve decision-making. AzureOpenAI also offers consulting services to help businesses identify and implement the best AI solutions for their specific needs. They work with a variety of industries, including healthcare, finance, and retail. 
With their expertise in AI and their partnership with Microsoft Azure, AzureOpenAI is a trusted provider of AI solutions for businesses looking to stay ahead in the rapidly evolving world of technology.\n" - ] - } - ], - "source": [ - "const inputText = \"AzureOpenAI is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Azure OpenAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# AzureOpenAI\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Azure OpenAI [text completion models](/docs/concepts/text_llms). The latest and most popular Azure OpenAI models are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/azure/).\n", + ":::\n", + "\n", + ":::info\n", + "\n", + "Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which allows to access the latest OpenAI models and features the same day they are released, and allows seemless transition between the OpenAI API and Azure OpenAI.\n", + "\n", + "If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "[Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/) is a Microsoft Azure service that provides powerful language models from OpenAI.\n", + "\n", + "This will help you get started with AzureOpenAI completion models (LLMs) using LangChain. 
For detailed documentation on `AzureOpenAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/azure_openai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [AzureOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access AzureOpenAI models you'll need to create an Azure account, get an API key, and install the `@langchain/openai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [azure.microsoft.com](https://azure.microsoft.com/) to sign up to AzureOpenAI and generate an API key. \n", + "\n", + "You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", + "\n", + "Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance.\n", + "\n", + "If you're using Node.js, you can define the following environment variables to use the service:\n", + "\n", + "```bash\n", + "AZURE_OPENAI_API_INSTANCE_NAME=\n", + "AZURE_OPENAI_API_DEPLOYMENT_NAME=\n", + "AZURE_OPENAI_API_KEY=\n", + "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", + "```\n", + "\n", + "Alternatively, you can pass the values directly to the `AzureOpenAI` constructor.\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain AzureOpenAI integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureOpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new AzureOpenAI({\n", + " model: \"gpt-3.5-turbo-instruct\",\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiInstanceName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", + " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " 
azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "provides AI solutions to businesses. They offer a range of services including natural language processing, computer vision, and machine learning. Their solutions are designed to help businesses automate processes, gain insights from data, and improve decision-making. AzureOpenAI also offers consulting services to help businesses identify and implement the best AI solutions for their specific needs. They work with a variety of industries, including healthcare, finance, and retail. With their expertise in AI and their partnership with Microsoft Azure, AzureOpenAI is a trusted provider of AI solutions for businesses looking to stay ahead in the rapidly evolving world of technology.\n" + ] + } + ], + "source": [ + "const inputText = \"AzureOpenAI is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Ich liebe Programmieren.\n" - ] + "cell_type": "code", + "execution_count": 9, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Ich liebe Programmieren.\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = new PromptTemplate({\n", + " template: \"How to say {input} in {output_language}:\\n\",\n", + " inputVariables: [\"input\", \"output_language\"],\n", + "})\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "## Using Azure Managed Identity\n", + "\n", + "If you're using Azure Managed Identity, you can configure the credentials like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c21d1eb8", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " DefaultAzureCredential,\n", + " getBearerTokenProvider,\n", + "} from \"@azure/identity\";\n", + "import { AzureOpenAI } from \"@langchain/openai\";\n", + "\n", + "const credentials = new DefaultAzureCredential();\n", + "const azureADTokenProvider = getBearerTokenProvider(\n", + " credentials,\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ");\n", + "\n", + "const managedIdentityLLM = new AzureOpenAI({\n", + " azureADTokenProvider,\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "94c2572b", + "metadata": {}, + "source": [ + "## Using a different domain\n", + "\n", + "If your instance is hosted under a domain other than the 
default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", + "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bbf107a2", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureOpenAI } from \"@langchain/openai\";\n", + "\n", + "const differentDomainLLM = new AzureOpenAI({\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " azureOpenAIBasePath:\n", + " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "afcff984", + "metadata": {}, + "source": [ + "## Migration from Azure OpenAI SDK\n", + "\n", + "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", + "\n", + "1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", + " ```bash\n", + " npm install @langchain/openai\n", + " npm uninstall @langchain/azure-openai\n", + " ```\n", + "2. Update your imports to use the new `AzureOpenAI` and `AzureChatOpenAI` classes from the `@langchain/openai` package:\n", + " ```typescript\n", + " import { AzureOpenAI } from \"@langchain/openai\";\n", + " ```\n", + "3. Update your code to use the new `AzureOpenAI` and `AzureChatOpenAI` classes and pass the required parameters:\n", + "\n", + " ```typescript\n", + " const model = new AzureOpenAI({\n", + " azureOpenAIApiKey: \"\",\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + " });\n", + " ```\n", + "\n", + " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", + "\n", + " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", + "\n", + " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all AzureOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + 
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = new PromptTemplate({\n", - " template: \"How to say {input} in {output_language}:\\n\",\n", - " inputVariables: [\"input\", \"output_language\"],\n", - "})\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e99eef30", - "metadata": {}, - "source": [ - "## Using Azure Managed Identity\n", - "\n", - "If you're using Azure Managed Identity, you can configure the credentials like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c21d1eb8", - "metadata": {}, - "outputs": [], - "source": [ - "import {\n", - " DefaultAzureCredential,\n", - " getBearerTokenProvider,\n", - "} from \"@azure/identity\";\n", - "import { AzureOpenAI } from \"@langchain/openai\";\n", - "\n", - "const credentials = new DefaultAzureCredential();\n", - "const azureADTokenProvider = getBearerTokenProvider(\n", - " credentials,\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ");\n", - "\n", - "const managedIdentityLLM = new AzureOpenAI({\n", - " azureADTokenProvider,\n", - " azureOpenAIApiInstanceName: \"\",\n", - " azureOpenAIApiDeploymentName: \"\",\n", - " azureOpenAIApiVersion: \"\",\n", - "});\n" - ] - }, - { - "cell_type": "markdown", - "id": "94c2572b", - "metadata": {}, - "source": [ - "## Using a different domain\n", - "\n", - "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", - "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bbf107a2", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureOpenAI } from \"@langchain/openai\";\n", - "\n", - "const differentDomainLLM = new AzureOpenAI({\n", - " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", - " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", - " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", - " azureOpenAIBasePath:\n", - " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", - "});\n" - ] - }, - { - "cell_type": "markdown", - "id": "afcff984", - "metadata": {}, - "source": [ - "## Migration from Azure OpenAI SDK\n", - "\n", - "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", - "\n", - "1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", - " ```bash\n", - " npm install @langchain/openai\n", - " npm uninstall @langchain/azure-openai\n", - " ```\n", - "2. Update your imports to use the new `AzureOpenAI` and `AzureChatOpenAI` classes from the `@langchain/openai` package:\n", - " ```typescript\n", - " import { AzureOpenAI } from \"@langchain/openai\";\n", - " ```\n", - "3. 
Update your code to use the new `AzureOpenAI` and `AzureChatOpenAI` classes and pass the required parameters:\n", - "\n", - " ```typescript\n", - " const model = new AzureOpenAI({\n", - " azureOpenAIApiKey: \"\",\n", - " azureOpenAIApiInstanceName: \"\",\n", - " azureOpenAIApiDeploymentName: \"\",\n", - " azureOpenAIApiVersion: \"\",\n", - " });\n", - " ```\n", - "\n", - " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", - "\n", - " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", - "\n", - " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all AzureOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" - }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/bedrock.ipynb b/docs/core_docs/docs/integrations/llms/bedrock.ipynb index 604f1267b95e..2cfe7a7aaf7a 100644 --- a/docs/core_docs/docs/integrations/llms/bedrock.ipynb +++ b/docs/core_docs/docs/integrations/llms/bedrock.ipynb @@ -1,280 +1,280 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: Bedrock\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# Bedrock\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Amazon Bedrock models as [text completion models](/docs/concepts/#llms). Many popular models available on Bedrock are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "You may be looking for [this page instead](/docs/integrations/chat/bedrock/).\n", - ":::\n", - "\n", - "```\n", - "\n", - "> [Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that makes Foundation Models (FMs)\n", - "> from leading AI startups and Amazon available via an API. You can choose from a wide range of FMs to find the model that is best suited for your use case.\n", - "\n", - "This will help you get started with Bedrock completion models (LLMs) using LangChain. 
For detailed documentation on `Bedrock` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/bedrock) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [Bedrock](https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_bedrock.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Bedrock models you'll need to create an AWS account, get an API key, and install the `@langchain/community` integration, along with a few peer dependencies.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [aws.amazon.com](https://aws.amazon.com) to sign up to AWS Bedrock and generate an API key. Once you've done this set the environment variables:\n", - "\n", - "```bash\n", - "export BEDROCK_AWS_REGION=\"your-region-url\"\n", - "export BEDROCK_AWS_ACCESS_KEY_ID=\"your-access-key-id\"\n", - "export BEDROCK_AWS_SECRET_ACCESS_KEY=\"your-secret-access-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain Bedrock integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "And install the peer dependencies:\n", - "\n", - "\n", - " @aws-crypto/sha256-js @aws-sdk/credential-provider-node @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", - "\n", - "\n", - "You can also use Bedrock in web environments such as Edge functions or Cloudflare Workers by omitting the `@aws-sdk/credential-provider-node` dependency\n", - "and using the `web` entrypoint:\n", - "\n", - "\n", - " @aws-crypto/sha256-js @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "093ae37f", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "// Deno requires these imports, and way of loading env vars.\n", - "// we don't want to expose in the docs.\n", - "// Below this cell we have a typescript markdown codeblock with\n", - "// the node code.\n", - "import \"@aws-sdk/credential-provider-node\";\n", - "import \"@smithy/protocol-http\";\n", - "import \"@aws-crypto/sha256-js\";\n", - 
"import \"@smithy/protocol-http\";\n", - "import \"@smithy/signature-v4\";\n", - "import \"@smithy/eventstream-codec\";\n", - "import \"@smithy/util-utf8\";\n", - "import \"@aws-sdk/types\";\n", - "import { Bedrock } from \"@langchain/community/llms/bedrock\"\n", - "import { getEnvironmentVariable } from \"@langchain/core/utils/env\";\n", - "\n", - "const llm = new Bedrock({\n", - " model: \"anthropic.claude-v2\",\n", - " region: \"us-east-1\",\n", - " // endpointUrl: \"custom.amazonaws.com\",\n", - " credentials: {\n", - " accessKeyId: getEnvironmentVariable(\"BEDROCK_AWS_ACCESS_KEY_ID\"),\n", - " secretAccessKey: getEnvironmentVariable(\"BEDROCK_AWS_SECRET_ACCESS_KEY\"),\n", - " },\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "a0562a13", - "metadata": {}, - "source": [ - "```typescript\n", - "import { Bedrock } from \"@langchain/community/llms/bedrock\"\n", - "\n", - "const llm = new Bedrock({\n", - " model: \"anthropic.claude-v2\",\n", - " region: process.env.BEDROCK_AWS_REGION ?? \"us-east-1\",\n", - " // endpointUrl: \"custom.amazonaws.com\",\n", - " credentials: {\n", - " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,\n", - " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,\n", - " },\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation\n", - "\n", - "Note that some models require specific prompting techniques. For example, Anthropic's Claude-v2 model will throw an error if\n", - "the prompt does not start with `Human: `." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "\u001b[32m\" Here are a few key points about Bedrock AI:\\n\"\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m\"- Bedrock was founded in 2021 and is based in San Fran\"\u001b[39m... 116 more characters" + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Bedrock\n", + "---" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const inputText = \"Human: Bedrock is an AI company that\\nAssistant: \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Bedrock\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Amazon Bedrock models as [text completion models](/docs/concepts/text_llms). 
Many popular models available on Bedrock are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/bedrock/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "> [Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that makes Foundation Models (FMs)\n", + "> from leading AI startups and Amazon available via an API. You can choose from a wide range of FMs to find the model that is best suited for your use case.\n", + "\n", + "This will help you get started with Bedrock completion models (LLMs) using LangChain. For detailed documentation on `Bedrock` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/bedrock) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [Bedrock](https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_bedrock.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Bedrock models you'll need to create an AWS account, get an API key, and install the `@langchain/community` integration, along with a few peer dependencies.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [aws.amazon.com](https://aws.amazon.com) to sign up to AWS Bedrock and generate an API key. 
Once you've done this set the environment variables:\n", + "\n", + "```bash\n", + "export BEDROCK_AWS_REGION=\"your-region-url\"\n", + "export BEDROCK_AWS_ACCESS_KEY_ID=\"your-access-key-id\"\n", + "export BEDROCK_AWS_SECRET_ACCESS_KEY=\"your-secret-access-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Bedrock integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "And install the peer dependencies:\n", + "\n", + "\n", + " @aws-crypto/sha256-js @aws-sdk/credential-provider-node @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", + "\n", + "\n", + "You can also use Bedrock in web environments such as Edge functions or Cloudflare Workers by omitting the `@aws-sdk/credential-provider-node` dependency\n", + "and using the `web` entrypoint:\n", + "\n", + "\n", + " @aws-crypto/sha256-js @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "093ae37f", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "// Deno requires these imports, and way of loading env vars.\n", + "// we don't want to expose in the docs.\n", + "// Below this cell we have a typescript markdown codeblock with\n", + "// the node code.\n", + "import \"@aws-sdk/credential-provider-node\";\n", + "import \"@smithy/protocol-http\";\n", + "import \"@aws-crypto/sha256-js\";\n", + "import \"@smithy/protocol-http\";\n", + "import \"@smithy/signature-v4\";\n", + "import \"@smithy/eventstream-codec\";\n", + "import \"@smithy/util-utf8\";\n", + "import \"@aws-sdk/types\";\n", + "import { Bedrock } from \"@langchain/community/llms/bedrock\"\n", + "import { getEnvironmentVariable } from \"@langchain/core/utils/env\";\n", + "\n", + "const llm = new Bedrock({\n", + " model: \"anthropic.claude-v2\",\n", + " region: \"us-east-1\",\n", + " // endpointUrl: \"custom.amazonaws.com\",\n", + " credentials: {\n", + " accessKeyId: getEnvironmentVariable(\"BEDROCK_AWS_ACCESS_KEY_ID\"),\n", + " secretAccessKey: getEnvironmentVariable(\"BEDROCK_AWS_SECRET_ACCESS_KEY\"),\n", + " },\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "a0562a13", + "metadata": {}, + "source": [ + "```typescript\n", + "import { Bedrock } from \"@langchain/community/llms/bedrock\"\n", + "\n", + "const llm = new Bedrock({\n", + " model: \"anthropic.claude-v2\",\n", + " region: process.env.BEDROCK_AWS_REGION ?? 
\"us-east-1\",\n", + " // endpointUrl: \"custom.amazonaws.com\",\n", + " credentials: {\n", + " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,\n", + " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,\n", + " },\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "Note that some models require specific prompting techniques. For example, Anthropic's Claude-v2 model will throw an error if\n", + "the prompt does not start with `Human: `." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\" Here are a few key points about Bedrock AI:\\n\"\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"- Bedrock was founded in 2021 and is based in San Fran\"\u001b[39m... 116 more characters" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const inputText = \"Human: Bedrock is an AI company that\\nAssistant: \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, { - "data": { - "text/plain": [ - "\u001b[32m' Here is how to say \"I love programming\" in German:\\n'\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m\"Ich liebe das Programmieren.\"\u001b[39m" + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m' Here is how to say \"I love programming\" in German:\\n'\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"Ich liebe das Programmieren.\"\u001b[39m" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"Human: How to say {input} in {output_language}:\\nAssistant:\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all Bedrock features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html" ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"Human: How to say {input} in {output_language}:\\nAssistant:\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all Bedrock features and configurations head to the API reference: 
https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" - }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/chrome_ai.mdx b/docs/core_docs/docs/integrations/llms/chrome_ai.mdx index 9f52e87c4f68..6771e35cf546 100644 --- a/docs/core_docs/docs/integrations/llms/chrome_ai.mdx +++ b/docs/core_docs/docs/integrations/llms/chrome_ai.mdx @@ -116,5 +116,5 @@ for await (const chunk of await model.stream("How are you?")) { ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/cloudflare_workersai.ipynb b/docs/core_docs/docs/integrations/llms/cloudflare_workersai.ipynb index b4d13ec0fdeb..fe0b3d46ce1b 100644 --- a/docs/core_docs/docs/integrations/llms/cloudflare_workersai.ipynb +++ b/docs/core_docs/docs/integrations/llms/cloudflare_workersai.ipynb @@ -1,211 +1,211 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Cloudflare Workers AI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# CloudflareWorkersAI\n", - "\n", - "This will help you get started with Cloudflare Workers AI [text completion models (LLMs)](/docs/concepts#llms) using LangChain. 
For detailed documentation on `CloudflareWorkersAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | PY support | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`CloudflareWorkersAI`](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAI.html) | [`@langchain/cloudflare`](https://npmjs.com/@langchain/cloudflare) | ❌ | ✅ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cloudflare?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cloudflare?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Cloudflare Workers AI models you'll need to create a Cloudflare account, get an API key, and install the `@langchain/cloudflare` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head [to this page](https://developers.cloudflare.com/workers-ai/) to sign up to Cloudflare and generate an API key. Once you've done this, note your `CLOUDFLARE_ACCOUNT_ID` and `CLOUDFLARE_API_TOKEN`.\n", - "\n", - "### Installation\n", - "\n", - "The LangChain Cloudflare integration lives in the `@langchain/cloudflare` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/cloudflare @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "cab7c2aa", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "// @ts-expect-error Deno is not recognized\n", - "const CLOUDFLARE_ACCOUNT_ID = Deno.env.get(\"CLOUDFLARE_ACCOUNT_ID\");\n", - "// @ts-expect-error Deno is not recognized\n", - "const CLOUDFLARE_API_TOKEN = Deno.env.get(\"CLOUDFLARE_API_TOKEN\");" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { CloudflareWorkersAI } from \"@langchain/cloudflare\";\n", - "\n", - "const llm = new CloudflareWorkersAI({\n", - " model: \"@cf/meta/llama-3.1-8b-instruct\", // Default value\n", - " cloudflareAccountId: CLOUDFLARE_ACCOUNT_ID,\n", - " cloudflareApiToken: CLOUDFLARE_API_TOKEN,\n", - " // Pass a custom base URL to use Cloudflare AI Gateway\n", - " // baseUrl: `https://gateway.ai.cloudflare.com/v1/{YOUR_ACCOUNT_ID}/{GATEWAY_NAME}/workers-ai/`,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "\u001b[32m\"Cloudflare is not an AI company, but rather a content delivery network (CDN) and security company. T\"\u001b[39m... 
876 more characters" + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Cloudflare Workers AI\n", + "---" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const inputText = \"Cloudflare is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText);\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# CloudflareWorkersAI\n", + "\n", + "This will help you get started with Cloudflare Workers AI [text completion models (LLMs)](/docs/concepts/text_llms) using LangChain. For detailed documentation on `CloudflareWorkersAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | PY support | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`CloudflareWorkersAI`](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAI.html) | [`@langchain/cloudflare`](https://npmjs.com/@langchain/cloudflare) | ❌ | ✅ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cloudflare?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cloudflare?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Cloudflare Workers AI models you'll need to create a Cloudflare account, get an API key, and install the `@langchain/cloudflare` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head [to this page](https://developers.cloudflare.com/workers-ai/) to sign up to Cloudflare and generate an API key. Once you've done this, note your `CLOUDFLARE_ACCOUNT_ID` and `CLOUDFLARE_API_TOKEN`.\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Cloudflare integration lives in the `@langchain/cloudflare` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/cloudflare @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "data": { - "text/plain": [ - "\u001b[32m\"That's a simple but sweet statement! \\n\"\u001b[39m +\n", - " \u001b[32m\"\\n\"\u001b[39m +\n", - " \u001b[32m'To say \"I love programming\" in German, you can say: \"ICH LIEB'\u001b[39m... 
366 more characters" + "cell_type": "code", + "execution_count": 1, + "id": "cab7c2aa", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "// @ts-expect-error Deno is not recognized\n", + "const CLOUDFLARE_ACCOUNT_ID = Deno.env.get(\"CLOUDFLARE_ACCOUNT_ID\");\n", + "// @ts-expect-error Deno is not recognized\n", + "const CLOUDFLARE_API_TOKEN = Deno.env.get(\"CLOUDFLARE_API_TOKEN\");" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { CloudflareWorkersAI } from \"@langchain/cloudflare\";\n", + "\n", + "const llm = new CloudflareWorkersAI({\n", + " model: \"@cf/meta/llama-3.1-8b-instruct\", // Default value\n", + " cloudflareAccountId: CLOUDFLARE_ACCOUNT_ID,\n", + " cloudflareApiToken: CLOUDFLARE_API_TOKEN,\n", + " // Pass a custom base URL to use Cloudflare AI Gateway\n", + " // baseUrl: `https://gateway.ai.cloudflare.com/v1/{YOUR_ACCOUNT_ID}/{GATEWAY_NAME}/workers-ai/`,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Cloudflare is not an AI company, but rather a content delivery network (CDN) and security company. T\"\u001b[39m... 876 more characters" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const inputText = \"Cloudflare is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText);\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"That's a simple but sweet statement! \\n\"\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m'To say \"I love programming\" in German, you can say: \"ICH LIEB'\u001b[39m... 
366 more characters" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `CloudflareWorkersAI` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAI.html" ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `CloudflareWorkersAI` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" - }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/cohere.ipynb b/docs/core_docs/docs/integrations/llms/cohere.ipynb index 7e4f20391841..92def6e09adc 100644 --- a/docs/core_docs/docs/integrations/llms/cohere.ipynb +++ b/docs/core_docs/docs/integrations/llms/cohere.ipynb @@ -1,264 +1,264 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Cohere\n", - "lc_docs_skip_validation: true\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# Cohere\n", - "\n", - "```{=mdx}\n", - "\n", - ":::warning Legacy\n", - "\n", - "Cohere has marked their `generate` endpoint for LLMs as deprecated. 
Follow their [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using their Chat API via the [`ChatCohere`](/docs/integrations/chat/cohere) integration.\n", - "\n", - ":::\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Cohere models as [text completion models](/docs/concepts/#llms). Many popular models available on Cohere are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "You may be looking for [this page instead](/docs/integrations/chat/cohere/).\n", - ":::\n", - "\n", - "```\n", - "\n", - "This will help you get started with Cohere completion models (LLMs) using LangChain. For detailed documentation on `Cohere` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.Cohere.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/cohere) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [Cohere](https://api.js.langchain.com/classes/langchain_cohere.Cohere.html) | [@langchain/cohere](https://api.js.langchain.com/modules/langchain_cohere.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Cohere models you'll need to create a Cohere account, get an API key, and install the `@langchain/cohere` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [cohere.com](https://cohere.com) to sign up to Cohere and generate an API key. 
Once you've done this set the `COHERE_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export COHERE_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain Cohere integration lives in the `@langchain/cohere` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/cohere @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { Cohere } from \"@langchain/cohere\"\n", - "\n", - "const llm = new Cohere({\n", - " model: \"command\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "2518004d", - "metadata": {}, - "source": [ - "### Custom client for Cohere on Azure, Cohere on AWS Bedrock, and Standalone Cohere Instance.\n", - "\n", - "We can instantiate a custom `CohereClient` and pass it to the ChatCohere constructor.\n", - "\n", - "**Note:** If a custom client is provided both `COHERE_API_KEY` environment variable and `apiKey` parameter in the constructor will be ignored." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "79da9b26", - "metadata": {}, - "outputs": [], - "source": [ - "import { Cohere } from \"@langchain/cohere\";\n", - "import { CohereClient } from \"cohere-ai\";\n", - "\n", - "const client = new CohereClient({\n", - " token: \"\",\n", - " environment: \"\", //optional\n", - " // other params\n", - "});\n", - "\n", - "const llmWithCustomClient = new Cohere({\n", - " client,\n", - " // other params...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Cohere is a company that provides natural language processing models that help companies improve human-machine interactions. Cohere was founded in 2019 by Aidan Gomez, Ivan Zhang, and Nick Frosst. 
\n" - ] - } - ], - "source": [ - "const inputText = \"Cohere is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Cohere\n", + "lc_docs_skip_validation: true\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Cohere\n", + "\n", + "```{=mdx}\n", + "\n", + ":::warning Legacy\n", + "\n", + "Cohere has marked their `generate` endpoint for LLMs as deprecated. Follow their [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using their Chat API via the [`ChatCohere`](/docs/integrations/chat/cohere) integration.\n", + "\n", + ":::\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Cohere models as [text completion models](/docs/concepts/text_llms). Many popular models available on Cohere are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/cohere/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you get started with Cohere completion models (LLMs) using LangChain. For detailed documentation on `Cohere` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.Cohere.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/cohere) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [Cohere](https://api.js.langchain.com/classes/langchain_cohere.Cohere.html) | [@langchain/cohere](https://api.js.langchain.com/modules/langchain_cohere.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Cohere models you'll need to create a Cohere account, get an API key, and install the `@langchain/cohere` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [cohere.com](https://cohere.com) to sign up to Cohere and generate an API key. 
Once you've done this set the `COHERE_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export COHERE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Cohere integration lives in the `@langchain/cohere` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/cohere @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " Ich liebe Programming.\n", - "\n", - "But for day to day purposes Ich mag Programming. would be enough and perfectly understood.\n", - "\n", - "I love programming is \"Ich liebe Programming\" and I like programming is \"Ich mag Programming\" respectively.\n", - "\n", - "There are also other ways to express this feeling, such as \"Ich habe Spaß mit Programming\", which means \"I enjoy programming\". But \"Ich mag\" and \"Ich liebe\" are the most common expressions for this.\n", - "\n", - "Let me know if I can be of further help with something else! \n" - ] + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { Cohere } from \"@langchain/cohere\"\n", + "\n", + "const llm = new Cohere({\n", + " model: \"command\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2518004d", + "metadata": {}, + "source": [ + "### Custom client for Cohere on Azure, Cohere on AWS Bedrock, and Standalone Cohere Instance.\n", + "\n", + "We can instantiate a custom `CohereClient` and pass it to the ChatCohere constructor.\n", + "\n", + "**Note:** If a custom client is provided both `COHERE_API_KEY` environment variable and `apiKey` parameter in the constructor will be ignored." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79da9b26", + "metadata": {}, + "outputs": [], + "source": [ + "import { Cohere } from \"@langchain/cohere\";\n", + "import { CohereClient } from \"cohere-ai\";\n", + "\n", + "const client = new CohereClient({\n", + " token: \"\",\n", + " environment: \"\", //optional\n", + " // other params\n", + "});\n", + "\n", + "const llmWithCustomClient = new Cohere({\n", + " client,\n", + " // other params...\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cohere is a company that provides natural language processing models that help companies improve human-machine interactions. Cohere was founded in 2019 by Aidan Gomez, Ivan Zhang, and Nick Frosst. 
\n" + ] + } + ], + "source": [ + "const inputText = \"Cohere is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Ich liebe Programming.\n", + "\n", + "But for day to day purposes Ich mag Programming. would be enough and perfectly understood.\n", + "\n", + "I love programming is \"Ich liebe Programming\" and I like programming is \"Ich mag Programming\" respectively.\n", + "\n", + "There are also other ways to express this feeling, such as \"Ich habe Spaß mit Programming\", which means \"I enjoy programming\". But \"Ich mag\" and \"Ich liebe\" are the most common expressions for this.\n", + "\n", + "Let me know if I can be of further help with something else! \n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = new PromptTemplate({\n", + " template: \"How to say {input} in {output_language}:\\n\",\n", + " inputVariables: [\"input\", \"output_language\"],\n", + "})\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all Cohere features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.Cohere.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = new PromptTemplate({\n", - " template: \"How to say {input} in {output_language}:\\n\",\n", - " inputVariables: [\"input\", \"output_language\"],\n", - "})\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all Cohere features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.Cohere.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" - }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - 
}, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/deep_infra.mdx b/docs/core_docs/docs/integrations/llms/deep_infra.mdx index 58e4313e8f76..52660a931e7c 100644 --- a/docs/core_docs/docs/integrations/llms/deep_infra.mdx +++ b/docs/core_docs/docs/integrations/llms/deep_infra.mdx @@ -26,5 +26,5 @@ import Example from "@examples/models/llm/deepinfra.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/fireworks.ipynb b/docs/core_docs/docs/integrations/llms/fireworks.ipynb index c16fa6c019c7..ef23f86b3989 100644 --- a/docs/core_docs/docs/integrations/llms/fireworks.ipynb +++ b/docs/core_docs/docs/integrations/llms/fireworks.ipynb @@ -1,280 +1,280 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: Fireworks\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# Fireworks\n", - "\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Fireworks models as [text completion models](/docs/concepts/#llms). Many popular models available on Fireworks are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "You may be looking for [this page instead](/docs/integrations/chat/fireworks/).\n", - ":::\n", - "\n", - "```\n", - "\n", - "[Fireworks AI](https://fireworks.ai/) is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n", - "\n", - "This will help you get started with Fireworks completion models (LLMs) using LangChain. For detailed documentation on `Fireworks` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/fireworks) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [Fireworks](https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_fireworks.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Fireworks models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [fireworks.ai](https://fireworks.ai/) to sign up to Fireworks and generate an API key. 
Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export FIREWORKS_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain Fireworks integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { Fireworks } from \"@langchain/community/llms/fireworks\"\n", - "\n", - "const llm = new Fireworks({\n", - " model: \"accounts/fireworks/models/llama-v3-70b-instruct\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " timeout: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - " helps businesses automate their customer support using AI-powered chatbots. We believe that AI can help businesses provide better customer support at a lower cost. Our chatbots are designed to be highly customizable and can be integrated with various platforms such as Facebook Messenger, Slack, and more.\n", - "\n", - "We are looking for a talented and motivated **Machine Learning Engineer** to join our team. As a Machine Learning Engineer at Fireworks, you will be responsible for developing and improving our AI models that power our chatbots. 
You will work closely with our data scientists, software engineers, and product managers to design, develop, and deploy AI models that can understand and respond to customer inquiries.\n", - "\n", - "**Responsibilities:**\n", - "\n", - "* Develop and improve AI models that can understand and respond to customer inquiries\n", - "* Work with data scientists to design and develop new AI models\n", - "* Collaborate with software engineers to integrate AI models with our chatbot platform\n", - "* Work with product managers to understand customer requirements and develop AI models that meet those requirements\n", - "* Develop and maintain data pipelines to support AI model development and deployment\n", - "* Develop and maintain tools to monitor and evaluate AI model performance\n", - "* Stay up-to-date with the latest developments in AI and machine learning and apply this knowledge to improve our AI models\n", - "\n", - "**Requirements:**\n", - "\n", - "* Bachelor's\n" - ] - } - ], - "source": [ - "const inputText = \"Fireworks is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Fireworks\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Fireworks\n", + "\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Fireworks models as [text completion models](/docs/concepts/text_llms). Many popular models available on Fireworks are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/fireworks/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "[Fireworks AI](https://fireworks.ai/) is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n", + "\n", + "This will help you get started with Fireworks completion models (LLMs) using LangChain. 
For detailed documentation on `Fireworks` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/fireworks) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [Fireworks](https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_fireworks.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Fireworks models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [fireworks.ai](https://fireworks.ai/) to sign up to Fireworks and generate an API key. Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export FIREWORKS_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Fireworks integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Ich liebe Programmieren.\n", - "\n", - "How to say I love coding. in German:\n", - "Ich liebe Coden.\n", - "\n", - "How to say I love to code. in German:\n", - "Ich liebe es zu coden.\n", - "\n", - "How to say I'm a programmer. in German:\n", - "Ich bin ein Programmierer.\n", - "\n", - "How to say I'm a coder. in German:\n", - "Ich bin ein Coder.\n", - "\n", - "How to say I'm a developer. in German:\n", - "Ich bin ein Entwickler.\n", - "\n", - "How to say I'm a software engineer. in German:\n", - "Ich bin ein Software-Ingenieur.\n", - "\n", - "How to say I'm a tech enthusiast. in German:\n", - "Ich bin ein Technik-Enthusiast.\n", - "\n", - "How to say I'm passionate about technology. in German:\n", - "Ich bin leidenschaftlich für Technologie.\n", - "\n", - "How to say I'm passionate about coding. in German:\n", - "Ich bin leidenschaftlich für Coden.\n", - "\n", - "How to say I'm passionate about programming. in German:\n", - "Ich bin leidenschaftlich für Programmieren.\n", - "\n", - "How to say I enjoy coding. in German:\n", - "Ich genieße Coden.\n", - "\n", - "How to say I enjoy programming. 
in German:\n", - "Ich genieße Programmieren.\n", - "\n", - "How to say I'm good at coding. in German:\n", - "Ich bin gut im Coden.\n", - "\n", - "How to say I'm\n" - ] + "cell_type": "code", + "execution_count": 1, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { Fireworks } from \"@langchain/community/llms/fireworks\"\n", + "\n", + "const llm = new Fireworks({\n", + " model: \"accounts/fireworks/models/llama-v3-70b-instruct\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " helps businesses automate their customer support using AI-powered chatbots. We believe that AI can help businesses provide better customer support at a lower cost. Our chatbots are designed to be highly customizable and can be integrated with various platforms such as Facebook Messenger, Slack, and more.\n", + "\n", + "We are looking for a talented and motivated **Machine Learning Engineer** to join our team. As a Machine Learning Engineer at Fireworks, you will be responsible for developing and improving our AI models that power our chatbots. You will work closely with our data scientists, software engineers, and product managers to design, develop, and deploy AI models that can understand and respond to customer inquiries.\n", + "\n", + "**Responsibilities:**\n", + "\n", + "* Develop and improve AI models that can understand and respond to customer inquiries\n", + "* Work with data scientists to design and develop new AI models\n", + "* Collaborate with software engineers to integrate AI models with our chatbot platform\n", + "* Work with product managers to understand customer requirements and develop AI models that meet those requirements\n", + "* Develop and maintain data pipelines to support AI model development and deployment\n", + "* Develop and maintain tools to monitor and evaluate AI model performance\n", + "* Stay up-to-date with the latest developments in AI and machine learning and apply this knowledge to improve our AI models\n", + "\n", + "**Requirements:**\n", + "\n", + "* Bachelor's\n" + ] + } + ], + "source": [ + "const inputText = \"Fireworks is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ich liebe Programmieren.\n", + "\n", + "How to say I love coding. in German:\n", + "Ich liebe Coden.\n", + "\n", + "How to say I love to code. in German:\n", + "Ich liebe es zu coden.\n", + "\n", + "How to say I'm a programmer. in German:\n", + "Ich bin ein Programmierer.\n", + "\n", + "How to say I'm a coder. in German:\n", + "Ich bin ein Coder.\n", + "\n", + "How to say I'm a developer. in German:\n", + "Ich bin ein Entwickler.\n", + "\n", + "How to say I'm a software engineer. 
in German:\n", + "Ich bin ein Software-Ingenieur.\n", + "\n", + "How to say I'm a tech enthusiast. in German:\n", + "Ich bin ein Technik-Enthusiast.\n", + "\n", + "How to say I'm passionate about technology. in German:\n", + "Ich bin leidenschaftlich für Technologie.\n", + "\n", + "How to say I'm passionate about coding. in German:\n", + "Ich bin leidenschaftlich für Coden.\n", + "\n", + "How to say I'm passionate about programming. in German:\n", + "Ich bin leidenschaftlich für Programmieren.\n", + "\n", + "How to say I enjoy coding. in German:\n", + "Ich genieße Coden.\n", + "\n", + "How to say I enjoy programming. in German:\n", + "Ich genieße Programmieren.\n", + "\n", + "How to say I'm good at coding. in German:\n", + "Ich bin gut im Coden.\n", + "\n", + "How to say I'm\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4989353f", + "metadata": {}, + "source": [ + "Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", + "\n", + "- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility).\n", + "- Generation using multiple prompts is not supported.\n" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all Fireworks features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "4989353f", - "metadata": {}, - "source": [ - "Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", - "\n", - "- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility).\n", - "- Generation using multiple prompts is not supported.\n" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all Fireworks features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - 
"language": "typescript", - "name": "tslab" - }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/friendli.mdx b/docs/core_docs/docs/integrations/llms/friendli.mdx index 398c430049c8..70e843410a19 100644 --- a/docs/core_docs/docs/integrations/llms/friendli.mdx +++ b/docs/core_docs/docs/integrations/llms/friendli.mdx @@ -30,5 +30,5 @@ import Example from "@examples/models/llm/friendli.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb index 4bdfa603513f..8ad068013ecb 100644 --- a/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb +++ b/docs/core_docs/docs/integrations/llms/google_vertex_ai.ipynb @@ -1,282 +1,282 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Google Vertex AI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# Google Vertex AI\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Google Vertex models as [text completion models](/docs/concepts/#llms). Many popular models available on Google Vertex are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "You may be looking for [this page instead](/docs/integrations/chat/google_vertex_ai/).\n", - ":::\n", - "\n", - "```\n", - "\n", - "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.\n", - "\n", - "This will help you get started with VertexAI completion models (LLMs) using LangChain. 
For detailed documentation on `VertexAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html).\n", - "\n", - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [VertexAI](https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html) | [`@langchain/google-vertexai`](https://www.npmjs.com/package/@langchain/google-vertexai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "LangChain.js supports two different authentication methods based on whether\n", - "you're running in a Node.js environment or a web environment.\n", - "\n", - "To access VertexAI models you'll need to create a Google Cloud Platform (GCP) account, get an API key, and install the `@langchain/google-vertexai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "#### Node.js\n", - "\n", - "You should make sure the Vertex AI API is\n", - "enabled for the relevant project and that you've authenticated to\n", - "Google Cloud using one of these methods:\n", - "\n", - "- You are logged into an account (using `gcloud auth application-default login`)\n", - " permitted to that project.\n", - "- You are running on a machine using a service account that is permitted\n", - " to the project.\n", - "- You have downloaded the credentials for a service account that is permitted\n", - " to the project and set the `GOOGLE_APPLICATION_CREDENTIALS` environment\n", - " variable to the path of this file.\n", - " **or**\n", - "- You set the `GOOGLE_API_KEY` environment variable to the API key for the project.\n", - "\n", - "#### Web\n", - "\n", - "To call Vertex AI models in web environments (like Edge functions), you'll need to install\n", - "the `@langchain/google-vertexai-web` package.\n", - "\n", - "Then, you'll need to add your service account credentials directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable:\n", - "\n", - "```\n", - "GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n", - "```\n", - "\n", - "You can also pass your credentials directly in code like this:\n", - "\n", - "```typescript\n", - "import { VertexAI } from \"@langchain/google-vertexai\";\n", - "// Or uncomment this line if you're using the web version:\n", - "// import { VertexAI } from \"@langchain/google-vertexai-web\";\n", - "\n", - "const model = new VertexAI({\n", - " authOptions: {\n", - " credentials: {\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...},\n", - " },\n", - "});\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain VertexAI integration lives in the `@langchain/google-vertexai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from 
\"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/google-vertexai @langchain/core\n", - "\n", - "\n", - "or for web environments:\n", - "\n", - "\n", - " @langchain/google-vertexai-web @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { VertexAI } from \"@langchain/google-vertexai-web\"\n", - "\n", - "const llm = new VertexAI({\n", - " model: \"gemini-pro\",\n", - " temperature: 0,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "const inputText = \"VertexAI is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "f580765e", - "metadata": { - "vscode": { - "languageId": "raw" + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Google Vertex AI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Google Vertex AI\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Google Vertex models as [text completion models](/docs/concepts/text_llms). Many popular models available on Google Vertex are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/google_vertex_ai/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.\n", + "\n", + "This will help you get started with VertexAI completion models (LLMs) using LangChain. 
For detailed documentation on `VertexAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html).\n", + "\n", + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [VertexAI](https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html) | [`@langchain/google-vertexai`](https://www.npmjs.com/package/@langchain/google-vertexai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "LangChain.js supports two different authentication methods based on whether\n", + "you're running in a Node.js environment or a web environment.\n", + "\n", + "To access VertexAI models you'll need to create a Google Cloud Platform (GCP) account, get an API key, and install the `@langchain/google-vertexai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "#### Node.js\n", + "\n", + "You should make sure the Vertex AI API is\n", + "enabled for the relevant project and that you've authenticated to\n", + "Google Cloud using one of these methods:\n", + "\n", + "- You are logged into an account (using `gcloud auth application-default login`)\n", + " permitted to that project.\n", + "- You are running on a machine using a service account that is permitted\n", + " to the project.\n", + "- You have downloaded the credentials for a service account that is permitted\n", + " to the project and set the `GOOGLE_APPLICATION_CREDENTIALS` environment\n", + " variable to the path of this file.\n", + " **or**\n", + "- You set the `GOOGLE_API_KEY` environment variable to the API key for the project.\n", + "\n", + "#### Web\n", + "\n", + "To call Vertex AI models in web environments (like Edge functions), you'll need to install\n", + "the `@langchain/google-vertexai-web` package.\n", + "\n", + "Then, you'll need to add your service account credentials directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable:\n", + "\n", + "```\n", + "GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n", + "```\n", + "\n", + "You can also pass your credentials directly in code like this:\n", + "\n", + "```typescript\n", + "import { VertexAI } from \"@langchain/google-vertexai\";\n", + "// Or uncomment this line if you're using the web version:\n", + "// import { VertexAI } from \"@langchain/google-vertexai-web\";\n", + "\n", + "const model = new VertexAI({\n", + " authOptions: {\n", + " credentials: {\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...},\n", + " },\n", + "});\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain VertexAI integration lives in the `@langchain/google-vertexai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from 
\"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/google-vertexai @langchain/core\n", + "\n", + "\n", + "or for web environments:\n", + "\n", + "\n", + " @langchain/google-vertexai-web @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { VertexAI } from \"@langchain/google-vertexai-web\"\n", + "\n", + "const llm = new VertexAI({\n", + " model: \"gemini-pro\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "const inputText = \"VertexAI is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "f580765e", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "```txt\n", + "offers a wide range of cloud computing services and artificial intelligence solutions to businesses and developers worldwide.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "078e9db2", + "metadata": {}, + "outputs": [], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4d106b41", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "```txt\n", + "\"Ich liebe Programmieren.\"\n", + "Pronunciation guide:\n", + "\n", + "Ich: [ɪç] (similar to \"ikh\" with a soft \"ch\" sound)\n", + "liebe: [ˈliːbə] (LEE-buh)\n", + "Programmieren: [pʁoɡʁaˈmiːʁən] (pro-gra-MEE-ren)\n", + "\n", + "You could also say:\n", + "\"Ich liebe es zu programmieren.\"\n", + "Which translates more literally to \"I love to program.\" This version is a bit more formal or precise.\n", + "Pronunciation:\n", + "\n", + "es: [ɛs] (like the letter \"S\")\n", + "zu: [tsuː] (tsoo)\n", + "\n", + "Both versions are correct and commonly used.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all VertexAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html" + ] } - }, - "source": [ - "```txt\n", - "offers a wide range of cloud computing services and artificial intelligence solutions to businesses and developers worldwide.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - 
"source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "078e9db2", - "metadata": {}, - "outputs": [], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "4d106b41", - "metadata": { + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + }, "vscode": { - "languageId": "raw" + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - }, - "source": [ - "```txt\n", - "\"Ich liebe Programmieren.\"\n", - "Pronunciation guide:\n", - "\n", - "Ich: [ɪç] (similar to \"ikh\" with a soft \"ch\" sound)\n", - "liebe: [ˈliːbə] (LEE-buh)\n", - "Programmieren: [pʁoɡʁaˈmiːʁən] (pro-gra-MEE-ren)\n", - "\n", - "You could also say:\n", - "\"Ich liebe es zu programmieren.\"\n", - "Which translates more literally to \"I love to program.\" This version is a bit more formal or precise.\n", - "Pronunciation:\n", - "\n", - "es: [ɛs] (like the letter \"S\")\n", - "zu: [tsuː] (tsoo)\n", - "\n", - "Both versions are correct and commonly used.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all VertexAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" - }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/gradient_ai.mdx b/docs/core_docs/docs/integrations/llms/gradient_ai.mdx index 1de895af0342..9076509bfecd 100644 --- a/docs/core_docs/docs/integrations/llms/gradient_ai.mdx +++ b/docs/core_docs/docs/integrations/llms/gradient_ai.mdx @@ -55,5 +55,5 @@ The use your own custom adapter simply set `adapterId` during setup. 
## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx b/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx index ec2be6743dc6..0ac556b43f0e 100644 --- a/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx +++ b/docs/core_docs/docs/integrations/llms/huggingface_inference.mdx @@ -23,5 +23,5 @@ console.log({ res }); ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/ibm.ipynb b/docs/core_docs/docs/integrations/llms/ibm.ipynb index 81e57aceb7d6..1644f7401724 100644 --- a/docs/core_docs/docs/integrations/llms/ibm.ipynb +++ b/docs/core_docs/docs/integrations/llms/ibm.ipynb @@ -1,361 +1,361 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: IBM watsonx.ai\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# IBM watsonx.ai\n", - "\n", - "\n", - "This will help you get started with IBM [text completion models (LLMs)](/docs/concepts#llms) using LangChain. For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [IBM watsonx.ai](https://api.js.langchain.com/classes/_langchain_community.llms_ibm.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/ibm_watsonx/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`IBM watsonx.ai`](https://api.js.langchain.com/modules/_langchain_community.llms_ibm.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "\n", - "To access IBM WatsonxAI models you'll need to create an IBM watsonx.ai account, get an API key or any other type of credentials, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "\n", - "Head to [IBM Cloud](https://cloud.ibm.com/login) to sign up to IBM watsonx.ai and generate an API key or provide any other authentication form as presented below.\n", - "\n", - "#### IAM authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=iam\n", - "export WATSONX_AI_APIKEY=\n", - "```\n", - "\n", - "#### Bearer token authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=bearertoken\n", - "export WATSONX_AI_BEARER_TOKEN=\n", - "```\n", - "\n", - "#### CP4D authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=cp4d\n", - "export WATSONX_AI_USERNAME=\n", - "export WATSONX_AI_PASSWORD=\n", - "export WATSONX_AI_URL=\n", - "```\n", - "\n", - "Once these are placed in your environment variables and object is initialized authentication will proceed automatically.\n", - "\n", - "Authentication can also be accomplished by passing these values as parameters to a new instance.\n", - "\n", - "## IAM 
authentication\n", - "\n", - "```typescript\n", - "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"iam\",\n", - " watsonxAIApikey: \"\",\n", - "};\n", - "const instance = new WatsonxLLM(props);\n", - "```\n", - "\n", - "## Bearer token authentication\n", - "\n", - "```typescript\n", - "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"bearertoken\",\n", - " watsonxAIBearerToken: \"\",\n", - "};\n", - "const instance = new WatsonxLLM(props);\n", - "```\n", - "\n", - "### CP4D authentication\n", - "\n", - "```typescript\n", - "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"cp4d\",\n", - " watsonxAIUsername: \"\",\n", - " watsonxAIPassword: \"\",\n", - " watsonxAIUrl: \"\",\n", - "};\n", - "const instance = new WatsonxLLM(props);\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain IBM watsonx.ai integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", - "\n", - "const props = {\n", - " decoding_method: \"sample\",\n", - " max_new_tokens: 100,\n", - " min_new_tokens: 1,\n", - " temperature: 0.5,\n", - " top_k: 50,\n", - " top_p: 1,\n", - "};\n", - "const instance = new WatsonxLLM({\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: process.env.API_URL,\n", - " projectId: \"\",\n", - " spaceId: \"\",\n", - " idOrName: \"\",\n", - " model: \"\",\n", - " ...props,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "f7498103", - "metadata": {}, - "source": [ - "Note:\n", - "\n", - "- You must provide `spaceId`, `projectId` or `idOrName`(deployment id) in order to proceed.\n", - "- Depending on the region of your provisioned service instance, use correct serviceUrl.\n", - "- You need to specify the model you want to use for inferencing through model_id." 
- ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation and generation\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "print('Hello world.')<|endoftext|>\n", - "{\n", - " generations: [ [ [Object] ], [ [Object] ] ],\n", - " llmOutput: { tokenUsage: { generated_token_count: 28, input_token_count: 10 } }\n", - "}\n" - ] - } - ], - "source": [ - "const result = await instance.invoke(\"Print hello world.\");\n", - "console.log(result);\n", - "\n", - "const results = await instance.generate([\n", - " \"Print hello world.\",\n", - " \"Print bye, bye world!\",\n", - "]);\n", - "console.log(results);" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can chain our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: IBM watsonx.ai\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Ich liebe Programmieren.\n", - "\n", - "To express that you are passionate about programming in German,\n" - ] - } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(instance);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "0c305670", - "metadata": {}, - "source": [ - "## Props overwriting\n", - "\n", - "Passed props at initialization will last for the whole life cycle of the object, however you may overwrite them for a single method's call by passing second argument as below\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "bb53235c", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# IBM watsonx.ai\n", + "\n", + "\n", + "This will help you get started with IBM [text completion models (LLMs)](/docs/concepts/text_llms) using LangChain. 
For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [IBM watsonx.ai](https://api.js.langchain.com/classes/_langchain_community.llms_ibm.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/ibm_watsonx/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`IBM watsonx.ai`](https://api.js.langchain.com/modules/_langchain_community.llms_ibm.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "\n", + "To access IBM WatsonxAI models you'll need to create an IBM watsonx.ai account, get an API key or any other type of credentials, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "\n", + "Head to [IBM Cloud](https://cloud.ibm.com/login) to sign up to IBM watsonx.ai and generate an API key or provide any other authentication form as presented below.\n", + "\n", + "#### IAM authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=iam\n", + "export WATSONX_AI_APIKEY=\n", + "```\n", + "\n", + "#### Bearer token authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=bearertoken\n", + "export WATSONX_AI_BEARER_TOKEN=\n", + "```\n", + "\n", + "#### CP4D authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=cp4d\n", + "export WATSONX_AI_USERNAME=\n", + "export WATSONX_AI_PASSWORD=\n", + "export WATSONX_AI_URL=\n", + "```\n", + "\n", + "Once these are placed in your environment variables and object is initialized authentication will proceed automatically.\n", + "\n", + "Authentication can also be accomplished by passing these values as parameters to a new instance.\n", + "\n", + "## IAM authentication\n", + "\n", + "```typescript\n", + "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"iam\",\n", + " watsonxAIApikey: \"\",\n", + "};\n", + "const instance = new WatsonxLLM(props);\n", + "```\n", + "\n", + "## Bearer token authentication\n", + "\n", + "```typescript\n", + "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"bearertoken\",\n", + " watsonxAIBearerToken: \"\",\n", + "};\n", + "const instance = new WatsonxLLM(props);\n", + "```\n", + "\n", + "### CP4D authentication\n", + "\n", + "```typescript\n", + "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"cp4d\",\n", + " watsonxAIUsername: \"\",\n", + " watsonxAIPassword: \"\",\n", + " watsonxAIUrl: \"\",\n", + "};\n", + "const instance = new WatsonxLLM(props);\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting 
below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain IBM watsonx.ai integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "print('Hello world.')<|endoftext|>\n" - ] - } - ], - "source": [ - "const result2 = await instance.invoke(\"Print hello world.\", {\n", - " parameters: {\n", - " max_new_tokens: 20,\n", - " },\n", - "});\n", - "console.log(result2);" - ] - }, - { - "cell_type": "markdown", - "id": "577a0583", - "metadata": {}, - "source": [ - "## Tokenization\n", - "This package has it's custom getNumTokens implementation which returns exact amount of tokens that would be used.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "339e237c", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { WatsonxLLM } from \"@langchain/community/llms/ibm\";\n", + "\n", + "const props = {\n", + " decoding_method: \"sample\",\n", + " max_new_tokens: 100,\n", + " min_new_tokens: 1,\n", + " temperature: 0.5,\n", + " top_k: 50,\n", + " top_p: 1,\n", + "};\n", + "const instance = new WatsonxLLM({\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: process.env.API_URL,\n", + " projectId: \"\",\n", + " spaceId: \"\",\n", + " idOrName: \"\",\n", + " model: \"\",\n", + " ...props,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "f7498103", + "metadata": {}, + "source": [ + "Note:\n", + "\n", + "- You must provide `spaceId`, `projectId` or `idOrName`(deployment id) in order to proceed.\n", + "- Depending on the region of your provisioned service instance, use correct serviceUrl.\n", + "- You need to specify the model you want to use for inferencing through model_id." 
+ ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation and generation\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "print('Hello world.')<|endoftext|>\n", + "{\n", + " generations: [ [ [Object] ], [ [Object] ] ],\n", + " llmOutput: { tokenUsage: { generated_token_count: 28, input_token_count: 10 } }\n", + "}\n" + ] + } + ], + "source": [ + "const result = await instance.invoke(\"Print hello world.\");\n", + "console.log(result);\n", + "\n", + "const results = await instance.generate([\n", + " \"Print hello world.\",\n", + " \"Print bye, bye world!\",\n", + "]);\n", + "console.log(results);" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can chain our completion model with a prompt template like so:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "4\n" - ] + "cell_type": "code", + "execution_count": 6, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ich liebe Programmieren.\n", + "\n", + "To express that you are passionate about programming in German,\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(instance);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "0c305670", + "metadata": {}, + "source": [ + "## Props overwriting\n", + "\n", + "Passed props at initialization will last for the whole life cycle of the object, however you may overwrite them for a single method's call by passing second argument as below\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "bb53235c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "print('Hello world.')<|endoftext|>\n" + ] + } + ], + "source": [ + "const result2 = await instance.invoke(\"Print hello world.\", {\n", + " parameters: {\n", + " max_new_tokens: 20,\n", + " },\n", + "});\n", + "console.log(result2);" + ] + }, + { + "cell_type": "markdown", + "id": "577a0583", + "metadata": {}, + "source": [ + "## Tokenization\n", + "This package has it's custom getNumTokens implementation which returns exact amount of tokens that would be used.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "339e237c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "4\n" + ] + } + ], + "source": [ + "const tokens = await instance.getNumTokens(\"Print hello world.\");\n", + "console.log(tokens);" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `IBM watsonx.ai` features and configurations head to the API reference: [API docs](https://api.js.langchain.com/modules/_langchain_community.embeddings_ibm.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "JavaScript (Node.js)", + "language": "javascript", + "name": "javascript" + }, + "language_info": { + "file_extension": ".js", + "mimetype": 
"application/javascript", + "name": "javascript", + "version": "20.17.0" } - ], - "source": [ - "const tokens = await instance.getNumTokens(\"Print hello world.\");\n", - "console.log(tokens);" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `IBM watsonx.ai` features and configurations head to the API reference: [API docs](https://api.js.langchain.com/modules/_langchain_community.embeddings_ibm.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "JavaScript (Node.js)", - "language": "javascript", - "name": "javascript" }, - "language_info": { - "file_extension": ".js", - "mimetype": "application/javascript", - "name": "javascript", - "version": "20.17.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/index.mdx b/docs/core_docs/docs/integrations/llms/index.mdx index 646b79f19c62..664218ec92f4 100644 --- a/docs/core_docs/docs/integrations/llms/index.mdx +++ b/docs/core_docs/docs/integrations/llms/index.mdx @@ -6,7 +6,7 @@ sidebar_class_name: hidden # LLMs :::caution -You are currently on a page documenting the use of [text completion models](/docs/concepts/#llms). Many of the latest and most popular models are [chat completion models](/docs/concepts/#chat-models). +You are currently on a page documenting the use of [text completion models](/docs/concepts/text_llms). Many of the latest and most popular models are [chat completion models](/docs/concepts/chat_models). Unless you are specifically using more advanced prompting techniques, you are probably looking for [this page instead](/docs/integrations/chat/). 
::: diff --git a/docs/core_docs/docs/integrations/llms/jigsawstack.mdx b/docs/core_docs/docs/integrations/llms/jigsawstack.mdx index 1060f55ee323..a53754255dbb 100644 --- a/docs/core_docs/docs/integrations/llms/jigsawstack.mdx +++ b/docs/core_docs/docs/integrations/llms/jigsawstack.mdx @@ -39,5 +39,5 @@ export const run = async () => { ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/layerup_security.mdx b/docs/core_docs/docs/integrations/llms/layerup_security.mdx index 0c96057c21c7..58945caaf7bf 100644 --- a/docs/core_docs/docs/integrations/llms/layerup_security.mdx +++ b/docs/core_docs/docs/integrations/llms/layerup_security.mdx @@ -32,5 +32,5 @@ import LayerupSecurityExampleCode from "@examples/llms/layerup_security.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/llama_cpp.mdx b/docs/core_docs/docs/integrations/llms/llama_cpp.mdx index fe0673b79207..576ea560440b 100644 --- a/docs/core_docs/docs/integrations/llms/llama_cpp.mdx +++ b/docs/core_docs/docs/integrations/llms/llama_cpp.mdx @@ -122,5 +122,5 @@ import LlamaCppStreamExample from "@examples/models/llm/llama_cpp_stream.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/mistral.ipynb b/docs/core_docs/docs/integrations/llms/mistral.ipynb index 587bd1ea784f..cb7cf11f6d01 100644 --- a/docs/core_docs/docs/integrations/llms/mistral.ipynb +++ b/docs/core_docs/docs/integrations/llms/mistral.ipynb @@ -1,310 +1,310 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: MistralAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# MistralAI\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip\n", - "Want to run Mistral's models locally? Check out our [Ollama integration](/docs/integrations/chat/ollama).\n", - ":::\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Mistral models as [text completion models](/docs/concepts/#llms). Many popular models available on Mistral are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "You may be looking for [this page instead](/docs/integrations/chat/mistral/).\n", - ":::\n", - "\n", - "```\n", - "\n", - "[Mistral AI](https://mistral.ai/) is a platform that offers hosting for their powerful [open source models](https://docs.mistral.ai/getting-started/models/).\n", - "\n", - "This will help you get started with MistralAI completion models (LLMs) using LangChain. 
For detailed documentation on `MistralAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.MistralAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | PY support | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [MistralAI](https://api.js.langchain.com/classes/langchain_mistralai.MistralAI.html) | [`@langchain/mistralai`](https://www.npmjs.com/package/@langchain/mistralai) | ❌ | ✅ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access MistralAI models you'll need to create a MistralAI account, get an API key, and install the `@langchain/mistralai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [console.mistral.ai](https://console.mistral.ai/) to sign up to MistralAI and generate an API key. Once you've done this set the `MISTRAL_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export MISTRAL_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain MistralAI integration lives in the `@langchain/mistralai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/mistralai @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { MistralAI } from \"@langchain/mistralai\"\n", - "\n", - "const llm = new MistralAI({\n", - " model: \"codestral-latest\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - " has developed Mistral 7B, a large language model (LLM) that is open-source and available for commercial use. Mistral 7B is a 7 billion parameter model that is trained on a diverse and high-quality dataset, and it has been fine-tuned to perform well on a variety of tasks, including text generation, question answering, and code interpretation.\n", - "\n", - "MistralAI has made Mistral 7B available under a permissive license, allowing anyone to use the model for commercial purposes without having to pay any fees. 
This has made Mistral 7B a popular choice for businesses and organizations that want to leverage the power of large language models without incurring high costs.\n", - "\n", - "Mistral 7B has been trained on a diverse and high-quality dataset, which has enabled it to perform well on a variety of tasks. It has been fine-tuned to generate coherent and contextually relevant text, and it has been shown to be capable of answering complex questions and interpreting code.\n", - "\n", - "Mistral 7B is also a highly efficient model, capable of processing text at a fast pace. This makes it well-suited for applications that require real-time responses, such as chatbots and virtual assistants.\n", - "\n", - "Overall, Mistral 7B is a powerful and versatile large language model that is open-source and available for commercial use. Its ability to perform well on a variety of tasks, its efficiency, and its permissive license make it a popular choice for businesses and organizations that want to leverage the power of large language models.\n" - ] - } - ], - "source": [ - "const inputText = \"MistralAI is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: MistralAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "I love programming.\n", - "\n", - "Ich liebe Programmieren.\n", - "\n", - "In German, the phrase \"I love programming\" is translated as \"Ich liebe Programmieren.\" The word \"programming\" is translated to \"Programmieren,\" and \"I love\" is translated to \"Ich liebe.\"\n" - ] - } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e99eef30", - "metadata": {}, - "source": [ - "Since the Mistral LLM is a completions model, they also allow you to insert a `suffix` to the prompt. Suffixes can be passed via the call options when invoking a model like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "ec67551d", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# MistralAI\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip\n", + "Want to run Mistral's models locally? Check out our [Ollama integration](/docs/integrations/chat/ollama).\n", + ":::\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Mistral models as [text completion models](/docs/concepts/text_llms). 
Many popular models available on Mistral are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/mistral/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "[Mistral AI](https://mistral.ai/) is a platform that offers hosting for their powerful [open source models](https://docs.mistral.ai/getting-started/models/).\n", + "\n", + "This will help you get started with MistralAI completion models (LLMs) using LangChain. For detailed documentation on `MistralAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.MistralAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | PY support | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [MistralAI](https://api.js.langchain.com/classes/langchain_mistralai.MistralAI.html) | [`@langchain/mistralai`](https://www.npmjs.com/package/@langchain/mistralai) | ❌ | ✅ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access MistralAI models you'll need to create a MistralAI account, get an API key, and install the `@langchain/mistralai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [console.mistral.ai](https://console.mistral.ai/) to sign up to MistralAI and generate an API key. Once you've done this set the `MISTRAL_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export MISTRAL_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain MistralAI integration lives in the `@langchain/mistralai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/mistralai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "console.log('hello world');\n", - "```\n" - ] - } - ], - "source": [ - "const suffixResponse = await llm.invoke(\n", - " \"You can print 'hello world' to the console in javascript like this:\\n```javascript\", {\n", - " suffix: \"```\"\n", - " }\n", - ");\n", - "console.log(suffixResponse);" - ] - }, - { - "cell_type": "markdown", - "id": "b9265343", - "metadata": {}, - "source": [ - "As seen in the first example, the model generated the requested `console.log('hello world')` code snippet, but also included extra unwanted text. By adding a suffix, we can constrain the model to only complete the prompt up to the suffix (in this case, three backticks). This allows us to easily parse the completion and extract only the desired response without the suffix using a custom output parser." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "e2d34dc8", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { MistralAI } from \"@langchain/mistralai\"\n", + "\n", + "const llm = new MistralAI({\n", + " model: \"codestral-latest\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " has developed Mistral 7B, a large language model (LLM) that is open-source and available for commercial use. Mistral 7B is a 7 billion parameter model that is trained on a diverse and high-quality dataset, and it has been fine-tuned to perform well on a variety of tasks, including text generation, question answering, and code interpretation.\n", + "\n", + "MistralAI has made Mistral 7B available under a permissive license, allowing anyone to use the model for commercial purposes without having to pay any fees. This has made Mistral 7B a popular choice for businesses and organizations that want to leverage the power of large language models without incurring high costs.\n", + "\n", + "Mistral 7B has been trained on a diverse and high-quality dataset, which has enabled it to perform well on a variety of tasks. It has been fine-tuned to generate coherent and contextually relevant text, and it has been shown to be capable of answering complex questions and interpreting code.\n", + "\n", + "Mistral 7B is also a highly efficient model, capable of processing text at a fast pace. This makes it well-suited for applications that require real-time responses, such as chatbots and virtual assistants.\n", + "\n", + "Overall, Mistral 7B is a powerful and versatile large language model that is open-source and available for commercial use. 
Its ability to perform well on a variety of tasks, its efficiency, and its permissive license make it a popular choice for businesses and organizations that want to leverage the power of large language models.\n" + ] + } + ], + "source": [ + "const inputText = \"MistralAI is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "I love programming.\n", + "\n", + "Ich liebe Programmieren.\n", + "\n", + "In German, the phrase \"I love programming\" is translated as \"Ich liebe Programmieren.\" The word \"programming\" is translated to \"Programmieren,\" and \"I love\" is translated to \"Ich liebe.\"\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "Since the Mistral LLM is a completions model, they also allow you to insert a `suffix` to the prompt. Suffixes can be passed via the call options when invoking a model like so:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "console.log('hello world');\n", - "\n" - ] + "cell_type": "code", + "execution_count": 5, + "id": "ec67551d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "console.log('hello world');\n", + "```\n" + ] + } + ], + "source": [ + "const suffixResponse = await llm.invoke(\n", + " \"You can print 'hello world' to the console in javascript like this:\\n```javascript\", {\n", + " suffix: \"```\"\n", + " }\n", + ");\n", + "console.log(suffixResponse);" + ] + }, + { + "cell_type": "markdown", + "id": "b9265343", + "metadata": {}, + "source": [ + "As seen in the first example, the model generated the requested `console.log('hello world')` code snippet, but also included extra unwanted text. By adding a suffix, we can constrain the model to only complete the prompt up to the suffix (in this case, three backticks). This allows us to easily parse the completion and extract only the desired response without the suffix using a custom output parser." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e2d34dc8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "console.log('hello world');\n", + "\n" + ] + } + ], + "source": [ + "import { MistralAI } from \"@langchain/mistralai\";\n", + "\n", + "const llmForFillInCompletion = new MistralAI({\n", + " model: \"codestral-latest\",\n", + " temperature: 0,\n", + "});\n", + "\n", + "const suffix = \"```\";\n", + "\n", + "const customOutputParser = (input: string) => {\n", + " if (input.includes(suffix)) {\n", + " return input.split(suffix)[0];\n", + " }\n", + " throw new Error(\"Input does not contain suffix.\")\n", + "};\n", + "\n", + "const resWithParser = await llmForFillInCompletion.invoke(\n", + " \"You can print 'hello world' to the console in javascript like this:\\n```javascript\", {\n", + " suffix,\n", + " }\n", + ");\n", + "\n", + "console.log(customOutputParser(resWithParser));" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all MistralAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_mistralai.MistralAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - ], - "source": [ - "import { MistralAI } from \"@langchain/mistralai\";\n", - "\n", - "const llmForFillInCompletion = new MistralAI({\n", - " model: \"codestral-latest\",\n", - " temperature: 0,\n", - "});\n", - "\n", - "const suffix = \"```\";\n", - "\n", - "const customOutputParser = (input: string) => {\n", - " if (input.includes(suffix)) {\n", - " return input.split(suffix)[0];\n", - " }\n", - " throw new Error(\"Input does not contain suffix.\")\n", - "};\n", - "\n", - "const resWithParser = await llmForFillInCompletion.invoke(\n", - " \"You can print 'hello world' to the console in javascript like this:\\n```javascript\", {\n", - " suffix,\n", - " }\n", - ");\n", - "\n", - "console.log(customOutputParser(resWithParser));" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all MistralAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_mistralai.MistralAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" - }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx b/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx index 44d675a77de0..07f0a2271088 100644 --- 
a/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx +++ b/docs/core_docs/docs/integrations/llms/ni_bittensor.mdx @@ -30,5 +30,5 @@ console.log({ res }); ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/ollama.ipynb b/docs/core_docs/docs/integrations/llms/ollama.ipynb index 4b73b88943e7..cf301f0b65f8 100644 --- a/docs/core_docs/docs/integrations/llms/ollama.ipynb +++ b/docs/core_docs/docs/integrations/llms/ollama.ipynb @@ -1,284 +1,284 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Ollama\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# Ollama\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Ollama models as [text completion models](/docs/concepts/#llms). Many popular models available on Ollama are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "You may be looking for [this page instead](/docs/integrations/chat/ollama/).\n", - ":::\n", - "\n", - "```\n", - "\n", - "This will help you get started with Ollama [text completion models (LLMs)](/docs/concepts#llms) using LangChain. For detailed documentation on `Ollama` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.Ollama.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 3, locally.\n", - "\n", - "Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. 
It optimizes setup and configuration details, including GPU usage.\n", - "\n", - "This example goes over how to use LangChain to interact with an Ollama-run Llama 2 7b instance.\n", - "For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/ollama/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`Ollama`](https://api.js.langchain.com/classes/langchain_ollama.Ollama.html) | [`@langchain/ollama`](https://npmjs.com/@langchain/ollama) | ✅ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Ollama embedding models you'll need to follow [these instructions](https://github.com/jmorganca/ollama) to install Ollama, and install the `@langchain/ollama` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain Ollama integration lives in the `@langchain/ollama` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/ollama @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { Ollama } from \"@langchain/ollama\"\n", - "\n", - "const llm = new Ollama({\n", - " model: \"llama3\", // Default value\n", - " temperature: 0,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "I think you meant to say \"Olivia\" instead of \"Ollama\". Olivia is not a well-known AI company, but there are several other AI companies with similar names. 
Here are a few examples:\n", - "\n", - "* Oliva AI: A startup that uses artificial intelligence to help businesses optimize their operations and improve customer experiences.\n", - "* Olivia Technologies: A company that develops AI-powered solutions for industries such as healthcare, finance, and education.\n", - "* Olivia.ai: A platform that uses AI to help businesses automate their workflows and improve productivity.\n", - "\n", - "If you meant something else by \"Ollama\", please let me know and I'll do my best to help!\n" - ] - } - ], - "source": [ - "const inputText = \"Ollama is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Ollama\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "A programmer's passion!\n", - "\n", - "In German, you can express your love for programming with the following phrases:\n", - "\n", - "1. Ich liebe Programmieren: This is a direct translation of \"I love programming.\"\n", - "2. Programmieren ist meine Leidenschaft: This means \"Programming is my passion.\"\n", - "3. Ich bin total verliebt in Programmieren: This translates to \"I'm totally in love with programming.\"\n", - "4. Programmieren macht mich glücklich: This phrase means \"Programming makes me happy\" or \"I'm joyful when programming.\"\n", - "\n", - "If you want to be more casual, you can use:\n", - "\n", - "1. Ich bin ein Programmier-Fan: This is a playful way to say \"I'm a fan of programming.\"\n", - "2. Programmieren ist mein Ding: This translates to \"Programming is my thing\" or \"I'm all about programming.\"\n", - "\n", - "Remember that German has different forms for formal and informal speech, so adjust the phrases according to your relationship with the person you're speaking to!\n" - ] - } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e99eef30", - "metadata": {}, - "source": [ - "## Multimodal models\n", - "\n", - "Ollama supports open source multimodal models like [LLaVA](https://ollama.ai/library/llava) in versions 0.1.15 and up.\n", - "You can bind base64 encoded image data to multimodal-capable models to use as context like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "1ff218e2", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Ollama\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Ollama models as [text completion models](/docs/concepts/text_llms). 
Many popular models available on Ollama are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/ollama/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you get started with Ollama [text completion models (LLMs)](/docs/concepts/text_llms) using LangChain. For detailed documentation on `Ollama` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.Ollama.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 3, locally.\n", + "\n", + "Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage.\n", + "\n", + "This example goes over how to use LangChain to interact with an Ollama-run Llama 2 7b instance.\n", + "For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/ollama/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`Ollama`](https://api.js.langchain.com/classes/langchain_ollama.Ollama.html) | [`@langchain/ollama`](https://npmjs.com/@langchain/ollama) | ✅ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Ollama embedding models you'll need to follow [these instructions](https://github.com/jmorganca/ollama) to install Ollama, and install the `@langchain/ollama` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Ollama integration lives in the `@langchain/ollama` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/ollama @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " The image shows a hot dog placed inside what appears to be a bun that has been specially prepared to resemble a hot dog bun. This is an example of a creative or novelty food item, where the bread used for the bun looks similar to a cooked hot dog itself, playing on the name \"hot dog.\" The image also shows the typical garnishes like ketchup and mustard on the side. 
\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { Ollama } from \"@langchain/ollama\"\n", + "\n", + "const llm = new Ollama({\n", + " model: \"llama3\", // Default value\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I think you meant to say \"Olivia\" instead of \"Ollama\". Olivia is not a well-known AI company, but there are several other AI companies with similar names. Here are a few examples:\n", + "\n", + "* Oliva AI: A startup that uses artificial intelligence to help businesses optimize their operations and improve customer experiences.\n", + "* Olivia Technologies: A company that develops AI-powered solutions for industries such as healthcare, finance, and education.\n", + "* Olivia.ai: A platform that uses AI to help businesses automate their workflows and improve productivity.\n", + "\n", + "If you meant something else by \"Ollama\", please let me know and I'll do my best to help!\n" + ] + } + ], + "source": [ + "const inputText = \"Ollama is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "A programmer's passion!\n", + "\n", + "In German, you can express your love for programming with the following phrases:\n", + "\n", + "1. Ich liebe Programmieren: This is a direct translation of \"I love programming.\"\n", + "2. Programmieren ist meine Leidenschaft: This means \"Programming is my passion.\"\n", + "3. Ich bin total verliebt in Programmieren: This translates to \"I'm totally in love with programming.\"\n", + "4. Programmieren macht mich glücklich: This phrase means \"Programming makes me happy\" or \"I'm joyful when programming.\"\n", + "\n", + "If you want to be more casual, you can use:\n", + "\n", + "1. Ich bin ein Programmier-Fan: This is a playful way to say \"I'm a fan of programming.\"\n", + "2. 
Programmieren ist mein Ding: This translates to \"Programming is my thing\" or \"I'm all about programming.\"\n", + "\n", + "Remember that German has different forms for formal and informal speech, so adjust the phrases according to your relationship with the person you're speaking to!\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "## Multimodal models\n", + "\n", + "Ollama supports open source multimodal models like [LLaVA](https://ollama.ai/library/llava) in versions 0.1.15 and up.\n", + "You can bind base64 encoded image data to multimodal-capable models to use as context like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "1ff218e2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The image shows a hot dog placed inside what appears to be a bun that has been specially prepared to resemble a hot dog bun. This is an example of a creative or novelty food item, where the bread used for the bun looks similar to a cooked hot dog itself, playing on the name \"hot dog.\" The image also shows the typical garnishes like ketchup and mustard on the side. \n" + ] + } + ], + "source": [ + "import { Ollama } from \"@langchain/ollama\";\n", + "import * as fs from \"node:fs/promises\";\n", + "\n", + "const imageData = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n", + "\n", + "const model = new Ollama({\n", + " model: \"llava\",\n", + "}).bind({\n", + " images: [imageData.toString(\"base64\")],\n", + "});\n", + "\n", + "const res = await model.invoke(\"What's in this image?\");\n", + "console.log(res);" + ] + }, + { + "cell_type": "markdown", + "id": "cac0a2dd", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "- LLM [conceptual guide](/docs/concepts/text_llms)\n", + "- LLM [how-to guides](/docs/how_to/#llms)" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `Ollama` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.Ollama.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - ], - "source": [ - "import { Ollama } from \"@langchain/ollama\";\n", - "import * as fs from \"node:fs/promises\";\n", - "\n", - "const imageData = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n", - "\n", - "const model = new Ollama({\n", - " model: \"llava\",\n", - "}).bind({\n", - " images: [imageData.toString(\"base64\")],\n", - "});\n", - "\n", - "const res = await model.invoke(\"What's in this image?\");\n", - "console.log(res);" - ] - }, - { - "cell_type": "markdown", - "id": "cac0a2dd", - 
"metadata": {}, - "source": [ - "## Related\n", - "\n", - "- LLM [conceptual guide](/docs/concepts/#llms)\n", - "- LLM [how-to guides](/docs/how_to/#llms)" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `Ollama` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.Ollama.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" - }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/openai.ipynb b/docs/core_docs/docs/integrations/llms/openai.ipynb index 81105d7c5eb3..a0ae65f8d079 100644 --- a/docs/core_docs/docs/integrations/llms/openai.ipynb +++ b/docs/core_docs/docs/integrations/llms/openai.ipynb @@ -1,262 +1,262 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": {}, - "source": [ - "---\n", - "sidebar_label: OpenAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# OpenAI\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of OpenAI [text completion models](/docs/concepts/#llms). The latest and most popular OpenAI models are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/openai/).\n", - ":::\n", - "\n", - "```\n", - "\n", - "[OpenAI](https://en.wikipedia.org/wiki/OpenAI) is an artificial intelligence (AI) research laboratory.\n", - "\n", - "This will help you get started with OpenAI completion models (LLMs) using LangChain. For detailed documentation on `OpenAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.OpenAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/openai) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [OpenAI](https://api.js.langchain.com/classes/langchain_openai.OpenAI.html) | [@langchain/openai](https://www.npmjs.com/package/@langchain/openai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [platform.openai.com](https://platform.openai.com/) to sign up to OpenAI and generate an API key. 
Once you've done this set the `OPENAI_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export OPENAI_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain OpenAI integration lives in the `@langchain/openai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { OpenAI } from \"@langchain/openai\"\n", - "\n", - "const llm = new OpenAI({\n", - " model: \"gpt-3.5-turbo-instruct\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " timeout: undefined,\n", - " maxRetries: 2,\n", - " apiKey: process.env.OPENAI_API_KEY,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "develops and promotes friendly AI for the benefit of humanity. It was founded in 2015 by Elon Musk, Sam Altman, Greg Brockman, Ilya Sutskever, Wojciech Zaremba, John Schulman, and Chris Olah. The company's mission is to create and promote artificial general intelligence (AGI) that is safe and beneficial to humanity.\n", - "\n", - "OpenAI conducts research in various areas of AI, including deep learning, reinforcement learning, robotics, and natural language processing. The company also develops and releases open-source tools and platforms for AI research, such as the GPT-3 language model and the Gym toolkit for reinforcement learning.\n", - "\n", - "One of the main goals of OpenAI is to ensure that the development of AI is aligned with human values and does not pose a threat to humanity. To this end, the company has established a set of principles for safe and ethical AI development, and it actively collaborates with other organizations and researchers in the field.\n", - "\n", - "OpenAI has received funding from various sources, including tech giants like Microsoft and Amazon, as well as individual investors. 
It has also partnered with companies and organizations such as Google, IBM, and the United Nations to advance its research and promote responsible AI development.\n", - "\n", - "In addition to its research and development\n" - ] - } - ], - "source": [ - "const inputText = \"OpenAI is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: OpenAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# OpenAI\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of OpenAI [text completion models](/docs/concepts/text_llms). The latest and most popular OpenAI models are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/openai/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "[OpenAI](https://en.wikipedia.org/wiki/OpenAI) is an artificial intelligence (AI) research laboratory.\n", + "\n", + "This will help you get started with OpenAI completion models (LLMs) using LangChain. For detailed documentation on `OpenAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.OpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/openai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [OpenAI](https://api.js.langchain.com/classes/langchain_openai.OpenAI.html) | [@langchain/openai](https://www.npmjs.com/package/@langchain/openai) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [platform.openai.com](https://platform.openai.com/) to sign up to OpenAI and generate an API key. 
Once you've done this set the `OPENAI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain OpenAI integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new OpenAI({\n", + " model: \"gpt-3.5-turbo-instruct\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " apiKey: process.env.OPENAI_API_KEY,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Ich liebe Programmieren.\n" - ] + "cell_type": "code", + "execution_count": 2, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "develops and promotes friendly AI for the benefit of humanity. It was founded in 2015 by Elon Musk, Sam Altman, Greg Brockman, Ilya Sutskever, Wojciech Zaremba, John Schulman, and Chris Olah. The company's mission is to create and promote artificial general intelligence (AGI) that is safe and beneficial to humanity.\n", + "\n", + "OpenAI conducts research in various areas of AI, including deep learning, reinforcement learning, robotics, and natural language processing. The company also develops and releases open-source tools and platforms for AI research, such as the GPT-3 language model and the Gym toolkit for reinforcement learning.\n", + "\n", + "One of the main goals of OpenAI is to ensure that the development of AI is aligned with human values and does not pose a threat to humanity. To this end, the company has established a set of principles for safe and ethical AI development, and it actively collaborates with other organizations and researchers in the field.\n", + "\n", + "OpenAI has received funding from various sources, including tech giants like Microsoft and Amazon, as well as individual investors. 
It has also partnered with companies and organizations such as Google, IBM, and the United Nations to advance its research and promote responsible AI development.\n", + "\n", + "In addition to its research and development\n" + ] + } + ], + "source": [ + "const inputText = \"OpenAI is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Ich liebe Programmieren.\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = new PromptTemplate({\n", + " template: \"How to say {input} in {output_language}:\\n\",\n", + " inputVariables: [\"input\", \"output_language\"],\n", + "})\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "If you're part of an organization, you can set `process.env.OPENAI_ORGANIZATION` to your OpenAI organization id, or pass it in as `organization` when\n", + "initializing the model.\n", + "\n", + "## Custom URLs\n", + "\n", + "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d958ab00", + "metadata": {}, + "outputs": [], + "source": [ + "const llmCustomURL = new OpenAI({\n", + " temperature: 0.9,\n", + " configuration: {\n", + " baseURL: \"https://your_custom_url.com\",\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "81a5e2ea", + "metadata": {}, + "source": [ + "You can also pass other `ClientOptions` parameters accepted by the official SDK.\n", + "\n", + "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/llms/azure).\n" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all OpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.OpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = new PromptTemplate({\n", - " template: \"How to say {input} in {output_language}:\\n\",\n", - " inputVariables: [\"input\", \"output_language\"],\n", - "})\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": 
"e99eef30", - "metadata": {}, - "source": [ - "If you're part of an organization, you can set `process.env.OPENAI_ORGANIZATION` to your OpenAI organization id, or pass it in as `organization` when\n", - "initializing the model.\n", - "\n", - "## Custom URLs\n", - "\n", - "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d958ab00", - "metadata": {}, - "outputs": [], - "source": [ - "const llmCustomURL = new OpenAI({\n", - " temperature: 0.9,\n", - " configuration: {\n", - " baseURL: \"https://your_custom_url.com\",\n", - " },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "81a5e2ea", - "metadata": {}, - "source": [ - "You can also pass other `ClientOptions` parameters accepted by the official SDK.\n", - "\n", - "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/llms/azure).\n" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all OpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.OpenAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" - }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx b/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx index 29a7a9d5cf8a..14ef8f9ecc80 100644 --- a/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx +++ b/docs/core_docs/docs/integrations/llms/prompt_layer_openai.mdx @@ -57,5 +57,5 @@ The request and the response will be logged in the [PromptLayer dashboard](https ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/raycast.mdx b/docs/core_docs/docs/integrations/llms/raycast.mdx index 12ec3de9d69f..3a9dab7e8781 100644 --- a/docs/core_docs/docs/integrations/llms/raycast.mdx +++ b/docs/core_docs/docs/integrations/llms/raycast.mdx @@ -32,5 +32,5 @@ const model = new RaycastAI({ ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/replicate.mdx b/docs/core_docs/docs/integrations/llms/replicate.mdx index 1b7ca5f9b70d..6f0fb0fb0add 100644 --- a/docs/core_docs/docs/integrations/llms/replicate.mdx +++ b/docs/core_docs/docs/integrations/llms/replicate.mdx @@ -22,5 +22,5 @@ You can find a full list of models on [Replicate's website](https://replicate.co ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/together.ipynb b/docs/core_docs/docs/integrations/llms/together.ipynb index 
d028a6404652..ebee5f5c4682 100644 --- a/docs/core_docs/docs/integrations/llms/together.ipynb +++ b/docs/core_docs/docs/integrations/llms/together.ipynb @@ -1,251 +1,251 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Together AI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# TogetherAI\n", - "\n", - ":::caution\n", - "You are currently on a page documenting the use of Together AI models as [text completion models](/docs/concepts/#llms). Many popular models available on Together AI are [chat completion models](/docs/concepts/#chat-models).\n", - "\n", - "You may be looking for [this page instead](/docs/integrations/chat/togetherai/).\n", - ":::\n", - "\n", - "[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.\n", - "\n", - "This will help you get started with Together AI [text completion models (LLMs)](/docs/concepts#llms) using LangChain. For detailed documentation on `TogetherAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_togetherai.TogetherAI.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/together/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`TogetherAI`](https://api.js.langchain.com/classes/langchain_community_llms_togetherai.TogetherAI.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access `ChatTogetherAI` models you'll need to create a Together account, get an API key [here](https://api.together.xyz/), and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [api.together.ai](https://api.together.ai/) to sign up to TogetherAI and generate an API key. 
Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain TogetherAI integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "a0562a13", - "metadata": {}, - "outputs": [], - "source": [ - "import { TogetherAI } from \"@langchain/community/llms/togetherai\";\n", - "\n", - "const llm = new TogetherAI({\n", - " model: \"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo\",\n", - " maxTokens: 256,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "035dea0f", - "metadata": { - "tags": [] - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - " offers a range of AI-powered solutions to help businesses and organizations improve their customer service, sales, and marketing efforts. Their platform uses natural language processing (NLP) and machine learning algorithms to analyze customer interactions and provide insights and recommendations to help businesses improve their customer experience.\n", - "Together's solutions include:\n", - "1. Customer Service: Together's customer service solution uses AI to analyze customer interactions and provide insights and recommendations to help businesses improve their customer experience. This includes analyzing customer feedback, sentiment analysis, and predictive analytics to identify areas for improvement.\n", - "2. Sales: Together's sales solution uses AI to analyze customer interactions and provide insights and recommendations to help businesses improve their sales efforts. This includes analyzing customer behavior, sentiment analysis, and predictive analytics to identify opportunities for upselling and cross-selling.\n", - "3. Marketing: Together's marketing solution uses AI to analyze customer interactions and provide insights and recommendations to help businesses improve their marketing efforts. This includes analyzing customer behavior, sentiment analysis, and predictive analytics to identify areas for improvement.\n", - "Together's platform is designed to be easy to use and integrates with a range of popular CRM and marketing automation tools. 
Their solutions are available as a cloud-based subscription service, making it easy for businesses to get started with AI-powered customer service, sales, and marketing.\n", - "Overall,\n" - ] - } - ], - "source": [ - "const inputText = \"Together is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "078e9db2", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Together AI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# TogetherAI\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Together AI models as [text completion models](/docs/concepts/text_llms). Many popular models available on Together AI are [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/togetherai/).\n", + ":::\n", + "\n", + "[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.\n", + "\n", + "This will help you get started with Together AI [text completion models (LLMs)](/docs/concepts/text_llms) using LangChain. For detailed documentation on `TogetherAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_togetherai.TogetherAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/together/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`TogetherAI`](https://api.js.langchain.com/classes/langchain_community_llms_togetherai.TogetherAI.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access `ChatTogetherAI` models you'll need to create a Together account, get an API key [here](https://api.together.xyz/), and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [api.together.ai](https://api.together.ai/) to sign up to TogetherAI and generate an API key. 
Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain TogetherAI integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Ich liebe Programmieren.\n", - "\n", - "How to say I love programming. in French:\n", - "J'adore programmer.\n", - "\n", - "How to say I love programming. in Spanish:\n", - "Me encanta programar.\n", - "\n", - "How to say I love programming. in Italian:\n", - "Mi piace programmare.\n", - "\n", - "How to say I love programming. in Portuguese:\n", - "Eu amo programar.\n", - "\n", - "How to say I love programming. in Russian:\n", - "Я люблю программирование.\n", - "\n", - "How to say I love programming. in Japanese:\n", - "私はプログラミングが好きです。\n", - "\n", - "How to say I love programming. in Chinese:\n", - "我喜欢编程。\n", - "\n", - "How to say I love programming. in Korean:\n", - "나는 프로그래밍을 좋아합니다.\n", - "\n", - "How to say I love programming. in Arabic:\n", - "أنا أحب البرمجة.\n", - "\n", - "How to say I love programming. in Hebrew:\n", - "אני אוהבת לתכנת.\n", - "\n", - "How to say I love programming. in Hindi:\n", - "\n", - "मुझे प्रोग्रामिंग पसंद है।\n", - "\n", - "\n", - "\n", - "I hope this helps you express your love for programming in different languages!\n" - ] + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { TogetherAI } from \"@langchain/community/llms/togetherai\";\n", + "\n", + "const llm = new TogetherAI({\n", + " model: \"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo\",\n", + " maxTokens: 256,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " offers a range of AI-powered solutions to help businesses and organizations improve their customer service, sales, and marketing efforts. Their platform uses natural language processing (NLP) and machine learning algorithms to analyze customer interactions and provide insights and recommendations to help businesses improve their customer experience.\n", + "Together's solutions include:\n", + "1. Customer Service: Together's customer service solution uses AI to analyze customer interactions and provide insights and recommendations to help businesses improve their customer experience. 
This includes analyzing customer feedback, sentiment analysis, and predictive analytics to identify areas for improvement.\n", + "2. Sales: Together's sales solution uses AI to analyze customer interactions and provide insights and recommendations to help businesses improve their sales efforts. This includes analyzing customer behavior, sentiment analysis, and predictive analytics to identify opportunities for upselling and cross-selling.\n", + "3. Marketing: Together's marketing solution uses AI to analyze customer interactions and provide insights and recommendations to help businesses improve their marketing efforts. This includes analyzing customer behavior, sentiment analysis, and predictive analytics to identify areas for improvement.\n", + "Together's platform is designed to be easy to use and integrates with a range of popular CRM and marketing automation tools. Their solutions are available as a cloud-based subscription service, making it easy for businesses to get started with AI-powered customer service, sales, and marketing.\n", + "Overall,\n" + ] + } + ], + "source": [ + "const inputText = \"Together is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ich liebe Programmieren.\n", + "\n", + "How to say I love programming. in French:\n", + "J'adore programmer.\n", + "\n", + "How to say I love programming. in Spanish:\n", + "Me encanta programar.\n", + "\n", + "How to say I love programming. in Italian:\n", + "Mi piace programmare.\n", + "\n", + "How to say I love programming. in Portuguese:\n", + "Eu amo programar.\n", + "\n", + "How to say I love programming. in Russian:\n", + "Я люблю программирование.\n", + "\n", + "How to say I love programming. in Japanese:\n", + "私はプログラミングが好きです。\n", + "\n", + "How to say I love programming. in Chinese:\n", + "我喜欢编程。\n", + "\n", + "How to say I love programming. in Korean:\n", + "나는 프로그래밍을 좋아합니다.\n", + "\n", + "How to say I love programming. in Arabic:\n", + "أنا أحب البرمجة.\n", + "\n", + "How to say I love programming. in Hebrew:\n", + "אני אוהבת לתכנת.\n", + "\n", + "How to say I love programming. 
in Hindi:\n", + "\n", + "मुझे प्रोग्रामिंग पसंद है।\n", + "\n", + "\n", + "\n", + "I hope this helps you express your love for programming in different languages!\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `TogetherAi` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_llms_togetherai.TogetherAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - ], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `TogetherAi` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_llms_togetherai.TogetherAI.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" - }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx b/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx index da71e6ea09d7..090a0acb274e 100644 --- a/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx +++ b/docs/core_docs/docs/integrations/llms/watsonx_ai.mdx @@ -36,5 +36,5 @@ import WatsonxAiExample from "@examples/llms/watsonx_ai.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/writer.mdx b/docs/core_docs/docs/integrations/llms/writer.mdx index f7999c947f90..43e1e7d9827f 100644 --- a/docs/core_docs/docs/integrations/llms/writer.mdx +++ b/docs/core_docs/docs/integrations/llms/writer.mdx @@ -29,5 +29,5 @@ import WriterExample from "@examples/models/llm/writer.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual 
guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/llms/yandex.mdx b/docs/core_docs/docs/integrations/llms/yandex.mdx index 4ef8cca2d80f..076b7b412219 100644 --- a/docs/core_docs/docs/integrations/llms/yandex.mdx +++ b/docs/core_docs/docs/integrations/llms/yandex.mdx @@ -30,5 +30,5 @@ import YandexGPTExample from "@examples/models/llm/yandex.ts"; ## Related -- LLM [conceptual guide](/docs/concepts/#llms) +- LLM [conceptual guide](/docs/concepts/text_llms) - LLM [how-to guides](/docs/how_to/#llms) diff --git a/docs/core_docs/docs/integrations/retrievers/bedrock-knowledge-bases.ipynb b/docs/core_docs/docs/integrations/retrievers/bedrock-knowledge-bases.ipynb index d0d0c2af9815..3b7594e8ce11 100644 --- a/docs/core_docs/docs/integrations/retrievers/bedrock-knowledge-bases.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/bedrock-knowledge-bases.ipynb @@ -1,272 +1,272 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Knowledge Bases for Amazon Bedrock\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# Knowledge Bases for Amazon Bedrock\n", + "\n", + "## Overview\n", + "\n", + "This will help you getting started with the [AmazonKnowledgeBaseRetriever](/docs/concepts/retrievers). For detailed documentation of all AmazonKnowledgeBaseRetriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKnowledgeBaseRetriever.html).\n", + "\n", + "Knowledge Bases for Amazon Bedrock is a fully managed support for end-to-end RAG workflow provided by Amazon Web Services (AWS).\n", + "It provides an entire ingestion workflow of converting your documents into embeddings (vector) and storing the embeddings in a specialized vector database.\n", + "Knowledge Bases for Amazon Bedrock supports popular databases for vector storage, including vector engine for Amazon OpenSearch Serverless, Pinecone, Redis Enterprise Cloud, Amazon Aurora (coming soon), and MongoDB (coming soon).\n", + "\n", + "### Integration details\n", + "\n", + "| Retriever | Self-host | Cloud offering | Package | [Py support](https://python.langchain.com/docs/integrations/retrievers/bedrock/) |\n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "[AmazonKnowledgeBaseRetriever](https://api.js.langchain.com/classes/langchain_aws.AmazonKnowledgeBaseRetriever.html) | 🟠 (see details below) | ✅ | @langchain/aws | ✅ |\n", + "\n", + "> AWS Knowledge Base Retriever can be 'self hosted' in the sense you can run it on your own AWS infrastructure. However it is not possible to run on another cloud provider or on-premises.\n", + "\n", + "## Setup\n", + "\n", + "In order to use the AmazonKnowledgeBaseRetriever, you need to have an AWS account, where you can manage your indexes and documents. 
Once you've setup your account, set the following environment variables:\n", + "\n", + "```bash\n", + "process.env.AWS_KNOWLEDGE_BASE_ID=your-knowledge-base-id\n", + "process.env.AWS_ACCESS_KEY_ID=your-access-key-id\n", + "process.env.AWS_SECRET_ACCESS_KEY=your-secret-access-key\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "72ee0c4b-9764-423a-9dbf-95129e185210", + "metadata": {}, + "source": [ + "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de", + "metadata": {}, + "outputs": [], + "source": [ + "// process.env.LANGSMITH_API_KEY = \"\";\n", + "// process.env.LANGSMITH_TRACING = \"true\";" + ] + }, + { + "cell_type": "markdown", + "id": "0730d6a1-c893-4840-9817-5e5251676d5d", + "metadata": {}, + "source": [ + "### Installation\n", + "\n", + "This retriever lives in the `@langchain/aws` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/aws @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our retriever:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", + "metadata": {}, + "outputs": [], + "source": [ + "import { AmazonKnowledgeBaseRetriever } from \"@langchain/aws\";\n", + "\n", + "const retriever = new AmazonKnowledgeBaseRetriever({\n", + " topK: 10,\n", + " knowledgeBaseId: process.env.AWS_KNOWLEDGE_BASE_ID,\n", + " region: \"us-east-2\",\n", + " clientOptions: {\n", + " credentials: {\n", + " accessKeyId: process.env.AWS_ACCESS_KEY_ID,\n", + " secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,\n", + " },\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f2839-4020-424e-9fc9-07777eede442", + "metadata": {}, + "source": [ + "## Usage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", + "metadata": {}, + "outputs": [], + "source": [ + "const query = \"...\"\n", + "\n", + "await retriever.invoke(query);" + ] + }, + { + "cell_type": "markdown", + "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", + "metadata": {}, + "source": [ + "## Use within a chain\n", + "\n", + "Like other retrievers, AmazonKnowledgeBaseRetriever can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", + "\n", + "We will need a LLM or chat model:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnablePassthrough, 
RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`\n", + "Answer the question based only on the context provided.\n", + "\n", + "Context: {context}\n", + "\n", + "Question: {question}`);\n", + "\n", + "const formatDocs = (docs: Document[]) => {\n", + " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", + "}\n", + "\n", + "// See https://js.langchain.com/docs/tutorials/rag\n", + "const ragChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocs),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser(),\n", + "]);" + ] + }, + { + "cell_type": "markdown", + "id": "22b1d6f8", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "\n", + ":::tip\n", + "\n", + "See [our RAG tutorial](docs/tutorials/rag) for more information and examples on `RunnableSequence`'s like the one above.\n", + "\n", + ":::\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", + "metadata": {}, + "outputs": [], + "source": [ + "await ragChain.invoke(\"...\")" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all AmazonKnowledgeBaseRetriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKnowledgeBaseRetriever.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "typescript", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.4" } - }, - "source": [ - "---\n", - "sidebar_label: Knowledge Bases for Amazon Bedrock\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# Knowledge Bases for Amazon Bedrock\n", - "\n", - "## Overview\n", - "\n", - "This will help you getting started with the [AmazonKnowledgeBaseRetriever](/docs/concepts/#retrievers). 
For detailed documentation of all AmazonKnowledgeBaseRetriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKnowledgeBaseRetriever.html).\n", - "\n", - "Knowledge Bases for Amazon Bedrock is a fully managed support for end-to-end RAG workflow provided by Amazon Web Services (AWS).\n", - "It provides an entire ingestion workflow of converting your documents into embeddings (vector) and storing the embeddings in a specialized vector database.\n", - "Knowledge Bases for Amazon Bedrock supports popular databases for vector storage, including vector engine for Amazon OpenSearch Serverless, Pinecone, Redis Enterprise Cloud, Amazon Aurora (coming soon), and MongoDB (coming soon).\n", - "\n", - "### Integration details\n", - "\n", - "| Retriever | Self-host | Cloud offering | Package | [Py support](https://python.langchain.com/docs/integrations/retrievers/bedrock/) |\n", - "| :--- | :--- | :---: | :---: | :---: |\n", - "[AmazonKnowledgeBaseRetriever](https://api.js.langchain.com/classes/langchain_aws.AmazonKnowledgeBaseRetriever.html) | 🟠 (see details below) | ✅ | @langchain/aws | ✅ |\n", - "\n", - "> AWS Knowledge Base Retriever can be 'self hosted' in the sense you can run it on your own AWS infrastructure. However it is not possible to run on another cloud provider or on-premises.\n", - "\n", - "## Setup\n", - "\n", - "In order to use the AmazonKnowledgeBaseRetriever, you need to have an AWS account, where you can manage your indexes and documents. Once you've setup your account, set the following environment variables:\n", - "\n", - "```bash\n", - "process.env.AWS_KNOWLEDGE_BASE_ID=your-knowledge-base-id\n", - "process.env.AWS_ACCESS_KEY_ID=your-access-key-id\n", - "process.env.AWS_SECRET_ACCESS_KEY=your-secret-access-key\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "72ee0c4b-9764-423a-9dbf-95129e185210", - "metadata": {}, - "source": [ - "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de", - "metadata": {}, - "outputs": [], - "source": [ - "// process.env.LANGSMITH_API_KEY = \"\";\n", - "// process.env.LANGSMITH_TRACING = \"true\";" - ] - }, - { - "cell_type": "markdown", - "id": "0730d6a1-c893-4840-9817-5e5251676d5d", - "metadata": {}, - "source": [ - "### Installation\n", - "\n", - "This retriever lives in the `@langchain/aws` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/aws @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our retriever:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", - "metadata": {}, - "outputs": [], - "source": [ - "import { AmazonKnowledgeBaseRetriever } from \"@langchain/aws\";\n", - "\n", - "const retriever = new AmazonKnowledgeBaseRetriever({\n", - " topK: 10,\n", - " knowledgeBaseId: process.env.AWS_KNOWLEDGE_BASE_ID,\n", - " region: \"us-east-2\",\n", - " clientOptions: {\n", - " credentials: {\n", - " accessKeyId: process.env.AWS_ACCESS_KEY_ID,\n", - " 
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,\n", - " },\n", - " },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "5c5f2839-4020-424e-9fc9-07777eede442", - "metadata": {}, - "source": [ - "## Usage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", - "metadata": {}, - "outputs": [], - "source": [ - "const query = \"...\"\n", - "\n", - "await retriever.invoke(query);" - ] - }, - { - "cell_type": "markdown", - "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", - "metadata": {}, - "source": [ - "## Use within a chain\n", - "\n", - "Like other retrievers, AmazonKnowledgeBaseRetriever can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", - "\n", - "We will need a LLM or chat model:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`\n", - "Answer the question based only on the context provided.\n", - "\n", - "Context: {context}\n", - "\n", - "Question: {question}`);\n", - "\n", - "const formatDocs = (docs: Document[]) => {\n", - " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", - "}\n", - "\n", - "// See https://js.langchain.com/docs/tutorials/rag\n", - "const ragChain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe(formatDocs),\n", - " question: new RunnablePassthrough(),\n", - " },\n", - " prompt,\n", - " llm,\n", - " new StringOutputParser(),\n", - "]);" - ] - }, - { - "cell_type": "markdown", - "id": "22b1d6f8", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "\n", - ":::tip\n", - "\n", - "See [our RAG tutorial](docs/tutorials/rag) for more information and examples on `RunnableSequence`'s like the one above.\n", - "\n", - ":::\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", - "metadata": {}, - "outputs": [], - "source": [ - "await ragChain.invoke(\"...\")" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all AmazonKnowledgeBaseRetriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKnowledgeBaseRetriever.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "typescript", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.4" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx index f6fe2743d402..f5fc5f510095 100644 --- a/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx +++ b/docs/core_docs/docs/integrations/retrievers/chaindesk-retriever.mdx @@ -19,5 +19,5 @@ import Example from "@examples/retrievers/chaindesk.ts"; ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx b/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx index e9766ddbe0eb..6f332d239414 100644 --- a/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx +++ b/docs/core_docs/docs/integrations/retrievers/chatgpt-retriever-plugin.mdx @@ -32,5 +32,5 @@ console.log(docs); ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/dria.mdx b/docs/core_docs/docs/integrations/retrievers/dria.mdx index a060403739d2..5a29ebc004eb 100644 --- a/docs/core_docs/docs/integrations/retrievers/dria.mdx +++ b/docs/core_docs/docs/integrations/retrievers/dria.mdx @@ -40,5 +40,5 @@ import Example from "@examples/retrievers/dria.ts"; ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/exa.ipynb b/docs/core_docs/docs/integrations/retrievers/exa.ipynb index d184f0aafd4b..189159d73db0 100644 --- a/docs/core_docs/docs/integrations/retrievers/exa.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/exa.ipynb @@ -1,355 +1,355 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Exa\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# ExaRetriever\n", - "\n", - "## Overview\n", - "\n", - "[Exa](https://exa.ai/) is a search engine that retrieves relevant content from the web given some input query.\n", - "\n", - "This guide will help you getting started with the Exa [retriever](/docs/concepts/#retrievers). For detailed documentation of all `ExaRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_exa.ExaRetriever.html).\n", - "\n", - "### Integration details\n", - "\n", - "| Retriever | Source | Package |\n", - "| :--- | :--- | :---: |\n", - "[ExaRetriever](https://api.js.langchain.com/classes/langchain_exa.ExaRetriever.html) | Information on the web. 
| [`@langchain/exa`](https://www.npmjs.com/package/@langchain/exa) |\n", - "\n", - "## Setup\n", - "\n", - "You'll need to set your API key as an environment variable.\n", - "\n", - "The `Exa` class defaults to `EXASEARCH_API_KEY` when searching for your API key.\n", - "\n", - "```typescript\n", - "process.env.EXASEARCH_API_KEY=\"\";\n", - "```\n", - "\n", - "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGSMITH_API_KEY = \"\";\n", - "// process.env.LANGSMITH_TRACING = \"true\";\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "This retriever lives in the `@langchain/exa` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/exa @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our retriever:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", - "metadata": {}, - "outputs": [], - "source": [ - "import { ExaRetriever } from \"@langchain/exa\";\n", - "import Exa from \"exa-js\";\n", - "\n", - "const retriever = new ExaRetriever({\n", - " // @lc-ts-ignore\n", - " client: new Exa(\n", - " process.env.EXASEARCH_API_KEY // default API key\n", - " ),\n", - " searchArgs: {\n", - " numResults: 2,\n", - " }\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "5c5f2839-4020-424e-9fc9-07777eede442", - "metadata": {}, - "source": [ - "## Usage" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'President Biden’s State of the Union Address\\n' +\n", - " 'Madam Speaker, Madam Vice President, and our First Lady and Second Gentleman, members of Congress and the Cabinet, Justices of the Supreme Court, my fellow Americans: Last year, COVID-19 kept us apart. This year, we’re finally together again.\\n' +\n", - " 'Tonight — tonight we meet as Democrats, Republicans, and independents, but, most importantly, as Americans with a duty to one another, to America, to the American people, and to the Constitution, and an unwavering resolve that freedom will always triumph over tyranny.\\n' +\n", - " 'Six — thank you. Six days ago, Russia’s Vladimir Putin sought to shake the very foundations of the free world, thinking he could make it bend to his menacing ways. But he badly miscalculated. He thought he could roll into Ukraine and the world would roll over. Instead, he met with a wall of strength he never anticipated or imagined. He met the Ukrainian people.\\n' +\n", - " 'From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination literally inspires the world. Groups of citizens blocking tanks with their bodies. 
Everyone from students to retirees, to teachers turned soldiers defending their homeland.\\n' +\n", - " 'And in this struggle — President Zelenskyy said in his speech to the European Parliament, “Light will win over darkness.”\\n' +\n", - " 'The Ukrainian Ambassador to the United States is here tonight sitting with the First Lady. Let each of us, if you’re able to stand, stand and send an unmistakable signal to the world and Ukraine. Thank you. Thank you, thank you, thank you.\\n' +\n", - " 'She’s bright, she’s strong, and she’s resolved.\\n' +\n", - " 'Yes. We, the United States of America, stand with the Ukrainian people.\\n' +\n", - " 'Throughout our history, we’ve learned this lesson: When dictators do not pay a price for their aggression, they cause more chaos; they keep moving; and the costs, the threats to the America — and America, to the world keeps rising.\\n' +\n", - " 'That’s why the NATO Alliance was created: to secure peace and stability in Europe after World War Two.\\n' +\n", - " 'The United States is a member, along with 29 other nations. It matters. American diplomacy matters. American resolve matters.\\n' +\n", - " 'Putin’s latest attack on Ukraine was premeditated and totally unprovoked. He rejected repeated efforts at diplomacy.\\n' +\n", - " 'He thought the West and NATO wouldn’t respond. He thought he could divide us at home, in this chamber, in this nation. He thought he could divide us in Europe as well.\\n' +\n", - " 'But Putin was wrong. We are ready. We are united. And that’s what we did: We stayed united.\\n' +\n", - " 'We prepared extensively and carefully. We spent months building coalitions of other freedom-loving nations in Europe and the Americas to — from America to the Asian and African continents to confront Putin.\\n' +\n", - " 'Like many of you, I spent countless hours unifying our European Allies.\\n' +\n", - " 'We shared with the world, in advance, what we knew Putin was planning and precisely how he would try to falsely and justify his aggression.\\n' +\n", - " 'We countered Russia’s lies with the truth. And now — now that he’s acted, the free world is holding him accountable, along with 27 members of the European Union — including France, Germany, Italy — as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others. Even Switzerland are inflicting pain on Russia and supporting the people of Ukraine.\\n' +\n", - " 'Putin is now isolated from the world more than he has ever been.\\n' +\n", - " 'Together. Together. Together, along with our Allies, we are right now enforcing powerful economic sanctions. We’re cutting off Russia’s largest banks from the international financial system; preventing Russia’s Central Bank from defending the Russian ruble, making Putin’s $630 billion war fund worthless. We’re choking Russia’s access, we’re choking Russia’s access to technology that will sap its economic strength and weaken its military for years to come.\\n' +\n", - " 'Tonight, I say to the Russian oligarchs and the corrupt leaders who’ve bilked billions of dollars off this violent regime: No more.\\n' +\n", - " 'The United States — I mean it. The United States Department of Justice is assembling a dedicated task force to go after the crimes of the Russian oligarchs.\\n' +\n", - " 'We’re joining with European Allies to find and seize their yachts, their luxury apartments, their private jets. 
We’re coming for your ill-begotten gains.\\n' +\n", - " 'And, tonight, I’m announcing that we will join our Allies in closing off American air space to all Russian flights, further isolating Russia and adding an additional squeeze on their economy.\\n' +\n", - " 'He has no idea what’s coming.\\n' +\n", - " 'The ruble has already lost 30 percent of its value, the Russian stock market has lost 40 percent of its value, and trading remains suspended.\\n' +\n", - " 'The Russian economy is reeling, and Putin alone is the one to blame.\\n' +\n", - " 'Together with our Allies, we’re providing support to the Ukrainians in their fight for freedom: military assistance, economic assistance, humanitarian assistance. We’re giving more than a billion dollars in direct assistance to Ukraine. And we’ll continue to aid the Ukrainian people as they defend their country and help ease their suffering.\\n' +\n", - " 'But let me be clear: Our forces are not engaged and will not engage in the conflict with Russian forces in Ukraine. Our forces are not going to Europe to fight in Ukraine but to defend our NATO Allies in the event that Putin decides to keep moving west.\\n' +\n", - " 'For that purpose, we have mobilized American ground forces, air squadrons, ship deployments to protect NATO countries, including Poland, Romania, Latvia, Lithuania, and Estonia.\\n' +\n", - " 'And as I’ve made crystal clear, the United States and our Allies will defend every inch of territory that is NATO territory with the full force of our collective power — every single inch.\\n' +\n", - " 'And we’re clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days, weeks, and months will be hard on them.\\n' +\n", - " 'Putin has unleashed violence and chaos. But while he may make gains on the battlefield, he will pay a continuing high price over the long run.\\n' +\n", - " 'And a pound of Ukrainian people — the proud, proud people — pound for pound, ready to fight with every inch of (inaudible) they have. They’ve known 30 years of independence — have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.\\n' +\n", - " 'To all Americans, I’ll be honest with you, as I’ve always promised I would be. A Russian dictator infa- — invading a foreign country has costs around the world. And I’m taking robust action to make sure the pain of our sanctions is targeted at the Russian economy and that we use every tool at our disposal to protect American businesses and consumers.\\n' +\n", - " 'Tonight, I can announce the United States has worked with 30 other countries to release 60 million barrels of oil from reserves around the world. America will lead that effort, releasing 30 million barrels of our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, united with our Allies.\\n' +\n", - " 'These steps will help blunt gas prices here at home. But I know news about what’s happening can seem alarming to all Americans. But I want you to know: We’re going to be okay. 
We’re going to be okay.\\n' +\n", - " 'When the history of this era is written, Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger.\\n' +\n", - " 'While it shouldn’t and while it shouldn’t have taken something so terrible for people around the world to see what’s at stake, now everyone sees it clearly.\\n' +\n", - " 'We see the unity among leaders of nations, a more unified Europe, a more unified West.\\n' +\n", - " 'We see unity among the people who are gathering in cities in large crowds around the world, even in Russia, to demonstrate their support for the people of Ukraine.\\n' +\n", - " 'In the battle between democracy and autocracies, democracies are rising to the moment and the world is clearly choosing the side of peace and security.\\n' +\n", - " 'This is the real test, and it’s going to take time. So, let us continue to draw inspiration from the iron will of the Ukrainian people.\\n' +\n", - " 'To our fellow Ukrainian Americans who forged a deep bond that connects our two nations: We stand with you. We stand with you.\\n' +\n", - " 'Putin may circle Kyiv with tanks, but he’ll never gain the hearts and souls of Ukrainian people. He’ll never — he’ll never extinguish their love of freedom. And he will never, never weaken the resolve of the free world.\\n' +\n", - " 'We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. The pandemic has been punishing. And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more.\\n' +\n", - " 'I understand, like many of you did. My dad had to leave his home in Scranton, Pennsylvania, to find work. So, like many of you, I grew up in a family when the price of food went up, it was felt throughout the family; it had an impact.\\n' +\n", - " 'That’s why one of the first things I did as President was fight to pass the American Rescue Plan, because people were hurting. We needed to act and we did.\\n' +\n", - " 'American Rescue Plan \\n' +\n", - " 'Few pieces of legislation have done more at a critical moment in our history to lift us out of a crisis. It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief to tens of millions of Americans. It helped put food on the table. Remember those long lines of cars waiting for hours just to get a box of food put in their trunk? It cut the cost of healthcare insurance. And as my dad used to say, it gave the people “just a little bit of breathing room.”\\n' +\n", - " 'And unlike the $2 trillion tax cut passed in the previous administration that benefitted the top 1 percent of Americans, the American Rescue Plan helped working people and left no one behind. And, folks — and it worked. It worked.\\n' +\n", - " 'It worked and created jobs — lots of jobs. In fact, our economy created over 6.5 million new jobs just last year, more jobs in one year than ever before in the history of the United States of America.\\n' +\n", - " 'Economic Progress Report \\n' +\n", - " 'The economy grew at a rate of 5.7 last year — the strongest growth'... 
35166 more characters,\n", - " metadata: {\n", - " score: 0.16303963959217072,\n", - " title: '2022 State of the Union Address | The White House',\n", - " id: 'https://www.whitehouse.gov/state-of-the-union-2022/',\n", - " url: 'https://www.whitehouse.gov/state-of-the-union-2022/',\n", - " publishedDate: '2022-02-25',\n", - " author: ''\n", - " },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: \"The President. Thank you all very, very much. Thank you, please. Thank you so much. Madam Speaker, Madam Vice President, and our First Lady and Second Gentleman, Members of Congress and the Cabinet, Justices of the Supreme Court, my fellow Americans: Last year, COVID-19 kept us apart. This year, we're finally together again.\\n\" +\n", - " 'Tonight we meet as Democrats, Republicans, and Independents, but most importantly, as Americans with a duty to one another, to America, to the American people, and to the Constitution, and an unwavering resolve that freedom will always triumph over tyranny.\\n' +\n", - " \"Six—[applause]—thank you. Six days ago, Russia's Vladimir Putin sought to shake the very foundations of the free world, thinking he could make it bend to his menacing ways. But he badly miscalculated. He thought he could roll into Ukraine and the world would roll over. Instead, he met with a wall of strength he never anticipated or imagined. He met the Ukrainian people.\\n\" +\n", - " 'From President Zelenskiy, their—to every Ukrainian, their fearlessness, their courage, their determination literally inspires the world. Groups of citizens blocking tanks with their bodies. Everyone from students to retirees, to teachers turned soldiers defending their homeland. And in this struggle—President Zelenskiy said in his speech to the European Parliament, \"Light will win over darkness.\"\\n' +\n", - " \"The Ukrainian Ambassador to the United States is here tonight sitting with the First Lady. Let each of us, if you're able to stand, stand and send an unmistakable signal to the world and Ukraine. Thank you. Thank you, thank you, thank you. She's bright, she's strong, and she's resolved. Yes. We, the United States of America, stand with the Ukrainian people.\\n\" +\n", - " \"Throughout our history, we've learned this lesson: When dictators do not pay a price for their aggression, they cause more chaos; they keep moving; and the costs, the threats to the America—and America, to the world keeps rising. That's why the NATO alliance was created: to secure peace and stability in Europe after World War II. The United States is a member, along with 29 other nations. It matters. American diplomacy matters. American resolve matters.\\n\" +\n", - " \"Putin's latest attack on Ukraine was premeditated and totally unprovoked. He rejected repeated—repeated—efforts at diplomacy. He thought the West and NATO wouldn't respond. He thought he could divide us at home, in this Chamber, in this Nation. He thought he could divide us in Europe as well.\\n\" +\n", - " \"But Putin was wrong. We are ready. We are united. And that's what we did: We stayed united. We prepared extensively and carefully. We spent months building coalitions of other freedom-loving nations in Europe and the Americas to—from America to the Asian and African continents to confront Putin.\\n\" +\n", - " \"Like many of you, I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsify and justify his aggression. 
We countered Russia's lies with the truth. And now—now that he's acted, the free world is holding him accountable, along with 27 members of the European Union—including France, Germany, Italy—as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others—even Switzerland—are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than he has ever been.\\n\" +\n", - " \"Together, along with our allies, we are right now enforcing powerful economic sanctions. We're cutting off Russia's largest banks from the international financial system; preventing Russia's Central Bank from defending the Russian ruble, making Putin's $630 billion war fund worthless. We're choking Russia's access to technology that will sap its economic strength and weaken its military for years to come.\\n\" +\n", - " 'Tonight I say to the Russian oligarchs and the corrupt leaders who have bilked billions of dollars off this violent regime: No more. The United States—[applause]—I mean it. The United States Department of Justice is assembling a dedicated task force to go after the crimes of the Russian oligarchs.\\n' +\n", - " \"We're joining with European allies to find and seize their yachts, their luxury apartments, their private jets. We're coming for your ill-begotten gains. And tonight I'm announcing that we will join our allies in closing off American air space to all Russian flights, further isolating Russia and adding an additional squeeze on their economy.\\n\" +\n", - " \"He has no idea what's coming. The ruble has already lost 30 percent of its value, the Russian stock market has lost 40 percent of its value, and trading remains suspended. The Russian economy is reeling, and Putin alone is the one to blame.\\n\" +\n", - " \"Together with our allies, we're providing support to the Ukrainians in their fight for freedom: military assistance, economic assistance, humanitarian assistance. We're giving more than a billion dollars in direct assistance to Ukraine. And we'll continue to aid the Ukrainian people as they defend their country and help ease their suffering.\\n\" +\n", - " \"But let me be clear: Our Forces are not engaged and will not engage in the conflict with Russian forces in Ukraine. Our Forces are not going to Europe to fight [in]* Ukraine but to defend our NATO allies in the event that Putin decides to keep moving west. For that purpose, we have mobilized American ground forces, air squadrons, ship deployments to protect NATO countries, including Poland, Romania, Latvia, Lithuania, and Estonia. And as I've made crystal clear, the United States and our allies will defend every inch of territory that is NATO territory with the full force of our collective power—every single inch.\\n\" +\n", - " \"And we're clear eyed. The Ukrainians are fighting back with pure courage. But the next few days, weeks, and months will be hard on them. Putin has unleashed violence and chaos. But while he may make gains on the battlefield, he'll pay a continuing high price over the long run. And a pound of Ukrainian people—the proud, proud people—pound for pound, ready to fight with every inch of energy they have. They've known 30 years of independence—have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.\\n\" +\n", - " \"To all Americans, I'll be honest with you, as I've always promised I would be. A Russian dictator invading a foreign country has costs around the world. 
And I'm taking robust action to make sure the pain of our sanctions is targeted at Russian economy and that we use every tool at our disposal to protect American businesses and consumers.\\n\" +\n", - " 'Tonight I can announce the United States has worked with 30 other countries to release 60 million barrels of oil from reserves around the world. America will lead that effort, releasing 30 million barrels of our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, united with our allies.\\n' +\n", - " \"These steps will help blunt gas prices here at home. But I know news about what's happening can seem alarming to all Americans. But I want you to know: We're going to be okay. We're going to be okay.\\n\" +\n", - " \"When the history of this era is written, Putin's war on Ukraine will have left Russia weaker and the rest of the world stronger.\\n\" +\n", - " \"While it shouldn't have taken something so terrible for people around the world to see what's at stake, now everyone sees it clearly. We see the unity among leaders of nations, a more unified Europe, a more unified West. We see unity among the people who are gathering in cities in large crowds around the world, even in Russia, to demonstrate their support for the people of Ukraine.\\n\" +\n", - " \"In the battle between democracy and autocracies, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. This is the real test, and it's going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people.\\n\" +\n", - " \"To our fellow Ukrainian Americans who forged a deep bond that connects our two nations: We stand with you. We stand with you. Putin may circle Kiev with tanks, but he'll never gain the hearts and souls of the Uranian [Ukrainian]* people. He'll never extinguish their love of freedom. And he will never, never weaken the resolve of the free world.\\n\" +\n", - " 'We meet tonight in an America that has lived through 2 of the hardest years this Nation has ever faced. The pandemic has been punishing. And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more.\\n' +\n", - " \"I understand, like many of you did. My dad had to leave his home in Scranton, Pennsylvania, to find work. So, like many of you, I grew up in a family when the price of food went up, it was felt throughout the family; it had an impact. That's why one of the first things I did as President was fight to pass the American Rescue Plan, because people were hurting. We needed to act, and we did.\\n\" +\n", - " 'Few pieces of legislation have done more at a critical moment in our history to lift us out of a crisis. It fueled our efforts to vaccinate the Nation and combat COVID-19. It delivered immediate economic relief to tens of millions of Americans. It helped put food on the table. Remember those long lines of cars waiting for hours just to get a box of food put in their trunk? It cut the cost of health care insurance. And as my dad used to say, it gave the people \"just a little bit of breathing room.\"\\n' +\n", - " 'And unlike the $2 trillion tax cut passed in the previous administration that benefited the top 1 percent of Americans, the American Rescue Plan——\\n' +\n", - " ' Audience members. Boo!\\n' +\n", - " ' The President. ——the American Rescue Plan helped working people and left no one behind. And, folks—and it worked. It worked. It worked and created jobs, lots of jobs. 
In fact, our economy created over 6.5 million new jobs just last year, more jobs in 1 year than ever before in the history of the United States of America. The economy grew at a rate of 5.7 last year, the strongest growth rate in 40 years and the first step in'... 35254 more characters,\n", - " metadata: {\n", - " score: 0.16301880776882172,\n", - " title: 'Address Before a Joint Session of the Congress on the State of the Union',\n", - " id: 'https://www.presidency.ucsb.edu/documents/address-before-joint-session-the-congress-the-state-the-union-28',\n", - " url: 'https://www.presidency.ucsb.edu/documents/address-before-joint-session-the-congress-the-state-the-union-28',\n", - " publishedDate: '2022-03-01',\n", - " author: ''\n", - " },\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const query = \"What did the speaker say about Justice Breyer in the 2022 State of the Union?\";\n", - "\n", - "await retriever.invoke(query);" - ] - }, - { - "cell_type": "markdown", - "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", - "metadata": {}, - "source": [ - "## Use within a chain\n", - "\n", - "Like other retrievers, ExaRetriever can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", - "\n", - "We will need a LLM or chat model:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`\n", - "Answer the question based only on the context provided.\n", - "\n", - "Context: {context}\n", - "\n", - "Question: {question}`);\n", - "\n", - "const formatDocs = (docs: Document[]) => {\n", - " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", - "}\n", - "\n", - "// See https://js.langchain.com/docs/tutorials/rag\n", - "const ragChain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe(formatDocs),\n", - " question: new RunnablePassthrough(),\n", - " },\n", - " prompt,\n", - " llm,\n", - " new StringOutputParser(),\n", - "]);" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Exa\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "In the 2022 State of the Union Address, the speaker, President Biden, honored Justice Breyer, describing him as someone who has dedicated his life to serve the country. He acknowledged Justice Breyer as an Army veteran and a constitutional scholar, and he expressed gratitude for his service. 
President Biden also mentioned that one of the most serious constitutional responsibilities of a President is nominating someone to serve on the United States Supreme Court, and he highlighted his nomination of Ketanji Brown Jackson to succeed Justice Breyer.\n" - ] + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ExaRetriever\n", + "\n", + "## Overview\n", + "\n", + "[Exa](https://exa.ai/) is a search engine that retrieves relevant content from the web given some input query.\n", + "\n", + "This guide will help you getting started with the Exa [retriever](/docs/concepts/retrievers). For detailed documentation of all `ExaRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_exa.ExaRetriever.html).\n", + "\n", + "### Integration details\n", + "\n", + "| Retriever | Source | Package |\n", + "| :--- | :--- | :---: |\n", + "[ExaRetriever](https://api.js.langchain.com/classes/langchain_exa.ExaRetriever.html) | Information on the web. | [`@langchain/exa`](https://www.npmjs.com/package/@langchain/exa) |\n", + "\n", + "## Setup\n", + "\n", + "You'll need to set your API key as an environment variable.\n", + "\n", + "The `Exa` class defaults to `EXASEARCH_API_KEY` when searching for your API key.\n", + "\n", + "```typescript\n", + "process.env.EXASEARCH_API_KEY=\"\";\n", + "```\n", + "\n", + "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGSMITH_API_KEY = \"\";\n", + "// process.env.LANGSMITH_TRACING = \"true\";\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "This retriever lives in the `@langchain/exa` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/exa @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our retriever:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", + "metadata": {}, + "outputs": [], + "source": [ + "import { ExaRetriever } from \"@langchain/exa\";\n", + "import Exa from \"exa-js\";\n", + "\n", + "const retriever = new ExaRetriever({\n", + " // @lc-ts-ignore\n", + " client: new Exa(\n", + " process.env.EXASEARCH_API_KEY // default API key\n", + " ),\n", + " searchArgs: {\n", + " numResults: 2,\n", + " }\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f2839-4020-424e-9fc9-07777eede442", + "metadata": {}, + "source": [ + "## Usage" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'President Biden’s State of the Union Address\\n' +\n", + " 'Madam Speaker, Madam Vice President, and our First Lady and Second Gentleman, members of Congress and the Cabinet, Justices of the Supreme Court, my fellow Americans: Last year, COVID-19 kept us apart. 
This year, we’re finally together again.\\n' +\n", + " 'Tonight — tonight we meet as Democrats, Republicans, and independents, but, most importantly, as Americans with a duty to one another, to America, to the American people, and to the Constitution, and an unwavering resolve that freedom will always triumph over tyranny.\\n' +\n", + " 'Six — thank you. Six days ago, Russia’s Vladimir Putin sought to shake the very foundations of the free world, thinking he could make it bend to his menacing ways. But he badly miscalculated. He thought he could roll into Ukraine and the world would roll over. Instead, he met with a wall of strength he never anticipated or imagined. He met the Ukrainian people.\\n' +\n", + " 'From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination literally inspires the world. Groups of citizens blocking tanks with their bodies. Everyone from students to retirees, to teachers turned soldiers defending their homeland.\\n' +\n", + " 'And in this struggle — President Zelenskyy said in his speech to the European Parliament, “Light will win over darkness.”\\n' +\n", + " 'The Ukrainian Ambassador to the United States is here tonight sitting with the First Lady. Let each of us, if you’re able to stand, stand and send an unmistakable signal to the world and Ukraine. Thank you. Thank you, thank you, thank you.\\n' +\n", + " 'She’s bright, she’s strong, and she’s resolved.\\n' +\n", + " 'Yes. We, the United States of America, stand with the Ukrainian people.\\n' +\n", + " 'Throughout our history, we’ve learned this lesson: When dictators do not pay a price for their aggression, they cause more chaos; they keep moving; and the costs, the threats to the America — and America, to the world keeps rising.\\n' +\n", + " 'That’s why the NATO Alliance was created: to secure peace and stability in Europe after World War Two.\\n' +\n", + " 'The United States is a member, along with 29 other nations. It matters. American diplomacy matters. American resolve matters.\\n' +\n", + " 'Putin’s latest attack on Ukraine was premeditated and totally unprovoked. He rejected repeated efforts at diplomacy.\\n' +\n", + " 'He thought the West and NATO wouldn’t respond. He thought he could divide us at home, in this chamber, in this nation. He thought he could divide us in Europe as well.\\n' +\n", + " 'But Putin was wrong. We are ready. We are united. And that’s what we did: We stayed united.\\n' +\n", + " 'We prepared extensively and carefully. We spent months building coalitions of other freedom-loving nations in Europe and the Americas to — from America to the Asian and African continents to confront Putin.\\n' +\n", + " 'Like many of you, I spent countless hours unifying our European Allies.\\n' +\n", + " 'We shared with the world, in advance, what we knew Putin was planning and precisely how he would try to falsely and justify his aggression.\\n' +\n", + " 'We countered Russia’s lies with the truth. And now — now that he’s acted, the free world is holding him accountable, along with 27 members of the European Union — including France, Germany, Italy — as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others. Even Switzerland are inflicting pain on Russia and supporting the people of Ukraine.\\n' +\n", + " 'Putin is now isolated from the world more than he has ever been.\\n' +\n", + " 'Together. Together. Together, along with our Allies, we are right now enforcing powerful economic sanctions. 
We’re cutting off Russia’s largest banks from the international financial system; preventing Russia’s Central Bank from defending the Russian ruble, making Putin’s $630 billion war fund worthless. We’re choking Russia’s access, we’re choking Russia’s access to technology that will sap its economic strength and weaken its military for years to come.\\n' +\n", + " 'Tonight, I say to the Russian oligarchs and the corrupt leaders who’ve bilked billions of dollars off this violent regime: No more.\\n' +\n", + " 'The United States — I mean it. The United States Department of Justice is assembling a dedicated task force to go after the crimes of the Russian oligarchs.\\n' +\n", + " 'We’re joining with European Allies to find and seize their yachts, their luxury apartments, their private jets. We’re coming for your ill-begotten gains.\\n' +\n", + " 'And, tonight, I’m announcing that we will join our Allies in closing off American air space to all Russian flights, further isolating Russia and adding an additional squeeze on their economy.\\n' +\n", + " 'He has no idea what’s coming.\\n' +\n", + " 'The ruble has already lost 30 percent of its value, the Russian stock market has lost 40 percent of its value, and trading remains suspended.\\n' +\n", + " 'The Russian economy is reeling, and Putin alone is the one to blame.\\n' +\n", + " 'Together with our Allies, we’re providing support to the Ukrainians in their fight for freedom: military assistance, economic assistance, humanitarian assistance. We’re giving more than a billion dollars in direct assistance to Ukraine. And we’ll continue to aid the Ukrainian people as they defend their country and help ease their suffering.\\n' +\n", + " 'But let me be clear: Our forces are not engaged and will not engage in the conflict with Russian forces in Ukraine. Our forces are not going to Europe to fight in Ukraine but to defend our NATO Allies in the event that Putin decides to keep moving west.\\n' +\n", + " 'For that purpose, we have mobilized American ground forces, air squadrons, ship deployments to protect NATO countries, including Poland, Romania, Latvia, Lithuania, and Estonia.\\n' +\n", + " 'And as I’ve made crystal clear, the United States and our Allies will defend every inch of territory that is NATO territory with the full force of our collective power — every single inch.\\n' +\n", + " 'And we’re clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days, weeks, and months will be hard on them.\\n' +\n", + " 'Putin has unleashed violence and chaos. But while he may make gains on the battlefield, he will pay a continuing high price over the long run.\\n' +\n", + " 'And a pound of Ukrainian people — the proud, proud people — pound for pound, ready to fight with every inch of (inaudible) they have. They’ve known 30 years of independence — have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.\\n' +\n", + " 'To all Americans, I’ll be honest with you, as I’ve always promised I would be. A Russian dictator infa- — invading a foreign country has costs around the world. And I’m taking robust action to make sure the pain of our sanctions is targeted at the Russian economy and that we use every tool at our disposal to protect American businesses and consumers.\\n' +\n", + " 'Tonight, I can announce the United States has worked with 30 other countries to release 60 million barrels of oil from reserves around the world. 
America will lead that effort, releasing 30 million barrels of our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, united with our Allies.\\n' +\n", + " 'These steps will help blunt gas prices here at home. But I know news about what’s happening can seem alarming to all Americans. But I want you to know: We’re going to be okay. We’re going to be okay.\\n' +\n", + " 'When the history of this era is written, Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger.\\n' +\n", + " 'While it shouldn’t and while it shouldn’t have taken something so terrible for people around the world to see what’s at stake, now everyone sees it clearly.\\n' +\n", + " 'We see the unity among leaders of nations, a more unified Europe, a more unified West.\\n' +\n", + " 'We see unity among the people who are gathering in cities in large crowds around the world, even in Russia, to demonstrate their support for the people of Ukraine.\\n' +\n", + " 'In the battle between democracy and autocracies, democracies are rising to the moment and the world is clearly choosing the side of peace and security.\\n' +\n", + " 'This is the real test, and it’s going to take time. So, let us continue to draw inspiration from the iron will of the Ukrainian people.\\n' +\n", + " 'To our fellow Ukrainian Americans who forged a deep bond that connects our two nations: We stand with you. We stand with you.\\n' +\n", + " 'Putin may circle Kyiv with tanks, but he’ll never gain the hearts and souls of Ukrainian people. He’ll never — he’ll never extinguish their love of freedom. And he will never, never weaken the resolve of the free world.\\n' +\n", + " 'We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. The pandemic has been punishing. And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more.\\n' +\n", + " 'I understand, like many of you did. My dad had to leave his home in Scranton, Pennsylvania, to find work. So, like many of you, I grew up in a family when the price of food went up, it was felt throughout the family; it had an impact.\\n' +\n", + " 'That’s why one of the first things I did as President was fight to pass the American Rescue Plan, because people were hurting. We needed to act and we did.\\n' +\n", + " 'American Rescue Plan \\n' +\n", + " 'Few pieces of legislation have done more at a critical moment in our history to lift us out of a crisis. It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief to tens of millions of Americans. It helped put food on the table. Remember those long lines of cars waiting for hours just to get a box of food put in their trunk? It cut the cost of healthcare insurance. And as my dad used to say, it gave the people “just a little bit of breathing room.”\\n' +\n", + " 'And unlike the $2 trillion tax cut passed in the previous administration that benefitted the top 1 percent of Americans, the American Rescue Plan helped working people and left no one behind. And, folks — and it worked. It worked.\\n' +\n", + " 'It worked and created jobs — lots of jobs. In fact, our economy created over 6.5 million new jobs just last year, more jobs in one year than ever before in the history of the United States of America.\\n' +\n", + " 'Economic Progress Report \\n' +\n", + " 'The economy grew at a rate of 5.7 last year — the strongest growth'... 
35166 more characters,\n", + " metadata: {\n", + " score: 0.16303963959217072,\n", + " title: '2022 State of the Union Address | The White House',\n", + " id: 'https://www.whitehouse.gov/state-of-the-union-2022/',\n", + " url: 'https://www.whitehouse.gov/state-of-the-union-2022/',\n", + " publishedDate: '2022-02-25',\n", + " author: ''\n", + " },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: \"The President. Thank you all very, very much. Thank you, please. Thank you so much. Madam Speaker, Madam Vice President, and our First Lady and Second Gentleman, Members of Congress and the Cabinet, Justices of the Supreme Court, my fellow Americans: Last year, COVID-19 kept us apart. This year, we're finally together again.\\n\" +\n", + " 'Tonight we meet as Democrats, Republicans, and Independents, but most importantly, as Americans with a duty to one another, to America, to the American people, and to the Constitution, and an unwavering resolve that freedom will always triumph over tyranny.\\n' +\n", + " \"Six—[applause]—thank you. Six days ago, Russia's Vladimir Putin sought to shake the very foundations of the free world, thinking he could make it bend to his menacing ways. But he badly miscalculated. He thought he could roll into Ukraine and the world would roll over. Instead, he met with a wall of strength he never anticipated or imagined. He met the Ukrainian people.\\n\" +\n", + " 'From President Zelenskiy, their—to every Ukrainian, their fearlessness, their courage, their determination literally inspires the world. Groups of citizens blocking tanks with their bodies. Everyone from students to retirees, to teachers turned soldiers defending their homeland. And in this struggle—President Zelenskiy said in his speech to the European Parliament, \"Light will win over darkness.\"\\n' +\n", + " \"The Ukrainian Ambassador to the United States is here tonight sitting with the First Lady. Let each of us, if you're able to stand, stand and send an unmistakable signal to the world and Ukraine. Thank you. Thank you, thank you, thank you. She's bright, she's strong, and she's resolved. Yes. We, the United States of America, stand with the Ukrainian people.\\n\" +\n", + " \"Throughout our history, we've learned this lesson: When dictators do not pay a price for their aggression, they cause more chaos; they keep moving; and the costs, the threats to the America—and America, to the world keeps rising. That's why the NATO alliance was created: to secure peace and stability in Europe after World War II. The United States is a member, along with 29 other nations. It matters. American diplomacy matters. American resolve matters.\\n\" +\n", + " \"Putin's latest attack on Ukraine was premeditated and totally unprovoked. He rejected repeated—repeated—efforts at diplomacy. He thought the West and NATO wouldn't respond. He thought he could divide us at home, in this Chamber, in this Nation. He thought he could divide us in Europe as well.\\n\" +\n", + " \"But Putin was wrong. We are ready. We are united. And that's what we did: We stayed united. We prepared extensively and carefully. We spent months building coalitions of other freedom-loving nations in Europe and the Americas to—from America to the Asian and African continents to confront Putin.\\n\" +\n", + " \"Like many of you, I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsify and justify his aggression. 
We countered Russia's lies with the truth. And now—now that he's acted, the free world is holding him accountable, along with 27 members of the European Union—including France, Germany, Italy—as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others—even Switzerland—are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than he has ever been.\\n\" +\n", + " \"Together, along with our allies, we are right now enforcing powerful economic sanctions. We're cutting off Russia's largest banks from the international financial system; preventing Russia's Central Bank from defending the Russian ruble, making Putin's $630 billion war fund worthless. We're choking Russia's access to technology that will sap its economic strength and weaken its military for years to come.\\n\" +\n", + " 'Tonight I say to the Russian oligarchs and the corrupt leaders who have bilked billions of dollars off this violent regime: No more. The United States—[applause]—I mean it. The United States Department of Justice is assembling a dedicated task force to go after the crimes of the Russian oligarchs.\\n' +\n", + " \"We're joining with European allies to find and seize their yachts, their luxury apartments, their private jets. We're coming for your ill-begotten gains. And tonight I'm announcing that we will join our allies in closing off American air space to all Russian flights, further isolating Russia and adding an additional squeeze on their economy.\\n\" +\n", + " \"He has no idea what's coming. The ruble has already lost 30 percent of its value, the Russian stock market has lost 40 percent of its value, and trading remains suspended. The Russian economy is reeling, and Putin alone is the one to blame.\\n\" +\n", + " \"Together with our allies, we're providing support to the Ukrainians in their fight for freedom: military assistance, economic assistance, humanitarian assistance. We're giving more than a billion dollars in direct assistance to Ukraine. And we'll continue to aid the Ukrainian people as they defend their country and help ease their suffering.\\n\" +\n", + " \"But let me be clear: Our Forces are not engaged and will not engage in the conflict with Russian forces in Ukraine. Our Forces are not going to Europe to fight [in]* Ukraine but to defend our NATO allies in the event that Putin decides to keep moving west. For that purpose, we have mobilized American ground forces, air squadrons, ship deployments to protect NATO countries, including Poland, Romania, Latvia, Lithuania, and Estonia. And as I've made crystal clear, the United States and our allies will defend every inch of territory that is NATO territory with the full force of our collective power—every single inch.\\n\" +\n", + " \"And we're clear eyed. The Ukrainians are fighting back with pure courage. But the next few days, weeks, and months will be hard on them. Putin has unleashed violence and chaos. But while he may make gains on the battlefield, he'll pay a continuing high price over the long run. And a pound of Ukrainian people—the proud, proud people—pound for pound, ready to fight with every inch of energy they have. They've known 30 years of independence—have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.\\n\" +\n", + " \"To all Americans, I'll be honest with you, as I've always promised I would be. A Russian dictator invading a foreign country has costs around the world. 
And I'm taking robust action to make sure the pain of our sanctions is targeted at Russian economy and that we use every tool at our disposal to protect American businesses and consumers.\\n\" +\n", + " 'Tonight I can announce the United States has worked with 30 other countries to release 60 million barrels of oil from reserves around the world. America will lead that effort, releasing 30 million barrels of our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, united with our allies.\\n' +\n", + " \"These steps will help blunt gas prices here at home. But I know news about what's happening can seem alarming to all Americans. But I want you to know: We're going to be okay. We're going to be okay.\\n\" +\n", + " \"When the history of this era is written, Putin's war on Ukraine will have left Russia weaker and the rest of the world stronger.\\n\" +\n", + " \"While it shouldn't have taken something so terrible for people around the world to see what's at stake, now everyone sees it clearly. We see the unity among leaders of nations, a more unified Europe, a more unified West. We see unity among the people who are gathering in cities in large crowds around the world, even in Russia, to demonstrate their support for the people of Ukraine.\\n\" +\n", + " \"In the battle between democracy and autocracies, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. This is the real test, and it's going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people.\\n\" +\n", + " \"To our fellow Ukrainian Americans who forged a deep bond that connects our two nations: We stand with you. We stand with you. Putin may circle Kiev with tanks, but he'll never gain the hearts and souls of the Uranian [Ukrainian]* people. He'll never extinguish their love of freedom. And he will never, never weaken the resolve of the free world.\\n\" +\n", + " 'We meet tonight in an America that has lived through 2 of the hardest years this Nation has ever faced. The pandemic has been punishing. And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more.\\n' +\n", + " \"I understand, like many of you did. My dad had to leave his home in Scranton, Pennsylvania, to find work. So, like many of you, I grew up in a family when the price of food went up, it was felt throughout the family; it had an impact. That's why one of the first things I did as President was fight to pass the American Rescue Plan, because people were hurting. We needed to act, and we did.\\n\" +\n", + " 'Few pieces of legislation have done more at a critical moment in our history to lift us out of a crisis. It fueled our efforts to vaccinate the Nation and combat COVID-19. It delivered immediate economic relief to tens of millions of Americans. It helped put food on the table. Remember those long lines of cars waiting for hours just to get a box of food put in their trunk? It cut the cost of health care insurance. And as my dad used to say, it gave the people \"just a little bit of breathing room.\"\\n' +\n", + " 'And unlike the $2 trillion tax cut passed in the previous administration that benefited the top 1 percent of Americans, the American Rescue Plan——\\n' +\n", + " ' Audience members. Boo!\\n' +\n", + " ' The President. ——the American Rescue Plan helped working people and left no one behind. And, folks—and it worked. It worked. It worked and created jobs, lots of jobs. 
In fact, our economy created over 6.5 million new jobs just last year, more jobs in 1 year than ever before in the history of the United States of America. The economy grew at a rate of 5.7 last year, the strongest growth rate in 40 years and the first step in'... 35254 more characters,\n", + " metadata: {\n", + " score: 0.16301880776882172,\n", + " title: 'Address Before a Joint Session of the Congress on the State of the Union',\n", + " id: 'https://www.presidency.ucsb.edu/documents/address-before-joint-session-the-congress-the-state-the-union-28',\n", + " url: 'https://www.presidency.ucsb.edu/documents/address-before-joint-session-the-congress-the-state-the-union-28',\n", + " publishedDate: '2022-03-01',\n", + " author: ''\n", + " },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const query = \"What did the speaker say about Justice Breyer in the 2022 State of the Union?\";\n", + "\n", + "await retriever.invoke(query);" + ] + }, + { + "cell_type": "markdown", + "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", + "metadata": {}, + "source": [ + "## Use within a chain\n", + "\n", + "Like other retrievers, ExaRetriever can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", + "\n", + "We will need a LLM or chat model:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`\n", + "Answer the question based only on the context provided.\n", + "\n", + "Context: {context}\n", + "\n", + "Question: {question}`);\n", + "\n", + "const formatDocs = (docs: Document[]) => {\n", + " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", + "}\n", + "\n", + "// See https://js.langchain.com/docs/tutorials/rag\n", + "const ragChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocs),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser(),\n", + "]);" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "In the 2022 State of the Union Address, the speaker, President Biden, honored Justice Breyer, describing him as someone who has dedicated his life to serve the country. He acknowledged Justice Breyer as an Army veteran and a constitutional scholar, and he expressed gratitude for his service. 
President Biden also mentioned that one of the most serious constitutional responsibilities of a President is nominating someone to serve on the United States Supreme Court, and he highlighted his nomination of Ketanji Brown Jackson to succeed Justice Breyer.\n" + ] + } + ], + "source": [ + "await ragChain.invoke(\"What did the speaker say about Justice Breyer in the 2022 State of the Union?\");" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ExaRetriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_exa.ExaRetriever.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "await ragChain.invoke(\"What did the speaker say about Justice Breyer in the 2022 State of the Union?\");" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all ExaRetriever features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_exa.ExaRetriever.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/retrievers/hyde.mdx b/docs/core_docs/docs/integrations/retrievers/hyde.mdx index 6055bd5578e8..55208d8a9872 100644 --- a/docs/core_docs/docs/integrations/retrievers/hyde.mdx +++ b/docs/core_docs/docs/integrations/retrievers/hyde.mdx @@ -27,5 +27,5 @@ npm install @langchain/openai @langchain/core ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/index.mdx b/docs/core_docs/docs/integrations/retrievers/index.mdx index 72fd489c13ae..2a1f7c2b7c2b 100644 --- a/docs/core_docs/docs/integrations/retrievers/index.mdx +++ b/docs/core_docs/docs/integrations/retrievers/index.mdx @@ -7,7 +7,7 @@ import { CategoryTable, IndexTable } from "@theme/FeatureTables"; # Retrievers -A [retriever](/docs/concepts/#retrievers) is an interface that returns documents given an unstructured query. +A [retriever](/docs/concepts/retrievers) is an interface that returns documents given an unstructured query. It is more general than a vector store. A retriever does not need to be able to store documents, only to return (or retrieve) them. 
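To make the retriever contract described in the index.mdx hunk above concrete, here is a minimal sketch of invoking a retriever. It reuses the `ExaRetriever` configuration shown earlier in this patch purely for illustration; the query string is an arbitrary placeholder, and any other retriever integration exposes the same call shape.

```typescript
import { ExaRetriever } from "@langchain/exa";
import Exa from "exa-js";
import type { Document } from "@langchain/core/documents";

// The retriever contract: a plain string query in, an array of Documents out.
// No storage API is required; the retriever only returns (retrieves) documents.
const retriever = new ExaRetriever({
  // @lc-ts-ignore
  client: new Exa(process.env.EXASEARCH_API_KEY),
  searchArgs: { numResults: 2 },
});

const docs: Document[] = await retriever.invoke("state of the union 2022");
for (const doc of docs) {
  // Each Document carries the retrieved text plus source metadata.
  console.log(doc.metadata.title, "-", doc.pageContent.slice(0, 80));
}
```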
diff --git a/docs/core_docs/docs/integrations/retrievers/kendra-retriever.ipynb b/docs/core_docs/docs/integrations/retrievers/kendra-retriever.ipynb index 78496bb51ef7..76a0af4aa17c 100644 --- a/docs/core_docs/docs/integrations/retrievers/kendra-retriever.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/kendra-retriever.ipynb @@ -1,233 +1,233 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Amazon Kendra Retriever\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# AWSKendraRetriever\n", + "\n", + "## Overview\n", + "\n", + "[Amazon Kendra](https://aws.amazon.com/kendra/) is an intelligent search service provided by Amazon Web Services (AWS).\n", + "It utilizes advanced natural language processing (NLP) and machine learning algorithms to enable powerful search capabilities across various data sources within an organization.\n", + "Kendra is designed to help users find the information they need quickly and accurately, improving productivity and decision-making.\n", + "\n", + "With Kendra, users can search across a wide range of content types, including documents, FAQs, knowledge bases, manuals, and websites.\n", + "It supports multiple languages and can understand complex queries, synonyms, and contextual meanings to provide highly relevant search results.\n", + "\n", + "This will help you getting started with the Amazon Kendra [`retriever`](/docs/concepts/retrievers). For detailed documentation of all `AWSKendraRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKendraRetriever.html).\n", + "\n", + "### Integration details\n", + "\n", + "| Retriever | Source | Package |\n", + "| :--- | :--- | :---: |\n", + "[AWSKendraRetriever](https://api.js.langchain.com/classes/langchain_aws.AmazonKendraRetriever.html) | Various AWS resources | [`@langchain/aws`](https://www.npmjs.com/package/@langchain/aws) |\n", + "\n", + "## Setup\n", + "\n", + "You'll need an AWS account and an Amazon Kendra instance to get started. 
See this [tutorial](https://docs.aws.amazon.com/kendra/latest/dg/getting-started.html) from AWS for more information.\n", + "\n", + "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGSMITH_API_KEY = \"\";\n", + "// process.env.LANGSMITH_TRACING = \"true\";\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "This retriever lives in the `@langchain/aws` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/aws @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our retriever:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", + "metadata": {}, + "outputs": [], + "source": [ + "import { AmazonKendraRetriever } from \"@langchain/aws\";\n", + "\n", + "const retriever = new AmazonKendraRetriever({\n", + " topK: 10,\n", + " indexId: \"YOUR_INDEX_ID\",\n", + " region: \"us-east-2\", // Your region\n", + " clientOptions: {\n", + " credentials: {\n", + " accessKeyId: \"YOUR_ACCESS_KEY_ID\",\n", + " secretAccessKey: \"YOUR_SECRET_ACCESS_KEY\",\n", + " },\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f2839-4020-424e-9fc9-07777eede442", + "metadata": {}, + "source": [ + "## Usage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", + "metadata": {}, + "outputs": [], + "source": [ + "const query = \"...\"\n", + "\n", + "await retriever.invoke(query);" + ] + }, + { + "cell_type": "markdown", + "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", + "metadata": {}, + "source": [ + "## Use within a chain\n", + "\n", + "Like other retrievers, the `AWSKendraRetriever` can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", + "\n", + "We will need a LLM or chat model:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`\n", + "Answer the question based only on the context provided.\n", + "\n", + "Context: {context}\n", + "\n", + "Question: {question}`);\n", + "\n", + "const formatDocs = (docs: Document[]) => {\n", + " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", + "}\n", + "\n", + 
"// See https://js.langchain.com/docs/tutorials/rag\n", + "const ragChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocs),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser(),\n", + "]);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", + "metadata": {}, + "outputs": [], + "source": [ + "await ragChain.invoke(query);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `AmazonKendraRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKendraRetriever.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "typescript", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.4" } - }, - "source": [ - "---\n", - "sidebar_label: Amazon Kendra Retriever\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# AWSKendraRetriever\n", - "\n", - "## Overview\n", - "\n", - "[Amazon Kendra](https://aws.amazon.com/kendra/) is an intelligent search service provided by Amazon Web Services (AWS).\n", - "It utilizes advanced natural language processing (NLP) and machine learning algorithms to enable powerful search capabilities across various data sources within an organization.\n", - "Kendra is designed to help users find the information they need quickly and accurately, improving productivity and decision-making.\n", - "\n", - "With Kendra, users can search across a wide range of content types, including documents, FAQs, knowledge bases, manuals, and websites.\n", - "It supports multiple languages and can understand complex queries, synonyms, and contextual meanings to provide highly relevant search results.\n", - "\n", - "This will help you getting started with the Amazon Kendra [`retriever`](/docs/concepts/#retrievers). For detailed documentation of all `AWSKendraRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKendraRetriever.html).\n", - "\n", - "### Integration details\n", - "\n", - "| Retriever | Source | Package |\n", - "| :--- | :--- | :---: |\n", - "[AWSKendraRetriever](https://api.js.langchain.com/classes/langchain_aws.AmazonKendraRetriever.html) | Various AWS resources | [`@langchain/aws`](https://www.npmjs.com/package/@langchain/aws) |\n", - "\n", - "## Setup\n", - "\n", - "You'll need an AWS account and an Amazon Kendra instance to get started. 
See this [tutorial](https://docs.aws.amazon.com/kendra/latest/dg/getting-started.html) from AWS for more information.\n", - "\n", - "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGSMITH_API_KEY = \"\";\n", - "// process.env.LANGSMITH_TRACING = \"true\";\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "This retriever lives in the `@langchain/aws` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/aws @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our retriever:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", - "metadata": {}, - "outputs": [], - "source": [ - "import { AmazonKendraRetriever } from \"@langchain/aws\";\n", - "\n", - "const retriever = new AmazonKendraRetriever({\n", - " topK: 10,\n", - " indexId: \"YOUR_INDEX_ID\",\n", - " region: \"us-east-2\", // Your region\n", - " clientOptions: {\n", - " credentials: {\n", - " accessKeyId: \"YOUR_ACCESS_KEY_ID\",\n", - " secretAccessKey: \"YOUR_SECRET_ACCESS_KEY\",\n", - " },\n", - " },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "5c5f2839-4020-424e-9fc9-07777eede442", - "metadata": {}, - "source": [ - "## Usage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", - "metadata": {}, - "outputs": [], - "source": [ - "const query = \"...\"\n", - "\n", - "await retriever.invoke(query);" - ] - }, - { - "cell_type": "markdown", - "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", - "metadata": {}, - "source": [ - "## Use within a chain\n", - "\n", - "Like other retrievers, the `AWSKendraRetriever` can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", - "\n", - "We will need a LLM or chat model:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`\n", - "Answer the question based only on the context provided.\n", - "\n", - "Context: {context}\n", - "\n", - "Question: {question}`);\n", - "\n", - "const formatDocs = (docs: Document[]) => {\n", - " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", - "}\n", - "\n", - 
"// See https://js.langchain.com/docs/tutorials/rag\n", - "const ragChain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe(formatDocs),\n", - " question: new RunnablePassthrough(),\n", - " },\n", - " prompt,\n", - " llm,\n", - " new StringOutputParser(),\n", - "]);" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", - "metadata": {}, - "outputs": [], - "source": [ - "await ragChain.invoke(query);" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `AmazonKendraRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_aws.AmazonKendraRetriever.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "typescript", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.4" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx index 7b9e05721d70..2598e42441dc 100644 --- a/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx +++ b/docs/core_docs/docs/integrations/retrievers/metal-retriever.mdx @@ -25,5 +25,5 @@ import Example from "@examples/retrievers/metal.ts"; ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx b/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx index b03c9f915ecd..193184de3287 100644 --- a/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx +++ b/docs/core_docs/docs/integrations/retrievers/supabase-hybrid.mdx @@ -88,5 +88,5 @@ import Example from "@examples/retrievers/supabase_hybrid.ts"; ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/tavily.ipynb b/docs/core_docs/docs/integrations/retrievers/tavily.ipynb index 38d2e22c201c..618bd63b852e 100644 --- a/docs/core_docs/docs/integrations/retrievers/tavily.ipynb +++ b/docs/core_docs/docs/integrations/retrievers/tavily.ipynb @@ -1,274 +1,274 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Tavily Search API\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# TavilySearchAPIRetriever\n", - "\n", - "[Tavily's Search API](https://tavily.com) is a search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.\n", - "\n", - "## Overview\n", - "\n", - "This will help you getting started with the Tavily Search API [retriever](/docs/concepts/#retrievers). 
For detailed documentation of all `TavilySearchAPIRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_retrievers_tavily_search_api.TavilySearchAPIRetriever.html).\n", - "\n", - "### Integration details\n", - "\n", - "| Retriever | Source | Package |\n", - "| :--- | :--- | :---: |\n", - "[`TavilySearchAPIRetriever`](https://api.js.langchain.com/classes/langchain_community_retrievers_tavily_search_api.TavilySearchAPIRetriever.html) | Information on the web. | [`@langchain/community`](https://npmjs.com/@langchain/community/) |\n", - "\n", - "## Setup\n", - "\n", - "You will need to populate a `TAVILY_API_KEY` environment variable with your Tavily API key or pass it into the constructor as `apiKey`. Obtain a key by signing up [on their website](https://tavily.com/).\n", - "\n", - "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGSMITH_API_KEY = \"\";\n", - "// process.env.LANGSMITH_TRACING = \"true\";\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "This retriever lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our retriever:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", - "metadata": {}, - "outputs": [], - "source": [ - "import { TavilySearchAPIRetriever } from \"@langchain/community/retrievers/tavily_search_api\";\n", - "\n", - "const retriever = new TavilySearchAPIRetriever({\n", - " k: 3,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "c9da7fc7", - "metadata": {}, - "source": [ - "For a full list of allowed arguments, see [the official documentation](https://docs.tavily.com/docs/tavily-api/rest_api#parameters). You can pass any param to the SDK via a `kwargs` object." 
- ] - }, - { - "cell_type": "markdown", - "id": "5c5f2839-4020-424e-9fc9-07777eede442", - "metadata": {}, - "source": [ - "## Usage" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1722900266, 'localtime': '2024-08-05 16:24'}, 'current': {'last_updated_epoch': 1722899700, 'last_updated': '2024-08-05 16:15', 'temp_c': 16.8, 'temp_f': 62.2, 'is_day': 1, 'condition': {'text': 'Partly Cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 13.2, 'wind_kph': 21.2, 'wind_degree': 261, 'wind_dir': 'W', 'pressure_mb': 1014.0, 'pressure_in': 29.94, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 74, 'cloud': 60, 'feelslike_c': 16.8, 'feelslike_f': 62.2, 'windchill_c': 16.8, 'windchill_f': 62.2, 'heatindex_c': 16.8, 'heatindex_f': 62.2, 'dewpoint_c': 12.3, 'dewpoint_f': 54.1, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 17.3, 'gust_kph': 27.8}}\",\n", - " metadata: {\n", - " title: 'Weather in San Francisco',\n", - " source: 'https://www.weatherapi.com/',\n", - " score: 0.9947009,\n", - " images: []\n", - " },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Current Weather for Popular Cities . San Francisco, CA 56 ° F Mostly Cloudy; Manhattan, NY warning 85 ° F Fair; Schiller Park, IL (60176) 71 ° F Mostly Cloudy; Boston, MA warning 84 ° F Partly ...',\n", - " metadata: {\n", - " title: 'San Francisco, CA Hourly Weather Forecast | Weather Underground',\n", - " source: 'https://www.wunderground.com/hourly/us/ca/san-francisco/date/2024-08-02',\n", - " score: 0.9859904,\n", - " images: []\n", - " },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'San Francisco CA 37.77°N 122.41°W (Elev. 131 ft) Last Update: 2:42 pm PDT Aug 4, 2024. Forecast Valid: 5pm PDT Aug 4, 2024-6pm PDT Aug 11, 2024 . Forecast Discussion . Additional Resources. Radar & Satellite Image. Hourly Weather Forecast. ... 
Severe Weather ; Current Outlook Maps ; Drought ; Fire Weather ; Fronts/Precipitation Maps ; Current ...',\n", - " metadata: {\n", - " title: 'National Weather Service',\n", - " source: 'https://forecast.weather.gov/zipcity.php?inputstring=San+Francisco,CA',\n", - " score: 0.98141783,\n", - " images: []\n", - " },\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const query = \"what is the current weather in SF?\";\n", - "\n", - "await retriever.invoke(query);" - ] - }, - { - "cell_type": "markdown", - "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", - "metadata": {}, - "source": [ - "## Use within a chain\n", - "\n", - "Like other retrievers, `TavilySearchAPIRetriever` can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", - "\n", - "We will need a LLM or chat model:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromTemplate(`\n", - "Answer the question based only on the context provided.\n", - "\n", - "Context: {context}\n", - "\n", - "Question: {question}`);\n", - "\n", - "const formatDocs = (docs: Document[]) => {\n", - " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", - "}\n", - "\n", - "// See https://js.langchain.com/docs/tutorials/rag\n", - "const ragChain = RunnableSequence.from([\n", - " {\n", - " context: retriever.pipe(formatDocs),\n", - " question: new RunnablePassthrough(),\n", - " },\n", - " prompt,\n", - " llm,\n", - " new StringOutputParser(),\n", - "]);" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Tavily Search API\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "The current weather in San Francisco is partly cloudy with a temperature of 16.8°C (62.2°F). The wind is coming from the west at 13.2 mph (21.2 kph), and the humidity is at 74%. There is no precipitation, and visibility is 10 km (6 miles).\n" - ] + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# TavilySearchAPIRetriever\n", + "\n", + "[Tavily's Search API](https://tavily.com) is a search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.\n", + "\n", + "## Overview\n", + "\n", + "This will help you getting started with the Tavily Search API [retriever](/docs/concepts/retrievers). 
For detailed documentation of all `TavilySearchAPIRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_retrievers_tavily_search_api.TavilySearchAPIRetriever.html).\n", + "\n", + "### Integration details\n", + "\n", + "| Retriever | Source | Package |\n", + "| :--- | :--- | :---: |\n", + "[`TavilySearchAPIRetriever`](https://api.js.langchain.com/classes/langchain_community_retrievers_tavily_search_api.TavilySearchAPIRetriever.html) | Information on the web. | [`@langchain/community`](https://npmjs.com/@langchain/community/) |\n", + "\n", + "## Setup\n", + "\n", + "You will need to populate a `TAVILY_API_KEY` environment variable with your Tavily API key or pass it into the constructor as `apiKey`. Obtain a key by signing up [on their website](https://tavily.com/).\n", + "\n", + "If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGSMITH_API_KEY = \"\";\n", + "// process.env.LANGSMITH_TRACING = \"true\";\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "This retriever lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our retriever:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "70cc8e65-2a02-408a-bbc6-8ef649057d82", + "metadata": {}, + "outputs": [], + "source": [ + "import { TavilySearchAPIRetriever } from \"@langchain/community/retrievers/tavily_search_api\";\n", + "\n", + "const retriever = new TavilySearchAPIRetriever({\n", + " k: 3,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "c9da7fc7", + "metadata": {}, + "source": [ + "For a full list of allowed arguments, see [the official documentation](https://docs.tavily.com/docs/tavily-api/rest_api#parameters). You can pass any param to the SDK via a `kwargs` object." 
+ ] + }, + { + "cell_type": "markdown", + "id": "5c5f2839-4020-424e-9fc9-07777eede442", + "metadata": {}, + "source": [ + "## Usage" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1722900266, 'localtime': '2024-08-05 16:24'}, 'current': {'last_updated_epoch': 1722899700, 'last_updated': '2024-08-05 16:15', 'temp_c': 16.8, 'temp_f': 62.2, 'is_day': 1, 'condition': {'text': 'Partly Cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 13.2, 'wind_kph': 21.2, 'wind_degree': 261, 'wind_dir': 'W', 'pressure_mb': 1014.0, 'pressure_in': 29.94, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 74, 'cloud': 60, 'feelslike_c': 16.8, 'feelslike_f': 62.2, 'windchill_c': 16.8, 'windchill_f': 62.2, 'heatindex_c': 16.8, 'heatindex_f': 62.2, 'dewpoint_c': 12.3, 'dewpoint_f': 54.1, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 17.3, 'gust_kph': 27.8}}\",\n", + " metadata: {\n", + " title: 'Weather in San Francisco',\n", + " source: 'https://www.weatherapi.com/',\n", + " score: 0.9947009,\n", + " images: []\n", + " },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Current Weather for Popular Cities . San Francisco, CA 56 ° F Mostly Cloudy; Manhattan, NY warning 85 ° F Fair; Schiller Park, IL (60176) 71 ° F Mostly Cloudy; Boston, MA warning 84 ° F Partly ...',\n", + " metadata: {\n", + " title: 'San Francisco, CA Hourly Weather Forecast | Weather Underground',\n", + " source: 'https://www.wunderground.com/hourly/us/ca/san-francisco/date/2024-08-02',\n", + " score: 0.9859904,\n", + " images: []\n", + " },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'San Francisco CA 37.77°N 122.41°W (Elev. 131 ft) Last Update: 2:42 pm PDT Aug 4, 2024. Forecast Valid: 5pm PDT Aug 4, 2024-6pm PDT Aug 11, 2024 . Forecast Discussion . Additional Resources. Radar & Satellite Image. Hourly Weather Forecast. ... 
Severe Weather ; Current Outlook Maps ; Drought ; Fire Weather ; Fronts/Precipitation Maps ; Current ...',\n", + " metadata: {\n", + " title: 'National Weather Service',\n", + " source: 'https://forecast.weather.gov/zipcity.php?inputstring=San+Francisco,CA',\n", + " score: 0.98141783,\n", + " images: []\n", + " },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const query = \"what is the current weather in SF?\";\n", + "\n", + "await retriever.invoke(query);" + ] + }, + { + "cell_type": "markdown", + "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", + "metadata": {}, + "source": [ + "## Use within a chain\n", + "\n", + "Like other retrievers, `TavilySearchAPIRetriever` can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n", + "\n", + "We will need a LLM or chat model:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "25b647a3-f8f2-4541-a289-7a241e43f9df", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromTemplate(`\n", + "Answer the question based only on the context provided.\n", + "\n", + "Context: {context}\n", + "\n", + "Question: {question}`);\n", + "\n", + "const formatDocs = (docs: Document[]) => {\n", + " return docs.map((doc) => doc.pageContent).join(\"\\n\\n\");\n", + "}\n", + "\n", + "// See https://js.langchain.com/docs/tutorials/rag\n", + "const ragChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocs),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser(),\n", + "]);" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d47c37dd-5c11-416c-a3b6-bec413cd70e8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The current weather in San Francisco is partly cloudy with a temperature of 16.8°C (62.2°F). The wind is coming from the west at 13.2 mph (21.2 kph), and the humidity is at 74%. There is no precipitation, and visibility is 10 km (6 miles).\n" + ] + } + ], + "source": [ + "await ragChain.invoke(query);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `TavilySearchAPIRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_retrievers_tavily_search_api.TavilySearchAPIRetriever.html)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "await ragChain.invoke(query);" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `TavilySearchAPIRetriever` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_retrievers_tavily_search_api.TavilySearchAPIRetriever.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx index 1acf33509f90..838d8dce4e97 100644 --- a/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx +++ b/docs/core_docs/docs/integrations/retrievers/time-weighted-retriever.mdx @@ -32,5 +32,5 @@ npm install @langchain/openai @langchain/core ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx index b3a75fa44cf9..56e650587def 100644 --- a/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx +++ b/docs/core_docs/docs/integrations/retrievers/vespa-retriever.mdx @@ -28,5 +28,5 @@ Now you can return the results and continue using them in LangChain. 
## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx index 38c72128f6bd..3381e2b4459e 100644 --- a/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx +++ b/docs/core_docs/docs/integrations/retrievers/zep-cloud-retriever.mdx @@ -37,5 +37,5 @@ import Example from "@examples/retrievers/zep_cloud.ts"; ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx b/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx index 2e7dae378de7..a6d5dfc8befd 100644 --- a/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx +++ b/docs/core_docs/docs/integrations/retrievers/zep-retriever.mdx @@ -35,5 +35,5 @@ import Example from "@examples/retrievers/zep.ts"; ## Related -- Retriever [conceptual guide](/docs/concepts/#retrievers) +- Retriever [conceptual guide](/docs/concepts/retrievers) - Retriever [how-to guides](/docs/how_to/#retrievers) diff --git a/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx b/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx index d4115891c593..160bcd1f01b0 100644 --- a/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx +++ b/docs/core_docs/docs/integrations/stores/cassandra_storage.mdx @@ -61,4 +61,4 @@ import Example from "@examples/stores/cassandra_storage.ts"; ## Related -- [Key-value store conceptual guide](/docs/concepts/#key-value-stores) +- [Key-value store conceptual guide](/docs/concepts/key_value_stores) diff --git a/docs/core_docs/docs/integrations/stores/file_system.ipynb b/docs/core_docs/docs/integrations/stores/file_system.ipynb index 24331a29924c..74851a2f3df9 100644 --- a/docs/core_docs/docs/integrations/stores/file_system.ipynb +++ b/docs/core_docs/docs/integrations/stores/file_system.ipynb @@ -1,284 +1,284 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: File System Store\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# LocalFileStore\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "\n", - "Only available on Node.js.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "This will help you get started with [LocalFileStore](/docs/concepts/#key-value-stores). 
For detailed documentation of all LocalFileStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.storage_file_system.LocalFileStore.html).\n", - "\n", - "## Overview\n", - "\n", - "The `LocalFileStore` is a wrapper around the `fs` module for storing data as key-value pairs.\n", - "Each key value pair has its own file nested inside the directory passed to the `.fromPath` method.\n", - "The file name is the key and inside contains the value of the key.\n", - "\n", - "```{=mdx}\n", - "\n", - ":::info\n", - "\n", - "The path passed to the `.fromPath` must be a directory, not a file.\n", - "\n", - ":::\n", - "\n", - ":::warning\n", - "\n", - "\n", - "This file store can alter any text file in the provided directory and any subfolders.\n", - "Make sure that the path you specify when initializing the store is free of other files.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [PY support](https://python.langchain.com/docs/integrations/stores/file_system/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [LocalFileStore](https://api.js.langchain.com/classes/langchain.storage_file_system.LocalFileStore.html) | [langchain](https://api.js.langchain.com/modules/langchain.storage_file_system.html) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/langchain?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `LocalFileStore` integration lives in the `langchain` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " langchain @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our byte store:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { LocalFileStore } from \"langchain/storage/file_system\"\n", - "\n", - "const kvStore = await LocalFileStore.fromPath(\"./messages\");" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define an encoder and decoder for converting the data to `Uint8Array` and back:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "const encoder = new TextEncoder();\n", - "const decoder = new TextDecoder();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Usage\n", - "\n", - "You can set data under keys like this using the `mset` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 'value1', 'value2' ]\n" - ] - } - ], - "source": [ - "await kvStore.mset(\n", - " [\n", - " [\"key1\", encoder.encode(\"value1\")],\n", - " [\"key2\", encoder.encode(\"value2\")],\n", - " ]\n", - ")\n", - "\n", - "const results = await kvStore.mget(\n", - " [\n", - " \"key1\",\n", - " \"key2\",\n", - " ]\n", - ")\n", - "console.log(results.map((v) => decoder.decode(v)));" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And you can delete data 
using the `mdelete` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: File System Store\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ undefined, undefined ]\n" - ] - } - ], - "source": [ - "await kvStore.mdelete(\n", - " [\n", - " \"key1\",\n", - " \"key2\",\n", - " ]\n", - ")\n", - "\n", - "await kvStore.mget(\n", - " [\n", - " \"key1\",\n", - " \"key2\",\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Yielding values\n", - "\n", - "If you want to get back all the keys you can call the `yieldKeys` method. Optionally, you can pass a key prefix to only get back keys which match that prefix." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LocalFileStore\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you get started with [LocalFileStore](/docs/concepts/key_value_stores). For detailed documentation of all LocalFileStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.storage_file_system.LocalFileStore.html).\n", + "\n", + "## Overview\n", + "\n", + "The `LocalFileStore` is a wrapper around the `fs` module for storing data as key-value pairs.\n", + "Each key value pair has its own file nested inside the directory passed to the `.fromPath` method.\n", + "The file name is the key and inside contains the value of the key.\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info\n", + "\n", + "The path passed to the `.fromPath` must be a directory, not a file.\n", + "\n", + ":::\n", + "\n", + ":::warning\n", + "\n", + "\n", + "This file store can alter any text file in the provided directory and any subfolders.\n", + "Make sure that the path you specify when initializing the store is free of other files.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [PY support](https://python.langchain.com/docs/integrations/stores/file_system/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [LocalFileStore](https://api.js.langchain.com/classes/langchain.storage_file_system.LocalFileStore.html) | [langchain](https://api.js.langchain.com/modules/langchain.storage_file_system.html) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/langchain?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `LocalFileStore` integration lives in the `langchain` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " langchain @langchain/core\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 'message:id:key1', 'message:id:key2' ]\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate 
our byte store:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { LocalFileStore } from \"langchain/storage/file_system\"\n", + "\n", + "const kvStore = await LocalFileStore.fromPath(\"./messages\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define an encoder and decoder for converting the data to `Uint8Array` and back:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "const encoder = new TextEncoder();\n", + "const decoder = new TextDecoder();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage\n", + "\n", + "You can set data under keys like this using the `mset` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ 'value1', 'value2' ]\n" + ] + } + ], + "source": [ + "await kvStore.mset(\n", + " [\n", + " [\"key1\", encoder.encode(\"value1\")],\n", + " [\"key2\", encoder.encode(\"value2\")],\n", + " ]\n", + ")\n", + "\n", + "const results = await kvStore.mget(\n", + " [\n", + " \"key1\",\n", + " \"key2\",\n", + " ]\n", + ")\n", + "console.log(results.map((v) => decoder.decode(v)));" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And you can delete data using the `mdelete` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ undefined, undefined ]\n" + ] + } + ], + "source": [ + "await kvStore.mdelete(\n", + " [\n", + " \"key1\",\n", + " \"key2\",\n", + " ]\n", + ")\n", + "\n", + "await kvStore.mget(\n", + " [\n", + " \"key1\",\n", + " \"key2\",\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Yielding values\n", + "\n", + "If you want to get back all the keys you can call the `yieldKeys` method. Optionally, you can pass a key prefix to only get back keys which match that prefix." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ 'message:id:key1', 'message:id:key2' ]\n" + ] + } + ], + "source": [ + "import { LocalFileStore } from \"langchain/storage/file_system\"\n", + "\n", + "const kvStoreForYield = await LocalFileStore.fromPath(\"./messages\");\n", + "\n", + "const encoderForYield = new TextEncoder();\n", + "\n", + "// Add some data to the store\n", + "await kvStoreForYield.mset(\n", + " [\n", + " [\"message:id:key1\", encoderForYield.encode(\"value1\")],\n", + " [\"message:id:key2\", encoderForYield.encode(\"value2\")],\n", + " ]\n", + ")\n", + "\n", + "const yieldedKeys = [];\n", + "for await (const key of kvStoreForYield.yieldKeys(\"message:id:\")) {\n", + " yieldedKeys.push(key);\n", + "}\n", + "\n", + "console.log(yieldedKeys);" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "import fs from \"fs\";\n", + "\n", + "// Cleanup\n", + "await fs.promises.rm(\"./messages\", { recursive: true, force: true });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all LocalFileStore features and configurations, head to the [API reference](https://api.js.langchain.com/classes/langchain_storage_file_system.LocalFileStore.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { LocalFileStore } from \"langchain/storage/file_system\"\n", - "\n", - "const kvStoreForYield = await LocalFileStore.fromPath(\"./messages\");\n", - "\n", - "const encoderForYield = new TextEncoder();\n", - "\n", - "// Add some data to the store\n", - "await kvStoreForYield.mset(\n", - " [\n", - " [\"message:id:key1\", encoderForYield.encode(\"value1\")],\n", - " [\"message:id:key2\", encoderForYield.encode(\"value2\")],\n", - " ]\n", - ")\n", - "\n", - "const yieldedKeys = [];\n", - "for await (const key of kvStoreForYield.yieldKeys(\"message:id:\")) {\n", - " yieldedKeys.push(key);\n", - "}\n", - "\n", - "console.log(yieldedKeys);" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "import fs from \"fs\";\n", - "\n", - "// Cleanup\n", - "await fs.promises.rm(\"./messages\", { recursive: true, force: true });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all LocalFileStore features and configurations, head to the [API reference](https://api.js.langchain.com/classes/langchain_storage_file_system.LocalFileStore.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/stores/in_memory.ipynb 
b/docs/core_docs/docs/integrations/stores/in_memory.ipynb index 0c927df1291a..3e0f54520f8e 100644 --- a/docs/core_docs/docs/integrations/stores/in_memory.ipynb +++ b/docs/core_docs/docs/integrations/stores/in_memory.ipynb @@ -1,238 +1,238 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: InMemory Store\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# InMemoryStore\n", - "\n", - "This will help you get started with [InMemoryStore](/docs/concepts/#key-value-stores). For detailed documentation of all InMemoryStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html).\n", - "\n", - "The `InMemoryStore` allows for a generic type to be assigned to the values in the store. We'll assign type `BaseMessage` as the type of our values, keeping with the theme of a chat history store.\n", - "\n", - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [PY support](https://python.langchain.com/docs/integrations/stores/in_memory/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [InMemoryStore](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html) | [@langchain/core](https://api.js.langchain.com/modules/langchain_core.stores.html) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/core?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/core?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "### Installation\n", - "\n", - "The LangChain InMemoryStore integration lives in the `@langchain/core` package:\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our byte store:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import { InMemoryStore } from \"@langchain/core/stores\"\n", - "import { BaseMessage } from \"@langchain/core/messages\";\n", - "\n", - "const kvStore = new InMemoryStore();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Usage\n", - "\n", - "You can set data under keys like this using the `mset` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " HumanMessage {\n", - " \"content\": \"value1\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"value2\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "await kvStore.mset(\n", - " [\n", - " [\"key1\", new HumanMessage(\"value1\")],\n", - " [\"key2\", new AIMessage(\"value2\")],\n", - " ]\n", - ")\n", - "\n", - "await kvStore.mget(\n", - " [\n", - " \"key1\",\n", - " \"key2\",\n", - " 
]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And you can delete data using the `mdelete` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: InMemory Store\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ undefined, undefined ]\n" - ] - } - ], - "source": [ - "await kvStore.mdelete(\n", - " [\n", - " \"key1\",\n", - " \"key2\",\n", - " ]\n", - ")\n", - "\n", - "await kvStore.mget(\n", - " [\n", - " \"key1\",\n", - " \"key2\",\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Yielding values\n", - "\n", - "If you want to get back all the keys you can call the `yieldKeys` method. Optionally, you can pass a key prefix to only get back keys which match that prefix." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# InMemoryStore\n", + "\n", + "This will help you get started with [InMemoryStore](/docs/concepts/key_value_stores). For detailed documentation of all InMemoryStore features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html).\n", + "\n", + "The `InMemoryStore` allows for a generic type to be assigned to the values in the store. We'll assign type `BaseMessage` as the type of our values, keeping with the theme of a chat history store.\n", + "\n", + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [PY support](https://python.langchain.com/docs/integrations/stores/in_memory/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [InMemoryStore](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html) | [@langchain/core](https://api.js.langchain.com/modules/langchain_core.stores.html) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/core?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/core?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "### Installation\n", + "\n", + "The LangChain InMemoryStore integration lives in the `@langchain/core` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our byte store:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { InMemoryStore } from \"@langchain/core/stores\"\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", + "\n", + "const kvStore = new InMemoryStore();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage\n", + "\n", + "You can set data under keys like this using the `mset` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " HumanMessage {\n", + " \"content\": \"value1\",\n", + " 
\"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"value2\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "await kvStore.mset(\n", + " [\n", + " [\"key1\", new HumanMessage(\"value1\")],\n", + " [\"key2\", new AIMessage(\"value2\")],\n", + " ]\n", + ")\n", + "\n", + "await kvStore.mget(\n", + " [\n", + " \"key1\",\n", + " \"key2\",\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And you can delete data using the `mdelete` method:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 'message:id:key1', 'message:id:key2' ]\n" - ] + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ undefined, undefined ]\n" + ] + } + ], + "source": [ + "await kvStore.mdelete(\n", + " [\n", + " \"key1\",\n", + " \"key2\",\n", + " ]\n", + ")\n", + "\n", + "await kvStore.mget(\n", + " [\n", + " \"key1\",\n", + " \"key2\",\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Yielding values\n", + "\n", + "If you want to get back all the keys you can call the `yieldKeys` method. Optionally, you can pass a key prefix to only get back keys which match that prefix." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ 'message:id:key1', 'message:id:key2' ]\n" + ] + } + ], + "source": [ + "import { InMemoryStore } from \"@langchain/core/stores\"\n", + "import { AIMessage, BaseMessage, HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const kvStoreForYield = new InMemoryStore();\n", + "\n", + "// Add some data to the store\n", + "await kvStoreForYield.mset(\n", + " [\n", + " [\"message:id:key1\", new HumanMessage(\"value1\")],\n", + " [\"message:id:key2\", new AIMessage(\"value2\")],\n", + " ]\n", + ")\n", + "\n", + "const yieldedKeys = [];\n", + "for await (const key of kvStoreForYield.yieldKeys(\"message:id:\")) {\n", + " yieldedKeys.push(key);\n", + "}\n", + "\n", + "console.log(yieldedKeys);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all InMemoryStore features and configurations, head to the [API reference](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { InMemoryStore } from \"@langchain/core/stores\"\n", - "import { AIMessage, BaseMessage, HumanMessage } from \"@langchain/core/messages\";\n", - "\n", - "const kvStoreForYield = new InMemoryStore();\n", - "\n", - "// Add some data to the store\n", - "await kvStoreForYield.mset(\n", - " [\n", - " [\"message:id:key1\", new HumanMessage(\"value1\")],\n", - " [\"message:id:key2\", new AIMessage(\"value2\")],\n", - " ]\n", - ")\n", - "\n", - "const 
yieldedKeys = [];\n", - "for await (const key of kvStoreForYield.yieldKeys(\"message:id:\")) {\n", - " yieldedKeys.push(key);\n", - "}\n", - "\n", - "console.log(yieldedKeys);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all InMemoryStore features and configurations, head to the [API reference](https://api.js.langchain.com/classes/langchain_core.stores.InMemoryStore.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/stores/index.mdx b/docs/core_docs/docs/integrations/stores/index.mdx index c5782bb1c919..eedabb641d92 100644 --- a/docs/core_docs/docs/integrations/stores/index.mdx +++ b/docs/core_docs/docs/integrations/stores/index.mdx @@ -6,7 +6,7 @@ sidebar_class_name: hidden import { CategoryTable, IndexTable } from "@theme/FeatureTables"; -[Key-value stores](/docs/concepts/#key-value-stores) are used by other LangChain components to store and retrieve data. +[Key-value stores](/docs/concepts/key_value_stores) are used by other LangChain components to store and retrieve data. ## All key-value stores diff --git a/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx b/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx index 6f6cf10377c1..1ae94342f05f 100644 --- a/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx +++ b/docs/core_docs/docs/integrations/stores/ioredis_storage.mdx @@ -17,4 +17,4 @@ import Example from "@examples/stores/ioredis_storage.ts"; ## Related -- [Key-value store conceptual guide](/docs/concepts/#key-value-stores) +- [Key-value store conceptual guide](/docs/concepts/key_value_stores) diff --git a/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx b/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx index e422ec658bd1..41a3a86ea25c 100644 --- a/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx +++ b/docs/core_docs/docs/integrations/stores/upstash_redis_storage.mdx @@ -17,4 +17,4 @@ import Example from "@examples/stores/upstash_redis_storage.ts"; ## Related -- [Key-value store conceptual guide](/docs/concepts/#key-value-stores) +- [Key-value store conceptual guide](/docs/concepts/key_value_stores) diff --git a/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx b/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx index ffe5dc692e04..0d3533027f14 100644 --- a/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx +++ b/docs/core_docs/docs/integrations/stores/vercel_kv_storage.mdx @@ -17,4 +17,4 @@ import Example from "@examples/stores/vercel_kv_storage.ts"; ## Related -- [Key-value store conceptual guide](/docs/concepts/#key-value-stores) +- [Key-value store conceptual guide](/docs/concepts/key_value_stores) diff --git a/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx b/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx index 1ddc3ce1b994..997882b79274 100644 --- a/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx +++ 
b/docs/core_docs/docs/integrations/text_embedding/alibaba_tongyi.mdx @@ -29,5 +29,5 @@ import AlibabaTongyiExample from "@examples/embeddings/alibaba_tongyi.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb b/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb index 3d9583386dec..d270824ee0c6 100644 --- a/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/azure_openai.ipynb @@ -1,490 +1,490 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Azure OpenAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# AzureOpenAIEmbeddings\n", - "\n", - "[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond.\n", - "\n", - "LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node).\n", - "\n", - "You can learn more about Azure OpenAI and its difference with the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview). If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started.\n", - "\n", - "This will help you get started with AzureOpenAIEmbeddings [embedding models](/docs/concepts#embedding-models) using LangChain. For detailed documentation on `AzureOpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAIEmbeddings.html).\n", - "\n", - "\n", - "```{=mdx}\n", - "\n", - ":::info\n", - "\n", - "Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). 
This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which allows to access the latest OpenAI models and features the same day they are released, and allows seamless transition between the OpenAI API and Azure OpenAI.\n", - "\n", - "If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/azureopenai/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [AzureOpenAIEmbeddings](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAIEmbeddings.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Azure OpenAI embedding models you'll need to create an Azure account, get an API key, and install the `@langchain/openai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "You'll need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", - "\n", - "Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance.\n", - "\n", - "If you're using Node.js, you can define the following environment variables to use the service:\n", - "\n", - "```bash\n", - "AZURE_OPENAI_API_INSTANCE_NAME=\n", - "AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=\n", - "AZURE_OPENAI_API_KEY=\n", - "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain AzureOpenAIEmbeddings integration lives in the `@langchain/openai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "\n", - ":::info\n", - "\n", - "You can find the list of supported API versions in the [Azure OpenAI documentation](https://learn.microsoft.com/azure/ai-services/openai/reference).\n", - "\n", - ":::\n", - "\n", - ":::tip\n", - "\n", - "If `AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME` is not defined, it will fall back to the value of `AZURE_OPENAI_API_DEPLOYMENT_NAME` for the deployment name. 
The same applies to the `azureOpenAIApiEmbeddingsDeploymentName` parameter in the `AzureOpenAIEmbeddings` constructor, which will fall back to the value of `azureOpenAIApiDeploymentName` if not defined.\n", - "\n", - ":::\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and embed text:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new AzureOpenAIEmbeddings({\n", - " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", - " azureOpenAIApiInstanceName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", - " azureOpenAIApiEmbeddingsDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME\n", - " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", - " maxRetries: 1,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. 
This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Azure OpenAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.024253517, -0.0054218727, 0.048715446, 0.020580322, 0.03180832,\n", - " 0.0028770117, -0.012367731, 0.037383243, -0.054915592, 0.032225136,\n", - " 0.00825818, -0.023888804, -0.01184671, 0.012257014, 0.016294925,\n", - " 0.009254632, 0.0051353113, -0.008889917, 0.016855022, 0.04207243,\n", - " 0.00082589936, -0.011664353, 0.00818654, 0.029020859, -0.012335167,\n", - " -0.019603407, 0.0013945447, 0.05538451, -0.011625277, -0.008153976,\n", - " 0.038607642, -0.03811267, -0.0074440846, 0.047647353, -0.00927417,\n", - " 0.024201415, -0.0069230637, -0.008538228, 0.003910912, 0.052805457,\n", - " -0.023159374, 0.0014352495, -0.038659744, 0.017141584, 0.005587948,\n", - " 0.007971618, -0.016920151, 0.06658646, -0.0016916894, 0.045667473,\n", - " -0.042202685, -0.03983204, -0.04160351, -0.011729481, -0.055905532,\n", - " 0.012543576, 0.0038848612, 0.007919516, 0.010915386, 0.0033117384,\n", - " -0.007548289, -0.030427614, -0.041890074, 0.036002535, -0.023771575,\n", - " -0.008792226, -0.049444873, 0.016490309, -0.0060568666, 0.040196754,\n", - " 0.014106638, -0.014575557, -0.0017356506, -0.011234511, -0.012517525,\n", - " 0.008362384, 0.01253055, 0.036158845, 0.008297256, -0.0010908874,\n", - " -0.014888169, -0.020489143, 0.018965157, -0.057937514, -0.0037122732,\n", - " 0.004402626, -0.00840146, 0.042984217, -0.04936672, -0.03714878,\n", - " 0.004969236, 0.03707063, 0.015396165, -0.02055427, 0.01988997,\n", - " 0.030219207, -0.021257648, 0.01340326, 0.003692735, 0.012595678\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# AzureOpenAIEmbeddings\n", + "\n", + "[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond.\n", + "\n", + "LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node).\n", + "\n", + "You can learn more about Azure OpenAI and its difference with the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview). If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started.\n", + "\n", + "This will help you get started with AzureOpenAIEmbeddings [embedding models](/docs/concepts/embedding_models) using LangChain. 
For detailed documentation on `AzureOpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAIEmbeddings.html).\n", + "\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info\n", + "\n", + "Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which allows to access the latest OpenAI models and features the same day they are released, and allows seamless transition between the OpenAI API and Azure OpenAI.\n", + "\n", + "If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/azureopenai/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [AzureOpenAIEmbeddings](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAIEmbeddings.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Azure OpenAI embedding models you'll need to create an Azure account, get an API key, and install the `@langchain/openai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "You'll need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", + "\n", + "Once you have your instance running, make sure you have the name of your instance and key. 
You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance.\n", + "\n", + "If you're using Node.js, you can define the following environment variables to use the service:\n", + "\n", + "```bash\n", + "AZURE_OPENAI_API_INSTANCE_NAME=\n", + "AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=\n", + "AZURE_OPENAI_API_KEY=\n", + "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain AzureOpenAIEmbeddings integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "\n", + ":::info\n", + "\n", + "You can find the list of supported API versions in the [Azure OpenAI documentation](https://learn.microsoft.com/azure/ai-services/openai/reference).\n", + "\n", + ":::\n", + "\n", + ":::tip\n", + "\n", + "If `AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME` is not defined, it will fall back to the value of `AZURE_OPENAI_API_DEPLOYMENT_NAME` for the deployment name. The same applies to the `azureOpenAIApiEmbeddingsDeploymentName` parameter in the `AzureOpenAIEmbeddings` constructor, which will fall back to the value of `azureOpenAIApiDeploymentName` if not defined.\n", + "\n", + ":::\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and embed text:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new AzureOpenAIEmbeddings({\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiInstanceName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", + " azureOpenAIApiEmbeddingsDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " maxRetries: 1,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.024253517, -0.0054218727, 0.048715446, 0.020580322, 0.03180832,\n", + " 0.0028770117, -0.012367731, 0.037383243, -0.054915592, 0.032225136,\n", + " 0.00825818, -0.023888804, -0.01184671, 0.012257014, 0.016294925,\n", + " 0.009254632, 0.0051353113, -0.008889917, 0.016855022, 0.04207243,\n", + " 0.00082589936, -0.011664353, 0.00818654, 0.029020859, -0.012335167,\n", + " -0.019603407, 0.0013945447, 0.05538451, -0.011625277, -0.008153976,\n", + " 0.038607642, -0.03811267, -0.0074440846, 0.047647353, -0.00927417,\n", + " 0.024201415, -0.0069230637, -0.008538228, 0.003910912, 0.052805457,\n", + " -0.023159374, 0.0014352495, -0.038659744, 0.017141584, 0.005587948,\n", + " 0.007971618, -0.016920151, 0.06658646, -0.0016916894, 0.045667473,\n", + " -0.042202685, -0.03983204, -0.04160351, -0.011729481, -0.055905532,\n", + " 0.012543576, 0.0038848612, 0.007919516, 0.010915386, 0.0033117384,\n", + " -0.007548289, -0.030427614, -0.041890074, 0.036002535, -0.023771575,\n", + " -0.008792226, -0.049444873, 0.016490309, -0.0060568666, 0.040196754,\n", + " 0.014106638, -0.014575557, -0.0017356506, -0.011234511, -0.012517525,\n", + " 0.008362384, 0.01253055, 0.036158845, 0.008297256, -0.0010908874,\n", + " -0.014888169, -0.020489143, 0.018965157, -0.057937514, -0.0037122732,\n", + " 0.004402626, -0.00840146, 0.042984217, -0.04936672, -0.03714878,\n", + " 0.004969236, 0.03707063, 0.015396165, -0.02055427, 0.01988997,\n", + " 0.030219207, -0.021257648, 0.01340326, 0.003692735, 0.012595678\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + 
"source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.024253517, -0.0054218727, 0.048715446, 0.020580322, 0.03180832,\n", - " 0.0028770117, -0.012367731, 0.037383243, -0.054915592, 0.032225136,\n", - " 0.00825818, -0.023888804, -0.01184671, 0.012257014, 0.016294925,\n", - " 0.009254632, 0.0051353113, -0.008889917, 0.016855022, 0.04207243,\n", - " 0.00082589936, -0.011664353, 0.00818654, 0.029020859, -0.012335167,\n", - " -0.019603407, 0.0013945447, 0.05538451, -0.011625277, -0.008153976,\n", - " 0.038607642, -0.03811267, -0.0074440846, 0.047647353, -0.00927417,\n", - " 0.024201415, -0.0069230637, -0.008538228, 0.003910912, 0.052805457,\n", - " -0.023159374, 0.0014352495, -0.038659744, 0.017141584, 0.005587948,\n", - " 0.007971618, -0.016920151, 0.06658646, -0.0016916894, 0.045667473,\n", - " -0.042202685, -0.03983204, -0.04160351, -0.011729481, -0.055905532,\n", - " 0.012543576, 0.0038848612, 0.007919516, 0.010915386, 0.0033117384,\n", - " -0.007548289, -0.030427614, -0.041890074, 0.036002535, -0.023771575,\n", - " -0.008792226, -0.049444873, 0.016490309, -0.0060568666, 0.040196754,\n", - " 0.014106638, -0.014575557, -0.0017356506, -0.011234511, -0.012517525,\n", - " 0.008362384, 0.01253055, 0.036158845, 0.008297256, -0.0010908874,\n", - " -0.014888169, -0.020489143, 0.018965157, -0.057937514, -0.0037122732,\n", - " 0.004402626, -0.00840146, 0.042984217, -0.04936672, -0.03714878,\n", - " 0.004969236, 0.03707063, 0.015396165, -0.02055427, 0.01988997,\n", - " 0.030219207, -0.021257648, 0.01340326, 0.003692735, 0.012595678\n", - "]\n", - "[\n", - " -0.033366997, 0.010419146, 0.0118083665, -0.040441725, 0.0020355924,\n", - " -0.015808804, -0.023629595, -0.0066180876, -0.040004376, 0.020053642,\n", - " -0.0010797002, -0.03900105, -0.009956073, 0.0027896944, 0.003305828,\n", - " -0.034010153, 0.009833873, 0.0061164247, 0.022536227, 0.029147884,\n", - " 0.017789727, 0.03182342, 0.010869357, 0.031849146, -0.028093107,\n", - " 0.008283865, -0.0145610785, 0.01645196, -0.029430874, -0.02508313,\n", - " 0.046178687, -0.01722375, -0.010046115, 0.013101112, 0.0044538635,\n", - " 0.02197025, 0.03985002, 0.007955855, 0.0008819293, 0.012657333,\n", - " 0.014368132, -0.014007963, -0.03722594, 0.031617608, -0.011570398,\n", - " 0.039052505, 0.0020018267, 0.023706773, -0.0046950476, 0.056083307,\n", - " -0.08412496, -0.043425974, -0.015512952, 0.015950298, -0.03624834,\n", - " -0.0053317733, -0.037251666, 0.0046339477, 0.04193385, 0.023475237,\n", - " -0.021378545, 0.013699248, -0.026009277, 0.050757967, -0.0494202,\n", - " 0.0007874656, -0.07208506, 0.015885983, -0.003259199, 0.015127057,\n", - " 0.0068946453, -0.035373647, -0.005875241, -0.0032238255, -0.04185667,\n", - " -0.022047428, 0.0014326327, -0.0070940237, -0.0027864785, -0.016271876,\n", - " 0.005097021, 0.034473225, 0.012361481, -0.026498076, 0.0067274245,\n", - " -0.026330855, -0.006132504, 0.008180959, -0.049368747, -0.032337945,\n", - " 0.011049441, 0.00186194, -0.012097787, 0.01930758, 0.07059293,\n", - " 0.029713862, 0.04337452, -0.0048461896, -0.019976463, 0.011473924\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.024253517, -0.0054218727, 0.048715446, 
0.020580322, 0.03180832,\n", + " 0.0028770117, -0.012367731, 0.037383243, -0.054915592, 0.032225136,\n", + " 0.00825818, -0.023888804, -0.01184671, 0.012257014, 0.016294925,\n", + " 0.009254632, 0.0051353113, -0.008889917, 0.016855022, 0.04207243,\n", + " 0.00082589936, -0.011664353, 0.00818654, 0.029020859, -0.012335167,\n", + " -0.019603407, 0.0013945447, 0.05538451, -0.011625277, -0.008153976,\n", + " 0.038607642, -0.03811267, -0.0074440846, 0.047647353, -0.00927417,\n", + " 0.024201415, -0.0069230637, -0.008538228, 0.003910912, 0.052805457,\n", + " -0.023159374, 0.0014352495, -0.038659744, 0.017141584, 0.005587948,\n", + " 0.007971618, -0.016920151, 0.06658646, -0.0016916894, 0.045667473,\n", + " -0.042202685, -0.03983204, -0.04160351, -0.011729481, -0.055905532,\n", + " 0.012543576, 0.0038848612, 0.007919516, 0.010915386, 0.0033117384,\n", + " -0.007548289, -0.030427614, -0.041890074, 0.036002535, -0.023771575,\n", + " -0.008792226, -0.049444873, 0.016490309, -0.0060568666, 0.040196754,\n", + " 0.014106638, -0.014575557, -0.0017356506, -0.011234511, -0.012517525,\n", + " 0.008362384, 0.01253055, 0.036158845, 0.008297256, -0.0010908874,\n", + " -0.014888169, -0.020489143, 0.018965157, -0.057937514, -0.0037122732,\n", + " 0.004402626, -0.00840146, 0.042984217, -0.04936672, -0.03714878,\n", + " 0.004969236, 0.03707063, 0.015396165, -0.02055427, 0.01988997,\n", + " 0.030219207, -0.021257648, 0.01340326, 0.003692735, 0.012595678\n", + "]\n", + "[\n", + " -0.033366997, 0.010419146, 0.0118083665, -0.040441725, 0.0020355924,\n", + " -0.015808804, -0.023629595, -0.0066180876, -0.040004376, 0.020053642,\n", + " -0.0010797002, -0.03900105, -0.009956073, 0.0027896944, 0.003305828,\n", + " -0.034010153, 0.009833873, 0.0061164247, 0.022536227, 0.029147884,\n", + " 0.017789727, 0.03182342, 0.010869357, 0.031849146, -0.028093107,\n", + " 0.008283865, -0.0145610785, 0.01645196, -0.029430874, -0.02508313,\n", + " 0.046178687, -0.01722375, -0.010046115, 0.013101112, 0.0044538635,\n", + " 0.02197025, 0.03985002, 0.007955855, 0.0008819293, 0.012657333,\n", + " 0.014368132, -0.014007963, -0.03722594, 0.031617608, -0.011570398,\n", + " 0.039052505, 0.0020018267, 0.023706773, -0.0046950476, 0.056083307,\n", + " -0.08412496, -0.043425974, -0.015512952, 0.015950298, -0.03624834,\n", + " -0.0053317733, -0.037251666, 0.0046339477, 0.04193385, 0.023475237,\n", + " -0.021378545, 0.013699248, -0.026009277, 0.050757967, -0.0494202,\n", + " 0.0007874656, -0.07208506, 0.015885983, -0.003259199, 0.015127057,\n", + " 0.0068946453, -0.035373647, -0.005875241, -0.0032238255, -0.04185667,\n", + " -0.022047428, 0.0014326327, -0.0070940237, -0.0027864785, -0.016271876,\n", + " 0.005097021, 0.034473225, 0.012361481, -0.026498076, 0.0067274245,\n", + " -0.026330855, -0.006132504, 0.008180959, -0.049368747, -0.032337945,\n", + " 0.011049441, 0.00186194, -0.012097787, 0.01930758, 0.07059293,\n", + " 0.029713862, 0.04337452, -0.0048461896, -0.019976463, 0.011473924\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "2d918b72", + "metadata": {}, + "source": [ + "## Using Azure Managed Identity\n", + "\n", + "If you're using Azure Managed Identity, you can configure the credentials like this:" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "29ccc1e1", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " DefaultAzureCredential,\n", + " getBearerTokenProvider,\n", + "} from \"@azure/identity\";\n", + "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const credentials = new DefaultAzureCredential();\n", + "const azureADTokenProvider = getBearerTokenProvider(\n", + " credentials,\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ");\n", + "\n", + "const modelWithManagedIdentity = new AzureOpenAIEmbeddings({\n", + " azureADTokenProvider,\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiEmbeddingsDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "1909f724", + "metadata": {}, + "source": [ + "## Using a different domain\n", + "\n", + "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", + "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b4b16f32", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddingsDifferentDomain = new AzureOpenAIEmbeddings({\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiEmbeddingsDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " azureOpenAIBasePath:\n", + " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "77960c17", + "metadata": {}, + "source": [ + "## Custom headers\n", + "\n", + "You can specify custom headers by passing in a `configuration` field:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7aad2a2", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddingsWithCustomHeaders = new AzureOpenAIEmbeddings({\n", + " azureOpenAIApiKey: \"\",\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiEmbeddingsDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + " configuration: {\n", + " defaultHeaders: {\n", + " \"x-custom-header\": `SOME_VALUE`,\n", + " },\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "225e191b", + "metadata": {}, + "source": [ + "The `configuration` field also accepts other `ClientOptions` parameters accepted by the official SDK.\n", + "\n", + "**Note:** The specific header `api-key` currently cannot be overridden in this manner and will pass through the value from `azureOpenAIApiKey`." + ] + }, + { + "cell_type": "markdown", + "id": "7b2e885a", + "metadata": {}, + "source": [ + "## Migration from Azure OpenAI SDK\n", + "\n", + "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", + "\n", + "1. 
Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n",
+        "   ```bash npm2yarn\n",
+        "   npm install @langchain/openai\n",
+        "   npm uninstall @langchain/azure-openai\n",
+        "   ```\n",
+        "2. Update your imports to use the new `AzureOpenAIEmbeddings` class from the `@langchain/openai` package:\n",
+        "   ```typescript\n",
+        "   import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n",
+        "   ```\n",
+        "3. Update your code to use the new `AzureOpenAIEmbeddings` class and pass the required parameters:\n",
+        "\n",
+        "   ```typescript\n",
+        "   const model = new AzureOpenAIEmbeddings({\n",
+        "     azureOpenAIApiKey: \"\",\n",
+        "     azureOpenAIApiInstanceName: \"\",\n",
+        "     azureOpenAIApiEmbeddingsDeploymentName:\n",
+        "       \"\",\n",
+        "     azureOpenAIApiVersion: \"\",\n",
+        "   });\n",
+        "   ```\n",
+        "\n",
+        "   Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n",
+        "\n",
+        "   - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`; see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n",
+        "\n",
+        "   - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "id": "8938e581",
+      "metadata": {},
+      "source": [
+        "## API reference\n",
+        "\n",
+        "For detailed documentation of all AzureOpenAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureOpenAIEmbeddings.html"
+      ]
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "TypeScript",
+      "language": "typescript",
+      "name": "tslab"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "mode": "typescript",
+        "name": "javascript",
+        "typescript": true
+      },
+      "file_extension": ".ts",
+      "mimetype": "text/typescript",
+      "name": "typescript",
+      "version": "3.7.2"
    }
-   ],
-   "source": [
-    "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n",
-    "\n",
-    "const vectors = await embeddings.embedDocuments([text, text2]);\n",
-    "\n",
-    "console.log(vectors[0].slice(0, 100));\n",
-    "console.log(vectors[1].slice(0, 100));"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "2d918b72",
-   "metadata": {},
-   "source": [
-    "## Using Azure Managed Identity\n",
-    "\n",
-    "If you're using Azure Managed Identity, you can configure the credentials like this:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "29ccc1e1",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import {\n",
-    "  DefaultAzureCredential,\n",
-    "  getBearerTokenProvider,\n",
-    "} from \"@azure/identity\";\n",
-    "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n",
-    "\n",
-    "const credentials = new DefaultAzureCredential();\n",
-    "const azureADTokenProvider = getBearerTokenProvider(\n",
-    "  credentials,\n",
-    "  \"https://cognitiveservices.azure.com/.default\"\n",
-    ");\n",
-    "\n",
-    "const modelWithManagedIdentity = new AzureOpenAIEmbeddings({\n",
-    "  azureADTokenProvider,\n",
-    "  azureOpenAIApiInstanceName: \"\",\n",
-    "  azureOpenAIApiEmbeddingsDeploymentName: \"\",\n",
-    "  azureOpenAIApiVersion: \"\",\n",
-    "});\n"
-   ]
- }, - { - "cell_type": "markdown", - "id": "1909f724", - "metadata": {}, - "source": [ - "## Using a different domain\n", - "\n", - "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", - "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b4b16f32", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddingsDifferentDomain = new AzureOpenAIEmbeddings({\n", - " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", - " azureOpenAIApiEmbeddingsDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME\n", - " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", - " azureOpenAIBasePath:\n", - " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", - "});\n" - ] - }, - { - "cell_type": "markdown", - "id": "77960c17", - "metadata": {}, - "source": [ - "## Custom headers\n", - "\n", - "You can specify custom headers by passing in a `configuration` field:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a7aad2a2", - "metadata": {}, - "outputs": [], - "source": [ - "import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddingsWithCustomHeaders = new AzureOpenAIEmbeddings({\n", - " azureOpenAIApiKey: \"\",\n", - " azureOpenAIApiInstanceName: \"\",\n", - " azureOpenAIApiEmbeddingsDeploymentName: \"\",\n", - " azureOpenAIApiVersion: \"\",\n", - " configuration: {\n", - " defaultHeaders: {\n", - " \"x-custom-header\": `SOME_VALUE`,\n", - " },\n", - " },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "225e191b", - "metadata": {}, - "source": [ - "The `configuration` field also accepts other `ClientOptions` parameters accepted by the official SDK.\n", - "\n", - "**Note:** The specific header `api-key` currently cannot be overridden in this manner and will pass through the value from `azureOpenAIApiKey`." - ] - }, - { - "cell_type": "markdown", - "id": "7b2e885a", - "metadata": {}, - "source": [ - "## Migration from Azure OpenAI SDK\n", - "\n", - "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", - "\n", - "1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", - " ```bash npm2yarn\n", - " npm install @langchain/openai\n", - " npm uninstall @langchain/azure-openai\n", - " ```\n", - "2. Update your imports to use the new `AzureOpenAIEmbeddings` classe from the `@langchain/openai` package:\n", - " ```typescript\n", - " import { AzureOpenAIEmbeddings } from \"@langchain/openai\";\n", - " ```\n", - "3. 
Update your code to use the new `AzureOpenAIEmbeddings` class and pass the required parameters:\n", - "\n", - " ```typescript\n", - " const model = new AzureOpenAIEmbeddings({\n", - " azureOpenAIApiKey: \"\",\n", - " azureOpenAIApiInstanceName: \"\",\n", - " azureOpenAIApiEmbeddingsDeploymentName:\n", - " \"\",\n", - " azureOpenAIApiVersion: \"\",\n", - " });\n", - " ```\n", - "\n", - " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", - "\n", - " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", - "\n", - " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all AzureOpenAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureOpenAIEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx b/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx index 64697325241d..a888cd077eb9 100644 --- a/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/baidu_qianfan.mdx @@ -27,5 +27,5 @@ import BaiduQianFanExample from "@examples/embeddings/baidu_qianfan.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb b/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb index 73986c7f7bbd..7d597e9c37d8 100644 --- a/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/bedrock.ipynb @@ -1,343 +1,343 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Amazon Bedrock\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# BedrockEmbeddings\n", - "\n", - "[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with 
security, privacy, and responsible AI.\n", - "\n", - "This will help you get started with Amazon Bedrock [embedding models](/docs/concepts#embedding-models) using LangChain. For detailed documentation on `Bedrock` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_aws.BedrockEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/bedrock/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [Bedrock](https://api.js.langchain.com/classes/langchain_aws.BedrockEmbeddings.html) | [@langchain/aws](https://api.js.langchain.com/modules/langchain_aws.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/aws?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/aws?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Bedrock embedding models you'll need to create an AWS account, get an API key, and install the `@langchain/aws` integration package.\n", - "\n", - "Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html) to sign up for AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by [following these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html).\n", - "\n", - "### Credentials\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain Bedrock integration lives in the `@langchain/aws` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/aws @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and embed text.\n", - "\n", - "There are a few different ways to authenticate with AWS - the below examples rely on an access key, secret access key and region set in your environment variables:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { BedrockEmbeddings } from \"@langchain/aws\";\n", - "\n", - "const embeddings = new BedrockEmbeddings({\n", - " region: process.env.BEDROCK_AWS_REGION!,\n", - " credentials: {\n", - " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,\n", - " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,\n", - " },\n", - " model: \"amazon.titan-embed-text-v1\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. 
For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. 
This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Amazon Bedrock\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.625, 0.111328125, 0.265625, -0.20019531, 0.40820312,\n", - " -0.010803223, -0.22460938, -0.0002937317, 0.29882812, -0.14355469,\n", - " -0.068847656, -0.3984375, 0.75, -0.1953125, -0.5546875,\n", - " -0.087402344, 0.5625, 1.390625, -0.3515625, 0.39257812,\n", - " -0.061767578, 0.65625, -0.36328125, -0.06591797, 0.234375,\n", - " -0.36132812, 0.42382812, -0.115234375, -0.28710938, -0.29296875,\n", - " -0.765625, -0.16894531, 0.23046875, 0.6328125, -0.08544922,\n", - " 0.13671875, 0.0004272461, 0.3125, 0.12207031, -0.546875,\n", - " 0.14257812, -0.119628906, -0.111328125, 0.61328125, 0.6875,\n", - " 0.3671875, -0.2578125, -0.27734375, 0.703125, 0.203125,\n", - " 0.17675781, -0.26757812, -0.76171875, 0.71484375, 0.77734375,\n", - " -0.1953125, -0.007232666, -0.044921875, 0.23632812, -0.24121094,\n", - " -0.012207031, 0.5078125, 0.08984375, 0.56640625, -0.3046875,\n", - " 0.6484375, -0.25, -0.37890625, -0.2421875, 0.38476562,\n", - " -0.18164062, -0.05810547, 0.7578125, 0.04296875, 0.609375,\n", - " 0.50390625, 0.023803711, -0.23046875, 0.099121094, 0.79296875,\n", - " -1.296875, 0.671875, -0.66796875, 0.43359375, 0.087890625,\n", - " 0.14550781, -0.37304688, -0.068359375, 0.00012874603, -0.47265625,\n", - " -0.765625, 0.07861328, -0.029663086, 0.076660156, -0.32617188,\n", - " -0.453125, -0.5546875, -0.45703125, 1.1015625, -0.29492188\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# BedrockEmbeddings\n", + "\n", + "[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI.\n", + "\n", + "This will help you get started with Amazon Bedrock [embedding models](/docs/concepts/embedding_models) using LangChain. 
For detailed documentation on `Bedrock` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_aws.BedrockEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/bedrock/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [Bedrock](https://api.js.langchain.com/classes/langchain_aws.BedrockEmbeddings.html) | [@langchain/aws](https://api.js.langchain.com/modules/langchain_aws.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/aws?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/aws?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Bedrock embedding models you'll need to create an AWS account, get an API key, and install the `@langchain/aws` integration package.\n", + "\n", + "Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html) to sign up for AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by [following these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html).\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Bedrock integration lives in the `@langchain/aws` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/aws @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and embed text.\n", + "\n", + "There are a few different ways to authenticate with AWS - the below examples rely on an access key, secret access key and region set in your environment variables:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.625, 0.111328125, 0.265625, -0.20019531, 0.40820312,\n", - " -0.010803223, -0.22460938, -0.0002937317, 0.29882812, -0.14355469,\n", - " -0.068847656, -0.3984375, 0.75, -0.1953125, -0.5546875,\n", - " -0.087402344, 0.5625, 1.390625, -0.3515625, 0.39257812,\n", - " -0.061767578, 0.65625, -0.36328125, -0.06591797, 0.234375,\n", - " -0.36132812, 0.42382812, -0.115234375, -0.28710938, -0.29296875,\n", - " -0.765625, -0.16894531, 0.23046875, 0.6328125, -0.08544922,\n", - " 0.13671875, 0.0004272461, 0.3125, 0.12207031, -0.546875,\n", - " 0.14257812, -0.119628906, -0.111328125, 0.61328125, 0.6875,\n", - " 0.3671875, -0.2578125, -0.27734375, 0.703125, 0.203125,\n", - " 0.17675781, -0.26757812, -0.76171875, 0.71484375, 0.77734375,\n", - " -0.1953125, -0.007232666, -0.044921875, 0.23632812, -0.24121094,\n", - " -0.012207031, 0.5078125, 0.08984375, 0.56640625, -0.3046875,\n", - " 0.6484375, -0.25, -0.37890625, -0.2421875, 0.38476562,\n", - " -0.18164062, -0.05810547, 0.7578125, 0.04296875, 0.609375,\n", - " 0.50390625, 
0.023803711, -0.23046875, 0.099121094, 0.79296875,\n", - " -1.296875, 0.671875, -0.66796875, 0.43359375, 0.087890625,\n", - " 0.14550781, -0.37304688, -0.068359375, 0.00012874603, -0.47265625,\n", - " -0.765625, 0.07861328, -0.029663086, 0.076660156, -0.32617188,\n", - " -0.453125, -0.5546875, -0.45703125, 1.1015625, -0.29492188\n", - "]\n", - "[\n", - " 0.65625, 0.48242188, 0.70703125, -0.13378906, 0.859375,\n", - " 0.2578125, -0.13378906, -0.0002670288, -0.34375, 0.25585938,\n", - " -0.33984375, -0.26367188, 0.828125, -0.23242188, -0.61328125,\n", - " 0.12695312, 0.43359375, 1.3828125, -0.099121094, 0.3203125,\n", - " -0.34765625, 0.35351562, -0.28710938, 0.009521484, 0.083496094,\n", - " 0.040283203, -0.25390625, 0.17871094, 0.044189453, -0.19628906,\n", - " 0.45898438, 0.21191406, 0.67578125, 0.8359375, -0.29101562,\n", - " 0.021118164, 0.13671875, 0.083984375, 0.34570312, 0.30859375,\n", - " -0.001625061, 0.31835938, -0.18164062, -0.0058288574, 0.22460938,\n", - " 0.26757812, -0.09082031, 0.17480469, 1.4921875, -0.24316406,\n", - " 0.36523438, 0.14550781, -0.609375, 0.33007812, 0.10595703,\n", - " 0.3671875, 0.18359375, -0.62109375, 0.51171875, 0.024047852,\n", - " 0.092285156, -0.44335938, 0.4921875, 0.609375, -0.48242188,\n", - " 0.796875, -0.47851562, -0.53125, -0.66796875, 0.68359375,\n", - " -0.16796875, 0.110839844, 0.84765625, 0.703125, 0.8671875,\n", - " 0.37695312, -0.0022888184, -0.30664062, 0.3671875, 0.16503906,\n", - " -0.59765625, 0.3203125, -0.34375, 0.08251953, 0.890625,\n", - " 0.38476562, -0.24707031, -0.125, 0.00013160706, -0.69921875,\n", - " -0.53125, 0.052490234, 0.27734375, 0.42773438, -0.38867188,\n", - " -0.2578125, -0.25, -0.46875, 0.828125, -0.94140625\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { BedrockEmbeddings } from \"@langchain/aws\";\n", + "\n", + "const embeddings = new BedrockEmbeddings({\n", + " region: process.env.BEDROCK_AWS_REGION!,\n", + " credentials: {\n", + " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,\n", + " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,\n", + " },\n", + " model: \"amazon.titan-embed-text-v1\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.625, 0.111328125, 0.265625, -0.20019531, 0.40820312,\n", + " -0.010803223, -0.22460938, -0.0002937317, 0.29882812, -0.14355469,\n", + " -0.068847656, -0.3984375, 0.75, -0.1953125, -0.5546875,\n", + " -0.087402344, 0.5625, 1.390625, -0.3515625, 0.39257812,\n", + " -0.061767578, 0.65625, -0.36328125, -0.06591797, 0.234375,\n", + " -0.36132812, 0.42382812, -0.115234375, -0.28710938, -0.29296875,\n", + " -0.765625, -0.16894531, 0.23046875, 0.6328125, -0.08544922,\n", + " 0.13671875, 0.0004272461, 0.3125, 0.12207031, -0.546875,\n", + " 0.14257812, -0.119628906, -0.111328125, 0.61328125, 0.6875,\n", + " 0.3671875, -0.2578125, -0.27734375, 0.703125, 0.203125,\n", + " 0.17675781, -0.26757812, -0.76171875, 0.71484375, 0.77734375,\n", + " -0.1953125, -0.007232666, -0.044921875, 0.23632812, -0.24121094,\n", + " -0.012207031, 0.5078125, 0.08984375, 0.56640625, -0.3046875,\n", + " 0.6484375, -0.25, -0.37890625, -0.2421875, 0.38476562,\n", + " -0.18164062, -0.05810547, 0.7578125, 0.04296875, 0.609375,\n", + " 0.50390625, 0.023803711, -0.23046875, 0.099121094, 0.79296875,\n", + " -1.296875, 0.671875, -0.66796875, 0.43359375, 0.087890625,\n", + " 0.14550781, -0.37304688, -0.068359375, 0.00012874603, -0.47265625,\n", + " -0.765625, 0.07861328, -0.029663086, 0.076660156, -0.32617188,\n", + " -0.453125, -0.5546875, -0.45703125, 1.1015625, -0.29492188\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. 
The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.625, 0.111328125, 0.265625, -0.20019531, 0.40820312,\n", + " -0.010803223, -0.22460938, -0.0002937317, 0.29882812, -0.14355469,\n", + " -0.068847656, -0.3984375, 0.75, -0.1953125, -0.5546875,\n", + " -0.087402344, 0.5625, 1.390625, -0.3515625, 0.39257812,\n", + " -0.061767578, 0.65625, -0.36328125, -0.06591797, 0.234375,\n", + " -0.36132812, 0.42382812, -0.115234375, -0.28710938, -0.29296875,\n", + " -0.765625, -0.16894531, 0.23046875, 0.6328125, -0.08544922,\n", + " 0.13671875, 0.0004272461, 0.3125, 0.12207031, -0.546875,\n", + " 0.14257812, -0.119628906, -0.111328125, 0.61328125, 0.6875,\n", + " 0.3671875, -0.2578125, -0.27734375, 0.703125, 0.203125,\n", + " 0.17675781, -0.26757812, -0.76171875, 0.71484375, 0.77734375,\n", + " -0.1953125, -0.007232666, -0.044921875, 0.23632812, -0.24121094,\n", + " -0.012207031, 0.5078125, 0.08984375, 0.56640625, -0.3046875,\n", + " 0.6484375, -0.25, -0.37890625, -0.2421875, 0.38476562,\n", + " -0.18164062, -0.05810547, 0.7578125, 0.04296875, 0.609375,\n", + " 0.50390625, 0.023803711, -0.23046875, 0.099121094, 0.79296875,\n", + " -1.296875, 0.671875, -0.66796875, 0.43359375, 0.087890625,\n", + " 0.14550781, -0.37304688, -0.068359375, 0.00012874603, -0.47265625,\n", + " -0.765625, 0.07861328, -0.029663086, 0.076660156, -0.32617188,\n", + " -0.453125, -0.5546875, -0.45703125, 1.1015625, -0.29492188\n", + "]\n", + "[\n", + " 0.65625, 0.48242188, 0.70703125, -0.13378906, 0.859375,\n", + " 0.2578125, -0.13378906, -0.0002670288, -0.34375, 0.25585938,\n", + " -0.33984375, -0.26367188, 0.828125, -0.23242188, -0.61328125,\n", + " 0.12695312, 0.43359375, 1.3828125, -0.099121094, 0.3203125,\n", + " -0.34765625, 0.35351562, -0.28710938, 0.009521484, 0.083496094,\n", + " 0.040283203, -0.25390625, 0.17871094, 0.044189453, -0.19628906,\n", + " 0.45898438, 0.21191406, 0.67578125, 0.8359375, -0.29101562,\n", + " 0.021118164, 0.13671875, 0.083984375, 0.34570312, 0.30859375,\n", + " -0.001625061, 0.31835938, -0.18164062, -0.0058288574, 0.22460938,\n", + " 0.26757812, -0.09082031, 0.17480469, 1.4921875, -0.24316406,\n", + " 0.36523438, 0.14550781, -0.609375, 0.33007812, 0.10595703,\n", + " 0.3671875, 0.18359375, -0.62109375, 0.51171875, 0.024047852,\n", + " 0.092285156, -0.44335938, 0.4921875, 0.609375, -0.48242188,\n", + " 0.796875, -0.47851562, -0.53125, -0.66796875, 0.68359375,\n", + " -0.16796875, 0.110839844, 0.84765625, 0.703125, 0.8671875,\n", + " 0.37695312, -0.0022888184, -0.30664062, 0.3671875, 0.16503906,\n", + " -0.59765625, 0.3203125, -0.34375, 0.08251953, 0.890625,\n", + " 0.38476562, -0.24707031, -0.125, 0.00013160706, -0.69921875,\n", + " -0.53125, 0.052490234, 0.27734375, 0.42773438, -0.38867188,\n", + " -0.2578125, -0.25, -0.46875, 0.828125, -0.94140625\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1d337b4e", + "metadata": {}, + "source": [ + "## Configuring the Bedrock Runtime Client\n", + "\n", + "You can pass in your own instance of the 
`BedrockRuntimeClient` if you want to customize options like\n", + "`credentials`, `region`, `retryPolicy`, etc." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c3eb2444", + "metadata": {}, + "outputs": [], + "source": [ + "import { BedrockRuntimeClient } from \"@aws-sdk/client-bedrock-runtime\";\n", + "import { BedrockEmbeddings } from \"@langchain/aws\";\n", + "\n", + "const getCredentials = () => {\n", + " // do something to get credentials\n", + "}\n", + "\n", + "// @lc-ts-ignore\n", + "const client = new BedrockRuntimeClient({\n", + " region: \"us-east-1\",\n", + " credentials: getCredentials(),\n", + "});\n", + "\n", + "const embeddingsWithCustomClient = new BedrockEmbeddings({\n", + " client,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all Bedrock features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_aws.BedrockEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1d337b4e", - "metadata": {}, - "source": [ - "## Configuring the Bedrock Runtime Client\n", - "\n", - "You can pass in your own instance of the `BedrockRuntimeClient` if you want to customize options like\n", - "`credentials`, `region`, `retryPolicy`, etc." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c3eb2444", - "metadata": {}, - "outputs": [], - "source": [ - "import { BedrockRuntimeClient } from \"@aws-sdk/client-bedrock-runtime\";\n", - "import { BedrockEmbeddings } from \"@langchain/aws\";\n", - "\n", - "const getCredentials = () => {\n", - " // do something to get credentials\n", - "}\n", - "\n", - "// @lc-ts-ignore\n", - "const client = new BedrockRuntimeClient({\n", - " region: \"us-east-1\",\n", - " credentials: getCredentials(),\n", - "});\n", - "\n", - "const embeddingsWithCustomClient = new BedrockEmbeddings({\n", - " client,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all Bedrock features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_aws.BedrockEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb b/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb index d7973063e516..21cc3a3dbe65 100644 --- a/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/cloudflare_ai.ipynb @@ -1,191 +1,191 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Cloudflare Workers AI\n", + "lc_docs_skip_validation: true\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# CloudflareWorkersAIEmbeddings\n", + "\n", + "This will help you get started with Cloudflare Workers AI [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `CloudflareWorkersAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAIEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Py support | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [`CloudflareWorkersAIEmbeddings`](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAIEmbeddings.html) | [`@langchain/cloudflare`](https://npmjs.com/@langchain/cloudflare) | ❌ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cloudflare?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cloudflare?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Cloudflare embedding models you'll need to create a Cloudflare account and install the `@langchain/cloudflare` integration package. 
This integration is made to run in a Cloudflare worker and accept a binding.\n", + "\n", + "Follow [the official docs](https://developers.cloudflare.com/workers-ai/get-started/workers-wrangler/) to set up your worker.\n", + "\n", + "Your `wrangler.toml` file should look similar to this:\n", + "\n", + "```toml\n", + "name = \"langchain-test\"\n", + "main = \"worker.js\"\n", + "compatibility_date = \"2024-01-10\"\n", + "\n", + "[[vectorize]]\n", + "binding = \"VECTORIZE_INDEX\"\n", + "index_name = \"langchain-test\"\n", + "\n", + "[ai]\n", + "binding = \"AI\"\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain CloudflareWorkersAIEmbeddings integration lives in the `@langchain/cloudflare` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/cloudflare @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Usage\n", + "\n", + "Below is an example worker that uses Workers AI embeddings with a [Cloudflare Vectorize vectorstore](/docs/integrations/vectorstores/cloudflare_vectorize/)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "// @ts-nocheck\n", + "\n", + "import type {\n", + " VectorizeIndex,\n", + " Fetcher,\n", + " Request,\n", + "} from \"@cloudflare/workers-types\";\n", + "\n", + "import {\n", + " CloudflareVectorizeStore,\n", + " CloudflareWorkersAIEmbeddings,\n", + "} from \"@langchain/cloudflare\";\n", + "\n", + "export interface Env {\n", + " VECTORIZE_INDEX: VectorizeIndex;\n", + " AI: Fetcher;\n", + "}\n", + "\n", + "export default {\n", + " async fetch(request: Request, env: Env) {\n", + " const { pathname } = new URL(request.url);\n", + " const embeddings = new CloudflareWorkersAIEmbeddings({\n", + " binding: env.AI,\n", + " model: \"@cf/baai/bge-small-en-v1.5\",\n", + " });\n", + " const store = new CloudflareVectorizeStore(embeddings, {\n", + " index: env.VECTORIZE_INDEX,\n", + " });\n", + " if (pathname === \"/\") {\n", + " const results = await store.similaritySearch(\"hello\", 5);\n", + " return Response.json(results);\n", + " } else if (pathname === \"/load\") {\n", + " // Upsertion by id is supported\n", + " await store.addDocuments(\n", + " [\n", + " {\n", + " pageContent: \"hello\",\n", + " metadata: {},\n", + " },\n", + " {\n", + " pageContent: \"world\",\n", + " metadata: {},\n", + " },\n", + " {\n", + " pageContent: \"hi\",\n", + " metadata: {},\n", + " },\n", + " ],\n", + " { ids: [\"id1\", \"id2\", \"id3\"] }\n", + " );\n", + "\n", + " return Response.json({ success: true });\n", + " } else if (pathname === \"/clear\") {\n", + " await store.delete({ ids: [\"id1\", \"id2\", \"id3\"] });\n", + " return Response.json({ success: true });\n", + " }\n", + "\n", + " return Response.json({ error: \"Not Found\" }, { status: 404 });\n", + " },\n", + "};" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For 
detailed documentation of all `CloudflareWorkersAIEmbeddings` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAIEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "typescript", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.5" } - }, - "source": [ - "---\n", - "sidebar_label: Cloudflare Workers AI\n", - "lc_docs_skip_validation: true\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# CloudflareWorkersAIEmbeddings\n", - "\n", - "This will help you get started with Cloudflare Workers AI [embedding models](/docs/concepts#embedding-models) using LangChain. For detailed documentation on `CloudflareWorkersAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAIEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | Py support | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [`CloudflareWorkersAIEmbeddings`](https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAIEmbeddings.html) | [`@langchain/cloudflare`](https://npmjs.com/@langchain/cloudflare) | ❌ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cloudflare?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cloudflare?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Cloudflare embedding models you'll need to create a Cloudflare account and install the `@langchain/cloudflare` integration package. 
This integration is made to run in a Cloudflare worker and accept a binding.\n", - "\n", - "Follow [the official docs](https://developers.cloudflare.com/workers-ai/get-started/workers-wrangler/) to set up your worker.\n", - "\n", - "Your `wrangler.toml` file should look similar to this:\n", - "\n", - "```toml\n", - "name = \"langchain-test\"\n", - "main = \"worker.js\"\n", - "compatibility_date = \"2024-01-10\"\n", - "\n", - "[[vectorize]]\n", - "binding = \"VECTORIZE_INDEX\"\n", - "index_name = \"langchain-test\"\n", - "\n", - "[ai]\n", - "binding = \"AI\"\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain CloudflareWorkersAIEmbeddings integration lives in the `@langchain/cloudflare` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/cloudflare @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Usage\n", - "\n", - "Below is an example worker that uses Workers AI embeddings with a [Cloudflare Vectorize vectorstore](/docs/integrations/vectorstores/cloudflare_vectorize/)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "// @ts-nocheck\n", - "\n", - "import type {\n", - " VectorizeIndex,\n", - " Fetcher,\n", - " Request,\n", - "} from \"@cloudflare/workers-types\";\n", - "\n", - "import {\n", - " CloudflareVectorizeStore,\n", - " CloudflareWorkersAIEmbeddings,\n", - "} from \"@langchain/cloudflare\";\n", - "\n", - "export interface Env {\n", - " VECTORIZE_INDEX: VectorizeIndex;\n", - " AI: Fetcher;\n", - "}\n", - "\n", - "export default {\n", - " async fetch(request: Request, env: Env) {\n", - " const { pathname } = new URL(request.url);\n", - " const embeddings = new CloudflareWorkersAIEmbeddings({\n", - " binding: env.AI,\n", - " model: \"@cf/baai/bge-small-en-v1.5\",\n", - " });\n", - " const store = new CloudflareVectorizeStore(embeddings, {\n", - " index: env.VECTORIZE_INDEX,\n", - " });\n", - " if (pathname === \"/\") {\n", - " const results = await store.similaritySearch(\"hello\", 5);\n", - " return Response.json(results);\n", - " } else if (pathname === \"/load\") {\n", - " // Upsertion by id is supported\n", - " await store.addDocuments(\n", - " [\n", - " {\n", - " pageContent: \"hello\",\n", - " metadata: {},\n", - " },\n", - " {\n", - " pageContent: \"world\",\n", - " metadata: {},\n", - " },\n", - " {\n", - " pageContent: \"hi\",\n", - " metadata: {},\n", - " },\n", - " ],\n", - " { ids: [\"id1\", \"id2\", \"id3\"] }\n", - " );\n", - "\n", - " return Response.json({ success: true });\n", - " } else if (pathname === \"/clear\") {\n", - " await store.delete({ ids: [\"id1\", \"id2\", \"id3\"] });\n", - " return Response.json({ success: true });\n", - " }\n", - "\n", - " return Response.json({ error: \"Not Found\" }, { status: 404 });\n", - " },\n", - "};" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For 
detailed documentation of all `CloudflareWorkersAIEmbeddings` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cloudflare.CloudflareWorkersAIEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "typescript", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb b/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb index 2e0ab4847719..209aa6352f5d 100644 --- a/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/cohere.ipynb @@ -1,339 +1,339 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Cohere\n", - "lc_docs_skip_validation: true\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# CohereEmbeddings\n", - "\n", - "This will help you get started with CohereEmbeddings [embedding models](/docs/concepts#embedding-models) using LangChain. For detailed documentation on `CohereEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.CohereEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/cohere/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [CohereEmbeddings](https://api.js.langchain.com/classes/langchain_cohere.CohereEmbeddings.html) | [@langchain/cohere](https://api.js.langchain.com/modules/langchain_cohere.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Cohere embedding models you'll need to create a Cohere account, get an API key, and install the `@langchain/cohere` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [cohere.com](https://cohere.com) to sign up to `Cohere` and generate an API key. 
Once you've done this set the `COHERE_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export COHERE_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain CohereEmbeddings integration lives in the `@langchain/cohere` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/cohere @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { CohereEmbeddings } from \"@langchain/cohere\";\n", - "\n", - "const embeddings = new CohereEmbeddings({\n", - " apiKey: \"YOUR-API-KEY\", // In Node.js defaults to process.env.COHERE_API_KEY\n", - " batchSize: 48, // Default value if omitted is 48. Max value is 96\n", - " model: \"embed-english-v3.0\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "b6470d5e", - "metadata": {}, - "source": [ - "### Custom client for Cohere on Azure, Cohere on AWS Bedrock, and Standalone Cohere Instance.\n", - "\n", - "We can instantiate a custom `CohereClient` and pass it to the ChatCohere constructor.\n", - "\n", - "**Note:** If a custom client is provided both `COHERE_API_KEY` environment variable and apiKey parameter in the constructor will be ignored" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a241b0f0", - "metadata": {}, - "outputs": [], - "source": [ - "import { CohereEmbeddings } from \"@langchain/cohere\";\n", - "import { CohereClient } from \"cohere-ai\";\n", - "\n", - "const client = new CohereClient({\n", - " token: \"\",\n", - " environment: \"\", //optional\n", - " // other params\n", - "});\n", - "\n", - "const embeddingsWithCustomClient = new CohereEmbeddings({\n", - " client,\n", - " // other params...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Cohere\n", + "lc_docs_skip_validation: true\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.022979736, -0.030212402, -0.08886719, -0.08569336, 0.007030487,\n", - " -0.0010671616, -0.033813477, 0.08843994, 0.0119018555, 0.049926758,\n", - " -0.03616333, 0.007408142, 0.00034809113, -0.005744934, -0.016021729,\n", - " -0.015296936, -0.0011606216, -0.02458191, -0.044006348, -0.0335083,\n", - " 0.024658203, -0.051086426, 0.0020427704, 0.06298828, 0.020507812,\n", - " 0.037475586, 0.05117798, 0.0059814453, 0.025360107, 0.0060577393,\n", - " 0.02255249, -0.070129395, 0.024017334, 0.022766113, -0.042755127,\n", - " -0.024673462, -0.0236969, -0.0073623657, 0.002161026, 0.011329651,\n", - " 0.038330078, -0.03050232, 0.0022201538, -0.007911682, -0.0023536682,\n", - " 0.029937744, -0.027297974, -0.064086914, 0.027267456, 0.016738892,\n", - " 0.0028972626, 0.015510559, -0.01725769, 0.011497498, -0.012954712,\n", - " 0.002380371, -0.03366089, -0.02746582, 0.014022827, 0.04196167,\n", - " 0.007698059, -0.027069092, 0.025405884, -0.029815674, 0.013298035,\n", - " 0.01737976, 0.07269287, 0.017822266, 0.0012550354, -0.009597778,\n", - " -0.02961731, 0.0049057007, 0.01965332, -0.009994507, -0.019561768,\n", - " -0.004764557, 0.019317627, -0.0045433044, 0.031143188, -0.018188477,\n", - " -0.0026893616, 0.0050964355, -0.044189453, 0.02029419, -0.019088745,\n", - " 0.02166748, -0.011657715, -0.025405884, -0.028030396, -0.0051460266,\n", - " -0.010818481, -0.000364542, -0.028686523, 0.015029907, 0.0013790131,\n", - " -0.0069770813, -0.030639648, -0.051208496, 0.005279541, -0.0109939575\n", - "]\n" 
- ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# CohereEmbeddings\n", + "\n", + "This will help you get started with CohereEmbeddings [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `CohereEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.CohereEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/cohere/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [CohereEmbeddings](https://api.js.langchain.com/classes/langchain_cohere.CohereEmbeddings.html) | [@langchain/cohere](https://api.js.langchain.com/modules/langchain_cohere.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Cohere embedding models you'll need to create a Cohere account, get an API key, and install the `@langchain/cohere` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [cohere.com](https://cohere.com) to sign up to `Cohere` and generate an API key. Once you've done this set the `COHERE_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export COHERE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain CohereEmbeddings integration lives in the `@langchain/cohere` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/cohere @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { CohereEmbeddings } from \"@langchain/cohere\";\n", + "\n", + "const embeddings = new CohereEmbeddings({\n", + " apiKey: \"YOUR-API-KEY\", // In Node.js defaults to process.env.COHERE_API_KEY\n", + " batchSize: 48, // Default value if omitted is 48. 
Max value is 96\n", + " model: \"embed-english-v3.0\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "b6470d5e", + "metadata": {}, + "source": [ + "### Custom client for Cohere on Azure, Cohere on AWS Bedrock, and Standalone Cohere Instance.\n", + "\n", + "We can instantiate a custom `CohereClient` and pass it to the ChatCohere constructor.\n", + "\n", + "**Note:** If a custom client is provided both `COHERE_API_KEY` environment variable and apiKey parameter in the constructor will be ignored" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a241b0f0", + "metadata": {}, + "outputs": [], + "source": [ + "import { CohereEmbeddings } from \"@langchain/cohere\";\n", + "import { CohereClient } from \"cohere-ai\";\n", + "\n", + "const client = new CohereClient({\n", + " token: \"\",\n", + " environment: \"\", //optional\n", + " // other params\n", + "});\n", + "\n", + "const embeddingsWithCustomClient = new CohereEmbeddings({\n", + " client,\n", + " // other params...\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.028869629, -0.030410767, -0.099121094, -0.07116699, -0.012748718,\n", - " -0.0059432983, -0.04360962, 0.07965088, -0.027114868, 0.057403564,\n", - " -0.013549805, 0.014480591, 0.021697998, -0.026870728, 0.0071983337,\n", - " -0.0099105835, -0.0034332275, -0.026031494, -0.05206299, -0.045288086,\n", - " 0.027450562, -0.060333252, -0.019210815, 0.039794922, 0.0055351257,\n", - " 0.046325684, 0.017837524, -0.012619019, 0.023147583, -0.008201599,\n", - " 0.022155762, -0.035888672, 0.016921997, 0.027679443, -0.023605347,\n", - " -0.0022029877, -0.025253296, 0.013076782, 0.0049705505, -0.0024280548,\n", - " 0.021957397, -0.008644104, -0.00004029274, -0.003501892, -0.012641907,\n", - " 0.01600647, -0.014312744, -0.037841797, 0.011764526, -0.019622803,\n", 
- " -0.01928711, -0.017044067, -0.017547607, 0.028533936, -0.019073486,\n", - " -0.0061073303, -0.024520874, 0.01638794, 0.017852783, -0.0013303757,\n", - " -0.023040771, -0.01713562, 0.027786255, -0.02583313, 0.03060913,\n", - " 0.00013923645, 0.01977539, 0.025283813, -0.00068569183, 0.032806396,\n", - " -0.021392822, -0.016174316, 0.016464233, 0.006023407, -0.0025043488,\n", - " -0.033813477, 0.023269653, 0.012329102, 0.030334473, 0.014419556,\n", - " -0.026245117, -0.018356323, -0.016433716, 0.022628784, -0.024108887,\n", - " 0.02897644, -0.017105103, -0.009208679, -0.015541077, -0.020004272,\n", - " -0.005153656, 0.03741455, -0.050750732, 0.012176514, -0.017501831,\n", - " -0.014503479, 0.0052223206, -0.03250122, 0.008666992, -0.015823364\n", - "]\n", - "[\n", - " -0.047332764, -0.049957275, -0.07458496, -0.034332275, -0.057922363,\n", - " -0.0112838745, -0.06994629, 0.06347656, -0.03326416, 0.019897461,\n", - " 0.0103302, 0.04660034, -0.059753418, -0.027511597, 0.012245178,\n", - " -0.03164673, -0.010215759, -0.00687027, -0.03314209, -0.019866943,\n", - " 0.008399963, -0.042144775, -0.03781128, 0.025970459, 0.007335663,\n", - " 0.04107666, -0.015991211, 0.0158844, -0.008483887, -0.008399963,\n", - " 0.01777649, -0.01109314, 0.01864624, 0.014328003, -0.005264282,\n", - " 0.077697754, 0.017684937, 0.0020427704, 0.032470703, -0.0029354095,\n", - " 0.003063202, 0.0008301735, 0.016281128, -0.005897522, -0.023254395,\n", - " 0.004043579, -0.021987915, -0.015419006, 0.0009803772, 0.044677734,\n", - " -0.0045814514, 0.0039901733, -0.019058228, 0.063964844, -0.012496948,\n", - " -0.027755737, 0.01574707, -0.03781128, 0.0038909912, -0.00002193451,\n", - " 0.00013685226, 0.027832031, 0.015182495, -0.008590698, 0.03933716,\n", - " -0.0020141602, -0.050567627, 0.02017212, 0.020523071, 0.07287598,\n", - " 0.0031375885, -0.05227661, -0.01838684, -0.0019626617, -0.0039482117,\n", - " 0.02494812, 0.0009508133, 0.008583069, 0.02923584, 0.028198242,\n", - " -0.030334473, -0.014076233, -0.017990112, 0.0026245117, -0.017150879,\n", - " 0.004497528, -0.00365448, -0.0012168884, 0.011741638, 0.012886047,\n", - " 0.00084400177, 0.060638428, -0.024002075, 0.022415161, -0.015823364,\n", - " -0.0026760101, 0.028625488, 0.041015625, 0.006893158, -0.01902771\n", - "]\n" - ] + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. 
This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.022979736, -0.030212402, -0.08886719, -0.08569336, 0.007030487,\n", + " -0.0010671616, -0.033813477, 0.08843994, 0.0119018555, 0.049926758,\n", + " -0.03616333, 0.007408142, 0.00034809113, -0.005744934, -0.016021729,\n", + " -0.015296936, -0.0011606216, -0.02458191, -0.044006348, -0.0335083,\n", + " 0.024658203, -0.051086426, 0.0020427704, 0.06298828, 0.020507812,\n", + " 0.037475586, 0.05117798, 0.0059814453, 0.025360107, 0.0060577393,\n", + " 0.02255249, -0.070129395, 0.024017334, 0.022766113, -0.042755127,\n", + " -0.024673462, -0.0236969, -0.0073623657, 0.002161026, 0.011329651,\n", + " 0.038330078, -0.03050232, 0.0022201538, -0.007911682, -0.0023536682,\n", + " 0.029937744, -0.027297974, -0.064086914, 0.027267456, 0.016738892,\n", + " 0.0028972626, 0.015510559, -0.01725769, 0.011497498, -0.012954712,\n", + " 0.002380371, -0.03366089, -0.02746582, 0.014022827, 0.04196167,\n", + " 0.007698059, -0.027069092, 0.025405884, -0.029815674, 0.013298035,\n", + " 0.01737976, 0.07269287, 0.017822266, 0.0012550354, -0.009597778,\n", + " -0.02961731, 0.0049057007, 0.01965332, -0.009994507, -0.019561768,\n", + " -0.004764557, 0.019317627, -0.0045433044, 0.031143188, -0.018188477,\n", + " -0.0026893616, 0.0050964355, -0.044189453, 0.02029419, -0.019088745,\n", + " 0.02166748, -0.011657715, -0.025405884, -0.028030396, -0.0051460266,\n", + " -0.010818481, -0.000364542, -0.028686523, 0.015029907, 0.0013790131,\n", + " -0.0069770813, -0.030639648, -0.051208496, 0.005279541, -0.0109939575\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. 
The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.028869629, -0.030410767, -0.099121094, -0.07116699, -0.012748718,\n", + " -0.0059432983, -0.04360962, 0.07965088, -0.027114868, 0.057403564,\n", + " -0.013549805, 0.014480591, 0.021697998, -0.026870728, 0.0071983337,\n", + " -0.0099105835, -0.0034332275, -0.026031494, -0.05206299, -0.045288086,\n", + " 0.027450562, -0.060333252, -0.019210815, 0.039794922, 0.0055351257,\n", + " 0.046325684, 0.017837524, -0.012619019, 0.023147583, -0.008201599,\n", + " 0.022155762, -0.035888672, 0.016921997, 0.027679443, -0.023605347,\n", + " -0.0022029877, -0.025253296, 0.013076782, 0.0049705505, -0.0024280548,\n", + " 0.021957397, -0.008644104, -0.00004029274, -0.003501892, -0.012641907,\n", + " 0.01600647, -0.014312744, -0.037841797, 0.011764526, -0.019622803,\n", + " -0.01928711, -0.017044067, -0.017547607, 0.028533936, -0.019073486,\n", + " -0.0061073303, -0.024520874, 0.01638794, 0.017852783, -0.0013303757,\n", + " -0.023040771, -0.01713562, 0.027786255, -0.02583313, 0.03060913,\n", + " 0.00013923645, 0.01977539, 0.025283813, -0.00068569183, 0.032806396,\n", + " -0.021392822, -0.016174316, 0.016464233, 0.006023407, -0.0025043488,\n", + " -0.033813477, 0.023269653, 0.012329102, 0.030334473, 0.014419556,\n", + " -0.026245117, -0.018356323, -0.016433716, 0.022628784, -0.024108887,\n", + " 0.02897644, -0.017105103, -0.009208679, -0.015541077, -0.020004272,\n", + " -0.005153656, 0.03741455, -0.050750732, 0.012176514, -0.017501831,\n", + " -0.014503479, 0.0052223206, -0.03250122, 0.008666992, -0.015823364\n", + "]\n", + "[\n", + " -0.047332764, -0.049957275, -0.07458496, -0.034332275, -0.057922363,\n", + " -0.0112838745, -0.06994629, 0.06347656, -0.03326416, 0.019897461,\n", + " 0.0103302, 0.04660034, -0.059753418, -0.027511597, 0.012245178,\n", + " -0.03164673, -0.010215759, -0.00687027, -0.03314209, -0.019866943,\n", + " 0.008399963, -0.042144775, -0.03781128, 0.025970459, 0.007335663,\n", + " 0.04107666, -0.015991211, 0.0158844, -0.008483887, -0.008399963,\n", + " 0.01777649, -0.01109314, 0.01864624, 0.014328003, -0.005264282,\n", + " 0.077697754, 0.017684937, 0.0020427704, 0.032470703, -0.0029354095,\n", + " 0.003063202, 0.0008301735, 0.016281128, -0.005897522, -0.023254395,\n", + " 0.004043579, -0.021987915, -0.015419006, 0.0009803772, 0.044677734,\n", + " -0.0045814514, 0.0039901733, -0.019058228, 0.063964844, -0.012496948,\n", + " -0.027755737, 0.01574707, -0.03781128, 0.0038909912, -0.00002193451,\n", + " 0.00013685226, 0.027832031, 0.015182495, -0.008590698, 0.03933716,\n", + " -0.0020141602, -0.050567627, 0.02017212, 0.020523071, 0.07287598,\n", + " 0.0031375885, -0.05227661, -0.01838684, -0.0019626617, -0.0039482117,\n", + " 0.02494812, 0.0009508133, 0.008583069, 0.02923584, 0.028198242,\n", + " -0.030334473, -0.014076233, -0.017990112, 0.0026245117, -0.017150879,\n", + " 0.004497528, -0.00365448, -0.0012168884, 0.011741638, 0.012886047,\n", + " 0.00084400177, 0.060638428, -0.024002075, 0.022415161, -0.015823364,\n", + " -0.0026760101, 0.028625488, 0.041015625, 0.006893158, -0.01902771\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + 
"console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all CohereEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.CohereEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all CohereEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.CohereEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx b/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx index 01fe0e012fa6..74a3a3e83005 100644 --- a/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/deepinfra.mdx @@ -128,5 +128,5 @@ For feedback or questions, please contact [feedback@deepinfra.com](mailto:feedba ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb b/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb index 493f5c74d2c8..bae29ba34b96 100644 --- a/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/fireworks.ipynb @@ -1,344 +1,344 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Fireworks\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# FireworksEmbeddings\n", - "\n", - "This will help you get started with FireworksEmbeddings [embedding models](/docs/concepts#embedding-models) using LangChain. 
For detailed documentation on `FireworksEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_embeddings_fireworks.FireworksEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/fireworks/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [FireworksEmbeddings](https://api.js.langchain.com/classes/langchain_community_embeddings_fireworks.FireworksEmbeddings.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_embeddings_fireworks.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Fireworks embedding models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [fireworks.ai](https://fireworks.ai/) to sign up to `Fireworks` and generate an API key. Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export FIREWORKS_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `FireworksEmbeddings` integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { FireworksEmbeddings } from \"@langchain/community/embeddings/fireworks\";\n", - "\n", - "const embeddings = new FireworksEmbeddings({\n", - " modelName: \"nomic-ai/nomic-embed-text-v1.5\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "\u001b[32m\"LangChain is the framework for building context-aware reasoning applications\"\u001b[39m" + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Fireworks\n", + "---" ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. 
This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.01666259765625, 0.011688232421875, -0.1181640625,\n", - " -0.10205078125, 0.05438232421875, -0.08905029296875,\n", - " -0.018096923828125, 0.00952911376953125, -0.08056640625,\n", - " -0.0283050537109375, -0.01512908935546875, 0.0312042236328125,\n", - " 0.08197021484375, 0.022552490234375, 0.0012683868408203125,\n", - " 0.0133056640625, -0.04327392578125, -0.004322052001953125,\n", - " -0.02410888671875, -0.0012350082397460938, -0.04632568359375,\n", - " 0.02996826171875, -0.0134124755859375, -0.037811279296875,\n", - " 0.07672119140625, 0.021759033203125, 0.0179290771484375,\n", - " -0.0002741813659667969, -0.0582275390625, -0.0224456787109375,\n", - " 0.0027675628662109375, -0.017425537109375, -0.01520538330078125,\n", - " -0.01146697998046875, -0.055267333984375, -0.083984375,\n", - " 0.056793212890625, -0.003383636474609375, -0.034271240234375,\n", - " 0.05108642578125, -0.01018524169921875, 0.0462646484375,\n", - " 0.0012178421020507812, 0.005779266357421875, 0.0684814453125,\n", - " 0.00797271728515625, -0.0176544189453125, 0.00257110595703125,\n", - " 0.059539794921875, -0.06573486328125, -0.075439453125,\n", - " -0.0247344970703125, -0.0276947021484375, 0.003940582275390625,\n", - " 0.02630615234375, 0.0660400390625, 0.0157470703125,\n", - " 0.033050537109375, -0.0478515625, -0.03338623046875,\n", - " 0.050384521484375, 0.07757568359375, -0.045166015625,\n", - " 0.07586669921875, 0.0021915435791015625, 0.0237579345703125,\n", - " -0.052703857421875, 0.05023193359375, -0.0274810791015625,\n", - " -0.0025081634521484375, 0.019287109375, -0.03802490234375,\n", - " 0.0216217041015625, 0.025360107421875, -0.04443359375,\n", - " -0.029205322265625, -0.002414703369140625, 0.027130126953125,\n", - " 0.028961181640625, 0.078857421875, -0.0009660720825195312,\n", - " 0.017608642578125, 0.05755615234375, -0.0285797119140625,\n", - " 0.0039215087890625, -0.006908416748046875, -0.05364990234375,\n", - " -0.01342010498046875, -0.0247802734375, 0.08331298828125,\n", - " 0.032928466796875, 0.00543975830078125, -0.0168304443359375,\n", - " -0.050018310546875, -0.05908203125, 0.031951904296875,\n", - " -0.0200347900390625, 0.019134521484375, -0.018035888671875,\n", - " -0.01178741455078125\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# FireworksEmbeddings\n", + "\n", + "This will help you get started with FireworksEmbeddings [embedding models](/docs/concepts/embedding_models) using LangChain. 
For detailed documentation on `FireworksEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_embeddings_fireworks.FireworksEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/fireworks/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [FireworksEmbeddings](https://api.js.langchain.com/classes/langchain_community_embeddings_fireworks.FireworksEmbeddings.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_embeddings_fireworks.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Fireworks embedding models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [fireworks.ai](https://fireworks.ai/) to sign up to `Fireworks` and generate an API key. Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export FIREWORKS_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `FireworksEmbeddings` integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { FireworksEmbeddings } from \"@langchain/community/embeddings/fireworks\";\n", + "\n", + "const embeddings = new FireworksEmbeddings({\n", + " modelName: \"nomic-ai/nomic-embed-text-v1.5\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"LangChain is the framework for building context-aware reasoning applications\"\u001b[39m" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.01666259765625, 0.011688232421875, -0.1181640625,\n", + " -0.10205078125, 0.05438232421875, -0.08905029296875,\n", + " -0.018096923828125, 0.00952911376953125, -0.08056640625,\n", + " -0.0283050537109375, -0.01512908935546875, 0.0312042236328125,\n", + " 0.08197021484375, 0.022552490234375, 0.0012683868408203125,\n", + " 0.0133056640625, -0.04327392578125, -0.004322052001953125,\n", + " -0.02410888671875, -0.0012350082397460938, -0.04632568359375,\n", + " 0.02996826171875, -0.0134124755859375, -0.037811279296875,\n", + " 0.07672119140625, 0.021759033203125, 0.0179290771484375,\n", + " -0.0002741813659667969, -0.0582275390625, -0.0224456787109375,\n", + " 0.0027675628662109375, -0.017425537109375, -0.01520538330078125,\n", + " -0.01146697998046875, -0.055267333984375, -0.083984375,\n", + " 0.056793212890625, -0.003383636474609375, -0.034271240234375,\n", + " 0.05108642578125, -0.01018524169921875, 0.0462646484375,\n", + " 0.0012178421020507812, 0.005779266357421875, 0.0684814453125,\n", + " 0.00797271728515625, -0.0176544189453125, 0.00257110595703125,\n", + " 0.059539794921875, -0.06573486328125, -0.075439453125,\n", + " -0.0247344970703125, -0.0276947021484375, 0.003940582275390625,\n", + " 0.02630615234375, 0.0660400390625, 0.0157470703125,\n", + " 0.033050537109375, -0.0478515625, -0.03338623046875,\n", + " 0.050384521484375, 0.07757568359375, -0.045166015625,\n", + " 0.07586669921875, 0.0021915435791015625, 0.0237579345703125,\n", + " -0.052703857421875, 0.05023193359375, -0.0274810791015625,\n", + " -0.0025081634521484375, 0.019287109375, -0.03802490234375,\n", + " 0.0216217041015625, 0.025360107421875, -0.04443359375,\n", + 
" -0.029205322265625, -0.002414703369140625, 0.027130126953125,\n", + " 0.028961181640625, 0.078857421875, -0.0009660720825195312,\n", + " 0.017608642578125, 0.05755615234375, -0.0285797119140625,\n", + " 0.0039215087890625, -0.006908416748046875, -0.05364990234375,\n", + " -0.01342010498046875, -0.0247802734375, 0.08331298828125,\n", + " 0.032928466796875, 0.00543975830078125, -0.0168304443359375,\n", + " -0.050018310546875, -0.05908203125, 0.031951904296875,\n", + " -0.0200347900390625, 0.019134521484375, -0.018035888671875,\n", + " -0.01178741455078125\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.016632080078125, 0.01165008544921875, -0.1181640625,\n", + " -0.10186767578125, 0.05438232421875, -0.08905029296875,\n", + " -0.0180511474609375, 0.00957489013671875, -0.08056640625,\n", + " -0.0283203125, -0.0151214599609375, 0.0311279296875,\n", + " 0.08184814453125, 0.0225982666015625, 0.0012750625610351562,\n", + " 0.01336669921875, -0.043365478515625, -0.004322052001953125,\n", + " -0.02410888671875, -0.0012559890747070312, -0.046356201171875,\n", + " 0.0298919677734375, -0.013458251953125, -0.03765869140625,\n", + " 0.07672119140625, 0.0217132568359375, 0.0179290771484375,\n", + " -0.0002269744873046875, -0.0582275390625, -0.0224609375,\n", + " 0.002834320068359375, -0.0174407958984375, -0.01512908935546875,\n", + " -0.01146697998046875, -0.055206298828125, -0.08404541015625,\n", + " 0.0567626953125, -0.0033092498779296875, -0.034271240234375,\n", + " 0.05108642578125, -0.010101318359375, 0.046173095703125,\n", + " 0.0011806488037109375, 0.005706787109375, 0.06854248046875,\n", + " 0.0079193115234375, -0.0176239013671875, 0.002552032470703125,\n", + " 0.059539794921875, -0.06573486328125, -0.07537841796875,\n", + " -0.02484130859375, -0.027740478515625, 0.003925323486328125,\n", + " 0.0263671875, 0.0660400390625, 0.0156402587890625,\n", + " 0.033050537109375, -0.047821044921875, -0.0333251953125,\n", + " 0.050445556640625, 0.07757568359375, -0.045257568359375,\n", + " 0.07586669921875, 0.0021724700927734375, 0.0237274169921875,\n", + " -0.052703857421875, 0.050323486328125, -0.0274658203125,\n", + " -0.0024662017822265625, 0.0194244384765625, -0.03802490234375,\n", + " 0.02166748046875, 0.025360107421875, -0.044464111328125,\n", + " -0.0292816162109375, -0.0025119781494140625, 0.0271148681640625,\n", + " 0.028961181640625, 0.078857421875, -0.0008907318115234375,\n", + " 0.017669677734375, 0.0576171875, -0.0285797119140625,\n", + " 0.0039825439453125, -0.00687408447265625, -0.0535888671875,\n", + " -0.0134735107421875, -0.0247650146484375, 0.0831298828125,\n", + " 0.032989501953125, 0.005443572998046875, -0.0167999267578125,\n", + " -0.050018310546875, -0.059051513671875, 0.0318603515625,\n", + " -0.0200958251953125, 0.0191192626953125, -0.0180206298828125,\n", + " -0.01175689697265625\n", + "]\n", + "[\n", + " -0.02667236328125, 0.036651611328125, -0.1630859375,\n", + " -0.0904541015625, -0.022430419921875, 
-0.095458984375,\n", + " -0.037628173828125, 0.00473785400390625, -0.046051025390625,\n", + " 0.0109710693359375, 0.0113525390625, 0.0254364013671875,\n", + " 0.09368896484375, 0.0144195556640625, -0.007564544677734375,\n", + " -0.0014705657958984375, -0.0007691383361816406, -0.015716552734375,\n", + " -0.0242156982421875, -0.024871826171875, 0.00885009765625,\n", + " 0.0012922286987304688, 0.023712158203125, -0.054595947265625,\n", + " 0.06329345703125, 0.0289306640625, 0.0233612060546875,\n", + " -0.0374755859375, -0.0489501953125, -0.029510498046875,\n", + " 0.0173492431640625, 0.0171356201171875, -0.01629638671875,\n", + " -0.0352783203125, -0.039398193359375, -0.11138916015625,\n", + " 0.0296783447265625, -0.01467132568359375, 0.0009188652038574219,\n", + " 0.048187255859375, -0.010650634765625, 0.03125,\n", + " 0.005214691162109375, -0.015869140625, 0.06939697265625,\n", + " -0.0428466796875, 0.0266571044921875, 0.044189453125,\n", + " 0.049957275390625, -0.054290771484375, 0.0107574462890625,\n", + " -0.03265380859375, -0.0109100341796875, -0.0144805908203125,\n", + " 0.03936767578125, 0.07989501953125, -0.056976318359375,\n", + " 0.0308380126953125, -0.035125732421875, -0.038848876953125,\n", + " 0.10748291015625, 0.01129150390625, -0.0665283203125,\n", + " 0.09710693359375, 0.03143310546875, -0.0104522705078125,\n", + " -0.062469482421875, 0.021148681640625, -0.00970458984375,\n", + " -0.06756591796875, 0.01019287109375, 0.00433349609375,\n", + " 0.032928466796875, 0.020233154296875, -0.01336669921875,\n", + " -0.015472412109375, -0.0175933837890625, -0.0142364501953125,\n", + " -0.007450103759765625, 0.03759765625, 0.003551483154296875,\n", + " 0.0069580078125, 0.042266845703125, -0.007488250732421875,\n", + " 0.01922607421875, 0.007080078125, -0.0255889892578125,\n", + " -0.007686614990234375, -0.0848388671875, 0.058563232421875,\n", + " 0.021148681640625, 0.034393310546875, 0.01087188720703125,\n", + " -0.0196380615234375, -0.09515380859375, 0.0054931640625,\n", + " -0.012481689453125, 0.003322601318359375, -0.019683837890625,\n", + " -0.0307159423828125\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.016632080078125, 0.01165008544921875, -0.1181640625,\n", - " -0.10186767578125, 0.05438232421875, -0.08905029296875,\n", - " -0.0180511474609375, 0.00957489013671875, -0.08056640625,\n", - " -0.0283203125, -0.0151214599609375, 0.0311279296875,\n", - " 0.08184814453125, 0.0225982666015625, 0.0012750625610351562,\n", - " 0.01336669921875, -0.043365478515625, -0.004322052001953125,\n", - " -0.02410888671875, -0.0012559890747070312, -0.046356201171875,\n", - " 0.0298919677734375, -0.013458251953125, -0.03765869140625,\n", - " 0.07672119140625, 0.0217132568359375, 0.0179290771484375,\n", - " -0.0002269744873046875, -0.0582275390625, -0.0224609375,\n", - " 0.002834320068359375, -0.0174407958984375, -0.01512908935546875,\n", - " -0.01146697998046875, -0.055206298828125, -0.08404541015625,\n", - " 0.0567626953125, -0.0033092498779296875, -0.034271240234375,\n", - " 0.05108642578125, -0.010101318359375, 0.046173095703125,\n", - " 0.0011806488037109375, 0.005706787109375, 0.06854248046875,\n", - " 0.0079193115234375, -0.0176239013671875, 
0.002552032470703125,\n", - " 0.059539794921875, -0.06573486328125, -0.07537841796875,\n", - " -0.02484130859375, -0.027740478515625, 0.003925323486328125,\n", - " 0.0263671875, 0.0660400390625, 0.0156402587890625,\n", - " 0.033050537109375, -0.047821044921875, -0.0333251953125,\n", - " 0.050445556640625, 0.07757568359375, -0.045257568359375,\n", - " 0.07586669921875, 0.0021724700927734375, 0.0237274169921875,\n", - " -0.052703857421875, 0.050323486328125, -0.0274658203125,\n", - " -0.0024662017822265625, 0.0194244384765625, -0.03802490234375,\n", - " 0.02166748046875, 0.025360107421875, -0.044464111328125,\n", - " -0.0292816162109375, -0.0025119781494140625, 0.0271148681640625,\n", - " 0.028961181640625, 0.078857421875, -0.0008907318115234375,\n", - " 0.017669677734375, 0.0576171875, -0.0285797119140625,\n", - " 0.0039825439453125, -0.00687408447265625, -0.0535888671875,\n", - " -0.0134735107421875, -0.0247650146484375, 0.0831298828125,\n", - " 0.032989501953125, 0.005443572998046875, -0.0167999267578125,\n", - " -0.050018310546875, -0.059051513671875, 0.0318603515625,\n", - " -0.0200958251953125, 0.0191192626953125, -0.0180206298828125,\n", - " -0.01175689697265625\n", - "]\n", - "[\n", - " -0.02667236328125, 0.036651611328125, -0.1630859375,\n", - " -0.0904541015625, -0.022430419921875, -0.095458984375,\n", - " -0.037628173828125, 0.00473785400390625, -0.046051025390625,\n", - " 0.0109710693359375, 0.0113525390625, 0.0254364013671875,\n", - " 0.09368896484375, 0.0144195556640625, -0.007564544677734375,\n", - " -0.0014705657958984375, -0.0007691383361816406, -0.015716552734375,\n", - " -0.0242156982421875, -0.024871826171875, 0.00885009765625,\n", - " 0.0012922286987304688, 0.023712158203125, -0.054595947265625,\n", - " 0.06329345703125, 0.0289306640625, 0.0233612060546875,\n", - " -0.0374755859375, -0.0489501953125, -0.029510498046875,\n", - " 0.0173492431640625, 0.0171356201171875, -0.01629638671875,\n", - " -0.0352783203125, -0.039398193359375, -0.11138916015625,\n", - " 0.0296783447265625, -0.01467132568359375, 0.0009188652038574219,\n", - " 0.048187255859375, -0.010650634765625, 0.03125,\n", - " 0.005214691162109375, -0.015869140625, 0.06939697265625,\n", - " -0.0428466796875, 0.0266571044921875, 0.044189453125,\n", - " 0.049957275390625, -0.054290771484375, 0.0107574462890625,\n", - " -0.03265380859375, -0.0109100341796875, -0.0144805908203125,\n", - " 0.03936767578125, 0.07989501953125, -0.056976318359375,\n", - " 0.0308380126953125, -0.035125732421875, -0.038848876953125,\n", - " 0.10748291015625, 0.01129150390625, -0.0665283203125,\n", - " 0.09710693359375, 0.03143310546875, -0.0104522705078125,\n", - " -0.062469482421875, 0.021148681640625, -0.00970458984375,\n", - " -0.06756591796875, 0.01019287109375, 0.00433349609375,\n", - " 0.032928466796875, 0.020233154296875, -0.01336669921875,\n", - " -0.015472412109375, -0.0175933837890625, -0.0142364501953125,\n", - " -0.007450103759765625, 0.03759765625, 0.003551483154296875,\n", - " 0.0069580078125, 0.042266845703125, -0.007488250732421875,\n", - " 0.01922607421875, 0.007080078125, -0.0255889892578125,\n", - " -0.007686614990234375, -0.0848388671875, 0.058563232421875,\n", - " 0.021148681640625, 0.034393310546875, 0.01087188720703125,\n", - " -0.0196380615234375, -0.09515380859375, 0.0054931640625,\n", - " -0.012481689453125, 0.003322601318359375, -0.019683837890625,\n", - " -0.0307159423828125\n", - "]\n" - ] + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For 
detailed documentation of all FireworksEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_embeddings_fireworks.FireworksEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all FireworksEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_embeddings_fireworks.FireworksEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb b/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb index cd079888786f..91cf095a5e8a 100644 --- a/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/google_generativeai.ipynb @@ -1,312 +1,312 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Google Generative AI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# GoogleGenerativeAIEmbeddings\n", - "\n", - "This will help you get started with Google Generative AI [embedding models](/docs/concepts#embedding-models) using LangChain. 
For detailed documentation on `GoogleGenerativeAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_genai.GoogleGenerativeAIEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/google_generative_ai/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [`GoogleGenerativeAIEmbeddings`](https://api.js.langchain.com/classes/langchain_google_genai.GoogleGenerativeAIEmbeddings.html) | [`@langchain/google-genai`](https://npmjs.com/@langchain/google-genai) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-genai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-genai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Google Generative AI embedding models you'll need to sign up for a Google AI account, get an API key, and install the `@langchain/google-genai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Get an API key here: https://ai.google.dev/tutorials/setup.\n", - "\n", - "Next, set your key as an environment variable named `GOOGLE_API_KEY`:\n", - "\n", - "```bash\n", - "export GOOGLE_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `GoogleGenerativeAIEmbeddings` integration lives in the `@langchain/google-genai` package. You may also wish to install the official SDK:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/google-genai @langchain/core @google/generative-ai\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and embed text:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { GoogleGenerativeAIEmbeddings } from \"@langchain/google-genai\";\n", - "import { TaskType } from \"@google/generative-ai\";\n", - "\n", - "const embeddings = new GoogleGenerativeAIEmbeddings({\n", - " model: \"text-embedding-004\", // 768 dimensions\n", - " taskType: TaskType.RETRIEVAL_DOCUMENT,\n", - " title: \"Document title\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. 
In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Google Generative AI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.018286658, 0.020051053, -0.057487167, 0.0059406986, -0.0036901247,\n", - " -0.010400916, 0.03396853, -0.010867519, 0.015650319, 0.026443942,\n", - " 0.012251757, -0.01581729, 0.02031182, -0.00062176475, 0.0065521155,\n", - " -0.07107355, 0.033614952, 0.07109807, -0.021078493, 0.048039366,\n", - " 0.022973344, -0.0361746, -0.04550704, -0.048807852, 0.03414146,\n", - " 0.042450827, 0.02930612, 0.027274853, -0.027707053, -0.04167595,\n", - " 0.01708843, 0.028532283, -0.0018593844, -0.096786, -0.034648854,\n", - " 0.0013152987, 0.024425535, 0.04937838, 0.036890924, -0.074619934,\n", - " -0.028723065, 0.029158255, -0.023993572, 0.03163398, -0.02036324,\n", - " -0.02333609, -0.017407075, -0.0059643993, -0.05564625, 0.051022638,\n", - " 0.03264913, -0.008254581, -0.030552095, 0.072952054, -0.05448913,\n", - " 0.012030814, -0.07978849, -0.030417662, 0.0038343794, 0.03237516,\n", - " -0.054259773, -0.0524064, -0.02145499, 0.006439614, 0.04988943,\n", - " -0.03232189, 0.00990776, -0.03863326, -0.04979561, 0.009874035,\n", - " -0.02617946, 0.02135152, -0.070599854, 0.08655627, -0.02080979,\n", - " -0.014944934, 0.0034440767, -0.035236854, 0.027093545, 0.032249685,\n", - " -0.03559674, 0.046849757, 0.06965356, 0.028780492, 0.02865287,\n", - " -0.07999455, -0.0058599655, -0.050316703, -0.018346578, -0.038311094,\n", - " 0.08026719, 0.049136136, -0.05372233, -0.0062247813, 
0.01791339,\n", - " -0.03635157, -0.031860247, -0.031322744, 0.044055287, 0.034934316\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# GoogleGenerativeAIEmbeddings\n", + "\n", + "This will help you get started with Google Generative AI [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `GoogleGenerativeAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_genai.GoogleGenerativeAIEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/google_generative_ai/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [`GoogleGenerativeAIEmbeddings`](https://api.js.langchain.com/classes/langchain_google_genai.GoogleGenerativeAIEmbeddings.html) | [`@langchain/google-genai`](https://npmjs.com/@langchain/google-genai) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-genai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-genai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Google Generative AI embedding models you'll need to sign up for a Google AI account, get an API key, and install the `@langchain/google-genai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Get an API key here: https://ai.google.dev/tutorials/setup.\n", + "\n", + "Next, set your key as an environment variable named `GOOGLE_API_KEY`:\n", + "\n", + "```bash\n", + "export GOOGLE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `GoogleGenerativeAIEmbeddings` integration lives in the `@langchain/google-genai` package. 
You may also wish to install the official SDK:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/google-genai @langchain/core @google/generative-ai\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and embed text:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { GoogleGenerativeAIEmbeddings } from \"@langchain/google-genai\";\n", + "import { TaskType } from \"@google/generative-ai\";\n", + "\n", + "const embeddings = new GoogleGenerativeAIEmbeddings({\n", + " model: \"text-embedding-004\", // 768 dimensions\n", + " taskType: TaskType.RETRIEVAL_DOCUMENT,\n", + " title: \"Document title\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. 
This generates a vector representation specific to the query:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.018286658, 0.020051053, -0.057487167, 0.0059406986, -0.0036901247,\n", - " -0.010400916, 0.03396853, -0.010867519, 0.015650319, 0.026443942,\n", - " 0.012251757, -0.01581729, 0.02031182, -0.00062176475, 0.0065521155,\n", - " -0.07107355, 0.033614952, 0.07109807, -0.021078493, 0.048039366,\n", - " 0.022973344, -0.0361746, -0.04550704, -0.048807852, 0.03414146,\n", - " 0.042450827, 0.02930612, 0.027274853, -0.027707053, -0.04167595,\n", - " 0.01708843, 0.028532283, -0.0018593844, -0.096786, -0.034648854,\n", - " 0.0013152987, 0.024425535, 0.04937838, 0.036890924, -0.074619934,\n", - " -0.028723065, 0.029158255, -0.023993572, 0.03163398, -0.02036324,\n", - " -0.02333609, -0.017407075, -0.0059643993, -0.05564625, 0.051022638,\n", - " 0.03264913, -0.008254581, -0.030552095, 0.072952054, -0.05448913,\n", - " 0.012030814, -0.07978849, -0.030417662, 0.0038343794, 0.03237516,\n", - " -0.054259773, -0.0524064, -0.02145499, 0.006439614, 0.04988943,\n", - " -0.03232189, 0.00990776, -0.03863326, -0.04979561, 0.009874035,\n", - " -0.02617946, 0.02135152, -0.070599854, 0.08655627, -0.02080979,\n", - " -0.014944934, 0.0034440767, -0.035236854, 0.027093545, 0.032249685,\n", - " -0.03559674, 0.046849757, 0.06965356, 0.028780492, 0.02865287,\n", - " -0.07999455, -0.0058599655, -0.050316703, -0.018346578, -0.038311094,\n", - " 0.08026719, 0.049136136, -0.05372233, -0.0062247813, 0.01791339,\n", - " -0.03635157, -0.031860247, -0.031322744, 0.044055287, 0.034934316\n", - "]\n", - "[\n", - " 0.011669316, 0.02170385, -0.07519182, 0.003981285,\n", - " 0.0053525288, 0.008397044, 0.036672726, 0.016549919,\n", - " 0.061946314, 0.06280753, -0.009199135, 0.014644887,\n", - " 0.046459496, 0.0122919325, -0.013300706, -0.051746193,\n", - " -0.0490098, 0.045586824, -0.05053146, 0.044294067,\n", - " -0.012607168, -0.0071777054, -0.048455723, -0.075109236,\n", - " 0.013327612, -0.025612017, 0.050875787, 0.030091539,\n", - " -0.027163379, -0.05760821, 0.014368641, 0.0044602253,\n", - " 0.035219245, -0.033304706, -0.045474708, -0.038022216,\n", - " 0.012366698, 0.028978042, 0.038591366, -0.10646444,\n", - " -0.036803752, 0.018911313, 0.005681761, 0.025365992,\n", - " -0.017165288, -0.0048005017, -0.011460135, 0.0027811683,\n", - " -0.04971402, -0.0019232291, 0.02141983, -0.0013272346,\n", - " -0.03337951, 0.030568397, -0.05704511, -0.01187748,\n", - " -0.025354648, 0.016188234, -0.022018699, 0.0096449675,\n", - " -0.027020318, -0.038059015, -0.024455398, 0.021858294,\n", - " 0.010713859, -0.07203855, -0.05562406, 0.0000034690818,\n", - " -0.054289237, -0.0027928432, -0.0010051605, 0.008493095,\n", - " -0.064746305, 0.024419345, -0.016629996, -0.02686531,\n", - " -0.02300653, -0.03263113, 0.019998727, 0.029680967,\n", - " -0.04365641, 0.013594972, 0.056486532, 0.025913332,\n", - " 0.025457978, -0.048536208, 0.020046104, -0.05857287,\n", - " -0.032664414, -0.032940287, 0.10053288, -0.021389635,\n", - " -0.0044220444, 0.037026003, 0.03142132, -0.048912503,\n", - " -0.07961264, -0.051056523, 0.048032805, 0.04831778\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.018286658, 0.020051053, -0.057487167, 0.0059406986, -0.0036901247,\n", + " -0.010400916, 0.03396853, -0.010867519, 0.015650319, 0.026443942,\n", + " 0.012251757, -0.01581729, 
0.02031182, -0.00062176475, 0.0065521155,\n", + " -0.07107355, 0.033614952, 0.07109807, -0.021078493, 0.048039366,\n", + " 0.022973344, -0.0361746, -0.04550704, -0.048807852, 0.03414146,\n", + " 0.042450827, 0.02930612, 0.027274853, -0.027707053, -0.04167595,\n", + " 0.01708843, 0.028532283, -0.0018593844, -0.096786, -0.034648854,\n", + " 0.0013152987, 0.024425535, 0.04937838, 0.036890924, -0.074619934,\n", + " -0.028723065, 0.029158255, -0.023993572, 0.03163398, -0.02036324,\n", + " -0.02333609, -0.017407075, -0.0059643993, -0.05564625, 0.051022638,\n", + " 0.03264913, -0.008254581, -0.030552095, 0.072952054, -0.05448913,\n", + " 0.012030814, -0.07978849, -0.030417662, 0.0038343794, 0.03237516,\n", + " -0.054259773, -0.0524064, -0.02145499, 0.006439614, 0.04988943,\n", + " -0.03232189, 0.00990776, -0.03863326, -0.04979561, 0.009874035,\n", + " -0.02617946, 0.02135152, -0.070599854, 0.08655627, -0.02080979,\n", + " -0.014944934, 0.0034440767, -0.035236854, 0.027093545, 0.032249685,\n", + " -0.03559674, 0.046849757, 0.06965356, 0.028780492, 0.02865287,\n", + " -0.07999455, -0.0058599655, -0.050316703, -0.018346578, -0.038311094,\n", + " 0.08026719, 0.049136136, -0.05372233, -0.0062247813, 0.01791339,\n", + " -0.03635157, -0.031860247, -0.031322744, 0.044055287, 0.034934316\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.018286658, 0.020051053, -0.057487167, 0.0059406986, -0.0036901247,\n", + " -0.010400916, 0.03396853, -0.010867519, 0.015650319, 0.026443942,\n", + " 0.012251757, -0.01581729, 0.02031182, -0.00062176475, 0.0065521155,\n", + " -0.07107355, 0.033614952, 0.07109807, -0.021078493, 0.048039366,\n", + " 0.022973344, -0.0361746, -0.04550704, -0.048807852, 0.03414146,\n", + " 0.042450827, 0.02930612, 0.027274853, -0.027707053, -0.04167595,\n", + " 0.01708843, 0.028532283, -0.0018593844, -0.096786, -0.034648854,\n", + " 0.0013152987, 0.024425535, 0.04937838, 0.036890924, -0.074619934,\n", + " -0.028723065, 0.029158255, -0.023993572, 0.03163398, -0.02036324,\n", + " -0.02333609, -0.017407075, -0.0059643993, -0.05564625, 0.051022638,\n", + " 0.03264913, -0.008254581, -0.030552095, 0.072952054, -0.05448913,\n", + " 0.012030814, -0.07978849, -0.030417662, 0.0038343794, 0.03237516,\n", + " -0.054259773, -0.0524064, -0.02145499, 0.006439614, 0.04988943,\n", + " -0.03232189, 0.00990776, -0.03863326, -0.04979561, 0.009874035,\n", + " -0.02617946, 0.02135152, -0.070599854, 0.08655627, -0.02080979,\n", + " -0.014944934, 0.0034440767, -0.035236854, 0.027093545, 0.032249685,\n", + " -0.03559674, 0.046849757, 0.06965356, 0.028780492, 0.02865287,\n", + " -0.07999455, -0.0058599655, -0.050316703, -0.018346578, -0.038311094,\n", + " 0.08026719, 0.049136136, -0.05372233, -0.0062247813, 0.01791339,\n", + " -0.03635157, -0.031860247, -0.031322744, 0.044055287, 0.034934316\n", + "]\n", + "[\n", + " 0.011669316, 0.02170385, -0.07519182, 0.003981285,\n", + " 0.0053525288, 0.008397044, 0.036672726, 0.016549919,\n", + " 
0.061946314, 0.06280753, -0.009199135, 0.014644887,\n", + " 0.046459496, 0.0122919325, -0.013300706, -0.051746193,\n", + " -0.0490098, 0.045586824, -0.05053146, 0.044294067,\n", + " -0.012607168, -0.0071777054, -0.048455723, -0.075109236,\n", + " 0.013327612, -0.025612017, 0.050875787, 0.030091539,\n", + " -0.027163379, -0.05760821, 0.014368641, 0.0044602253,\n", + " 0.035219245, -0.033304706, -0.045474708, -0.038022216,\n", + " 0.012366698, 0.028978042, 0.038591366, -0.10646444,\n", + " -0.036803752, 0.018911313, 0.005681761, 0.025365992,\n", + " -0.017165288, -0.0048005017, -0.011460135, 0.0027811683,\n", + " -0.04971402, -0.0019232291, 0.02141983, -0.0013272346,\n", + " -0.03337951, 0.030568397, -0.05704511, -0.01187748,\n", + " -0.025354648, 0.016188234, -0.022018699, 0.0096449675,\n", + " -0.027020318, -0.038059015, -0.024455398, 0.021858294,\n", + " 0.010713859, -0.07203855, -0.05562406, 0.0000034690818,\n", + " -0.054289237, -0.0027928432, -0.0010051605, 0.008493095,\n", + " -0.064746305, 0.024419345, -0.016629996, -0.02686531,\n", + " -0.02300653, -0.03263113, 0.019998727, 0.029680967,\n", + " -0.04365641, 0.013594972, 0.056486532, 0.025913332,\n", + " 0.025457978, -0.048536208, 0.020046104, -0.05857287,\n", + " -0.032664414, -0.032940287, 0.10053288, -0.021389635,\n", + " -0.0044220444, 0.037026003, 0.03142132, -0.048912503,\n", + " -0.07961264, -0.051056523, 0.048032805, 0.04831778\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `GoogleGenerativeAIEmbeddings` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_genai.GoogleGenerativeAIEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `GoogleGenerativeAIEmbeddings` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_genai.GoogleGenerativeAIEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff 
--git a/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb index 83c899ded203..870be58b58bf 100644 --- a/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/google_vertex_ai.ipynb @@ -1,358 +1,358 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Google Vertex AI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# VertexAIEmbeddings\n", - "\n", - "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud.\n", - "\n", - "This will help you get started with Google Vertex AI [embedding models](/docs/concepts#embedding-models) using LangChain. For detailed documentation on `VertexAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.GoogleVertexAIEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/google_vertex_ai_palm/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [`VertexAIEmbeddings`](https://api.js.langchain.com/classes/langchain_google_vertexai.GoogleVertexAIEmbeddings.html) | [`@langchain/google-vertexai`](https://npmjs.com/@langchain/google-vertexai) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "LangChain.js supports two different authentication methods based on whether\n", - "you're running in a Node.js environment or a web environment.\n", - "\n", - "To access `ChatVertexAI` models you'll need to setup Google VertexAI in your Google Cloud Platform (GCP) account, save the credentials file, and install the `@langchain/google-vertexai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to your [GCP account](https://console.cloud.google.com/) and generate a credentials file. 
Once you've done this set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n", - "\n", - "```bash\n", - "export GOOGLE_APPLICATION_CREDENTIALS=\"path/to/your/credentials.json\"\n", - "```\n", - "\n", - "If running in a web environment, you should set the `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable as a JSON stringified object, and install the `@langchain/google-vertexai-web` package:\n", - "\n", - "```bash\n", - "GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain `VertexAIEmbeddings` integration lives in the `@langchain/google-vertexai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/google-vertexai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and embed text:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { VertexAIEmbeddings } from \"@langchain/google-vertexai\";\n", - "// Uncomment the following line if you're running in a web environment:\n", - "// import { VertexAIEmbeddings } from \"@langchain/google-vertexai-web\"\n", - "\n", - "const embeddings = new VertexAIEmbeddings({\n", - " model: \"text-embedding-004\",\n", - " // ...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Google Vertex AI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.02831101417541504, 0.022063178941607475, -0.07454229146242142,\n", - " 0.006448323838412762, 0.001955120824277401, -0.017617391422390938,\n", - " 0.018649872392416, -0.05262855067849159, 0.0006953597767278552,\n", - " -0.0018249079585075378, 0.022437218576669693, 0.0036489504855126143,\n", - " 0.0018086736090481281, 0.016940006986260414, -0.007894322276115417,\n", - " -0.04187627509236336, 0.039501357823610306, 0.06918870657682419,\n", - " -0.006931832991540432, 0.049655742943286896, 0.021211417391896248,\n", - " -0.029322246089577675, -0.04546992480754852, -0.01769082061946392,\n", - " 0.046703994274139404, 0.03127637133002281, 0.006355373188853264,\n", - " 0.014901148155331612, -0.006893016863614321, -0.05992589890956879,\n", - " -0.009733330458402634, 0.015709295868873596, -0.017982766032218933,\n", - " -0.0852997675538063, -0.032453566789627075, 0.0014507169835269451,\n", - " 0.03345133736729622, 0.048862338066101074, 0.006664620712399483,\n", - " -0.06287197023630142, -0.02109423652291298, 0.018176473677158356,\n", - " -0.022175665944814682, 0.03340170532464981, -0.008905526250600815,\n", - " -0.03492079675197601, -0.03819998353719711, -0.05230168625712395,\n", - " -0.05247239023447037, 0.048254698514938354, 0.046494755893945694,\n", - " -0.029708227142691612, -0.002180763054639101, 0.051957979798316956,\n", - " -0.05483679473400116, 0.00700812041759491, -0.08181990683078766,\n", - " -0.02295914851129055, 0.026530204340815544, 0.04028692841529846,\n", - " -0.05230272561311722, -0.057705819606781006, 
-0.015022763051092625,\n", - " 0.002143724123016, 0.06361843645572662, -0.027828887104988098,\n", - " 0.006870461627840996, -0.016140831634402275, -0.034440942108631134,\n", - " -0.004059414379298687, -0.042537953704595566, -0.00984653178602457,\n", - " -0.07701274752616882, 0.09815558046102524, -0.025801729410886765,\n", - " -0.008693721145391464, -0.0010926402173936367, -0.027235493063926697,\n", - " 0.06945550441741943, 0.023456251248717308, -0.02160717360675335,\n", - " 0.03252667561173439, 0.05874639376997948, -0.001329384627752006,\n", - " 0.03664775192737579, -0.07353461533784866, -0.028453022241592407,\n", - " -0.05666429176926613, -0.012955721467733383, -0.041723109781742096,\n", - " 0.07209191471338272, 0.0326194241642952, -0.0496046207845211,\n", - " -0.025037819519639015, 0.004625750705599785, -0.03622527793049812,\n", - " -0.022546149790287018, 0.0053468807600438595, 0.03879072889685631,\n", - " 0.03238753229379654\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# VertexAIEmbeddings\n", + "\n", + "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud.\n", + "\n", + "This will help you get started with Google Vertex AI [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `VertexAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.GoogleVertexAIEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/google_vertex_ai_palm/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [`VertexAIEmbeddings`](https://api.js.langchain.com/classes/langchain_google_vertexai.GoogleVertexAIEmbeddings.html) | [`@langchain/google-vertexai`](https://npmjs.com/@langchain/google-vertexai) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "LangChain.js supports two different authentication methods based on whether\n", + "you're running in a Node.js environment or a web environment.\n", + "\n", + "To access `ChatVertexAI` models you'll need to setup Google VertexAI in your Google Cloud Platform (GCP) account, save the credentials file, and install the `@langchain/google-vertexai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to your [GCP account](https://console.cloud.google.com/) and generate a credentials file. 
Once you've done this set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n", + "\n", + "```bash\n", + "export GOOGLE_APPLICATION_CREDENTIALS=\"path/to/your/credentials.json\"\n", + "```\n", + "\n", + "If running in a web environment, you should set the `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable as a JSON stringified object, and install the `@langchain/google-vertexai-web` package:\n", + "\n", + "```bash\n", + "GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `VertexAIEmbeddings` integration lives in the `@langchain/google-vertexai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/google-vertexai @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and embed text:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { VertexAIEmbeddings } from \"@langchain/google-vertexai\";\n", + "// Uncomment the following line if you're running in a web environment:\n", + "// import { VertexAIEmbeddings } from \"@langchain/google-vertexai-web\"\n", + "\n", + "const embeddings = new VertexAIEmbeddings({\n", + " model: \"text-embedding-004\",\n", + " // ...\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.02831101417541504, 0.022063178941607475, -0.07454229146242142,\n", - " 0.006448323838412762, 0.001955120824277401, -0.017617391422390938,\n", - " 0.018649872392416, -0.05262855067849159, 0.0006953597767278552,\n", - " -0.0018249079585075378, 0.022437218576669693, 0.0036489504855126143,\n", - " 0.0018086736090481281, 0.016940006986260414, -0.007894322276115417,\n", - " -0.04187627509236336, 0.039501357823610306, 0.06918870657682419,\n", - " -0.006931832991540432, 0.049655742943286896, 0.021211417391896248,\n", - " -0.029322246089577675, -0.04546992480754852, -0.01769082061946392,\n", - " 0.046703994274139404, 0.03127637133002281, 0.006355373188853264,\n", - " 0.014901148155331612, -0.006893016863614321, -0.05992589890956879,\n", - " -0.009733330458402634, 0.015709295868873596, -0.017982766032218933,\n", - " -0.0852997675538063, -0.032453566789627075, 0.0014507169835269451,\n", - " 0.03345133736729622, 0.048862338066101074, 0.006664620712399483,\n", - " -0.06287197023630142, -0.02109423652291298, 0.018176473677158356,\n", - " -0.022175665944814682, 0.03340170532464981, -0.008905526250600815,\n", - " -0.03492079675197601, -0.03819998353719711, -0.05230168625712395,\n", - " -0.05247239023447037, 0.048254698514938354, 0.046494755893945694,\n", - " -0.029708227142691612, -0.002180763054639101, 0.051957979798316956,\n", - " -0.05483679473400116, 0.00700812041759491, -0.08181990683078766,\n", - " -0.02295914851129055, 0.026530204340815544, 0.04028692841529846,\n", - " -0.05230272561311722, -0.057705819606781006, -0.015022763051092625,\n", - " 0.002143724123016, 0.06361843645572662, -0.027828887104988098,\n", - " 0.006870461627840996, -0.016140831634402275, -0.034440942108631134,\n", - " -0.004059414379298687, -0.042537953704595566, -0.00984653178602457,\n", - " -0.07701274752616882, 0.09815558046102524, 
-0.025801729410886765,\n", - " -0.008693721145391464, -0.0010926402173936367, -0.027235493063926697,\n", - " 0.06945550441741943, 0.023456251248717308, -0.02160717360675335,\n", - " 0.03252667561173439, 0.05874639376997948, -0.001329384627752006,\n", - " 0.03664775192737579, -0.07353461533784866, -0.028453022241592407,\n", - " -0.05666429176926613, -0.012955721467733383, -0.041723109781742096,\n", - " 0.07209191471338272, 0.0326194241642952, -0.0496046207845211,\n", - " -0.025037819519639015, 0.004625750705599785, -0.03622527793049812,\n", - " -0.022546149790287018, 0.0053468807600438595, 0.03879072889685631,\n", - " 0.03238753229379654\n", - "]\n", - "[\n", - " -0.00007261172140715644, 0.03209814056754112, -0.10099327564239502,\n", - " -0.0017932605696842074, -0.0016863049240782857, 0.009428824298083782,\n", - " 0.023065969347953796, -0.018305035308003426, 0.03765229508280754,\n", - " 0.03357342258095741, 0.0018431750359013677, 0.03230319544672966,\n", - " 0.024983661249279976, 0.02752346731722355, -0.027390114963054657,\n", - " -0.01945030689239502, -0.05770668387413025, 0.046621184796094894,\n", - " -0.03308689966797829, 0.03985097259283066, -0.021250328049063683,\n", - " -0.001940526650287211, -0.06034174561500549, -0.05026412755250931,\n", - " 0.02385033667087555, -0.03279203176498413, 0.02966252714395523,\n", - " 0.01294293999671936, -0.009747475385665894, -0.07896383106708527,\n", - " -0.013269499875605106, -0.011228476651012897, 0.022224457934498787,\n", - " -0.018957728520035744, -0.05092151463031769, -0.043285638093948364,\n", - " 0.016826728358864784, 0.010665969923138618, 0.021219193935394287,\n", - " -0.08588971197605133, -0.038367897272109985, 0.012244532816112041,\n", - " 0.009497410617768764, 0.017629485577344894, 0.0013116559712216258,\n", - " -0.016468070447444916, -0.04423798993229866, -0.04043079912662506,\n", - " -0.05485917255282402, -0.007577189709991217, 0.028067218139767647,\n", - " -0.022974666208028793, 0.0006999042234383523, 0.009812192991375923,\n", - " -0.05387532711029053, -0.016531387344002724, -0.015153753571212292,\n", - " 0.03397523611783981, -0.0018232968868687749, 0.01200891938060522,\n", - " -0.013123664073646069, -0.043459296226501465, -0.01856262981891632,\n", - " 0.018269911408424377, 0.016155652701854706, -0.05597233399748802,\n", - " -0.05852395296096802, 0.020076945424079895, -0.033808667212724686,\n", - " -0.008225022815167904, -0.014589417725801468, -0.01408824510872364,\n", - " -0.06293410807847977, 0.026668129488825798, -0.01397104375064373,\n", - " -0.017627086490392685, -0.03409220278263092, -0.018559949472546577,\n", - " 0.07163946330547333, 0.015611495822668076, -0.034166790544986725,\n", - " -0.005098687019199133, 0.04163505882024765, -0.010681619867682457,\n", - " 0.027817489579319954, -0.031076539307832718, -0.006825212389230728,\n", - " -0.06810358166694641, -0.03793689236044884, -0.03981738165020943,\n", - " 0.09524374455213547, -0.03607913851737976, 0.003638653317466378,\n", - " 0.02828306518495083, 0.018808560445904732, -0.047244682908058167,\n", - " -0.06114668399095535, -0.02395530976355076, 0.036157332360744476,\n", - " 0.0422002375125885\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.02831101417541504, 0.022063178941607475, -0.07454229146242142,\n", + " 0.006448323838412762, 0.001955120824277401, -0.017617391422390938,\n", + " 0.018649872392416, -0.05262855067849159, 
0.0006953597767278552,\n", + " -0.0018249079585075378, 0.022437218576669693, 0.0036489504855126143,\n", + " 0.0018086736090481281, 0.016940006986260414, -0.007894322276115417,\n", + " -0.04187627509236336, 0.039501357823610306, 0.06918870657682419,\n", + " -0.006931832991540432, 0.049655742943286896, 0.021211417391896248,\n", + " -0.029322246089577675, -0.04546992480754852, -0.01769082061946392,\n", + " 0.046703994274139404, 0.03127637133002281, 0.006355373188853264,\n", + " 0.014901148155331612, -0.006893016863614321, -0.05992589890956879,\n", + " -0.009733330458402634, 0.015709295868873596, -0.017982766032218933,\n", + " -0.0852997675538063, -0.032453566789627075, 0.0014507169835269451,\n", + " 0.03345133736729622, 0.048862338066101074, 0.006664620712399483,\n", + " -0.06287197023630142, -0.02109423652291298, 0.018176473677158356,\n", + " -0.022175665944814682, 0.03340170532464981, -0.008905526250600815,\n", + " -0.03492079675197601, -0.03819998353719711, -0.05230168625712395,\n", + " -0.05247239023447037, 0.048254698514938354, 0.046494755893945694,\n", + " -0.029708227142691612, -0.002180763054639101, 0.051957979798316956,\n", + " -0.05483679473400116, 0.00700812041759491, -0.08181990683078766,\n", + " -0.02295914851129055, 0.026530204340815544, 0.04028692841529846,\n", + " -0.05230272561311722, -0.057705819606781006, -0.015022763051092625,\n", + " 0.002143724123016, 0.06361843645572662, -0.027828887104988098,\n", + " 0.006870461627840996, -0.016140831634402275, -0.034440942108631134,\n", + " -0.004059414379298687, -0.042537953704595566, -0.00984653178602457,\n", + " -0.07701274752616882, 0.09815558046102524, -0.025801729410886765,\n", + " -0.008693721145391464, -0.0010926402173936367, -0.027235493063926697,\n", + " 0.06945550441741943, 0.023456251248717308, -0.02160717360675335,\n", + " 0.03252667561173439, 0.05874639376997948, -0.001329384627752006,\n", + " 0.03664775192737579, -0.07353461533784866, -0.028453022241592407,\n", + " -0.05666429176926613, -0.012955721467733383, -0.041723109781742096,\n", + " 0.07209191471338272, 0.0326194241642952, -0.0496046207845211,\n", + " -0.025037819519639015, 0.004625750705599785, -0.03622527793049812,\n", + " -0.022546149790287018, 0.0053468807600438595, 0.03879072889685631,\n", + " 0.03238753229379654\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. 
The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.02831101417541504, 0.022063178941607475, -0.07454229146242142,\n", + " 0.006448323838412762, 0.001955120824277401, -0.017617391422390938,\n", + " 0.018649872392416, -0.05262855067849159, 0.0006953597767278552,\n", + " -0.0018249079585075378, 0.022437218576669693, 0.0036489504855126143,\n", + " 0.0018086736090481281, 0.016940006986260414, -0.007894322276115417,\n", + " -0.04187627509236336, 0.039501357823610306, 0.06918870657682419,\n", + " -0.006931832991540432, 0.049655742943286896, 0.021211417391896248,\n", + " -0.029322246089577675, -0.04546992480754852, -0.01769082061946392,\n", + " 0.046703994274139404, 0.03127637133002281, 0.006355373188853264,\n", + " 0.014901148155331612, -0.006893016863614321, -0.05992589890956879,\n", + " -0.009733330458402634, 0.015709295868873596, -0.017982766032218933,\n", + " -0.0852997675538063, -0.032453566789627075, 0.0014507169835269451,\n", + " 0.03345133736729622, 0.048862338066101074, 0.006664620712399483,\n", + " -0.06287197023630142, -0.02109423652291298, 0.018176473677158356,\n", + " -0.022175665944814682, 0.03340170532464981, -0.008905526250600815,\n", + " -0.03492079675197601, -0.03819998353719711, -0.05230168625712395,\n", + " -0.05247239023447037, 0.048254698514938354, 0.046494755893945694,\n", + " -0.029708227142691612, -0.002180763054639101, 0.051957979798316956,\n", + " -0.05483679473400116, 0.00700812041759491, -0.08181990683078766,\n", + " -0.02295914851129055, 0.026530204340815544, 0.04028692841529846,\n", + " -0.05230272561311722, -0.057705819606781006, -0.015022763051092625,\n", + " 0.002143724123016, 0.06361843645572662, -0.027828887104988098,\n", + " 0.006870461627840996, -0.016140831634402275, -0.034440942108631134,\n", + " -0.004059414379298687, -0.042537953704595566, -0.00984653178602457,\n", + " -0.07701274752616882, 0.09815558046102524, -0.025801729410886765,\n", + " -0.008693721145391464, -0.0010926402173936367, -0.027235493063926697,\n", + " 0.06945550441741943, 0.023456251248717308, -0.02160717360675335,\n", + " 0.03252667561173439, 0.05874639376997948, -0.001329384627752006,\n", + " 0.03664775192737579, -0.07353461533784866, -0.028453022241592407,\n", + " -0.05666429176926613, -0.012955721467733383, -0.041723109781742096,\n", + " 0.07209191471338272, 0.0326194241642952, -0.0496046207845211,\n", + " -0.025037819519639015, 0.004625750705599785, -0.03622527793049812,\n", + " -0.022546149790287018, 0.0053468807600438595, 0.03879072889685631,\n", + " 0.03238753229379654\n", + "]\n", + "[\n", + " -0.00007261172140715644, 0.03209814056754112, -0.10099327564239502,\n", + " -0.0017932605696842074, -0.0016863049240782857, 0.009428824298083782,\n", + " 0.023065969347953796, -0.018305035308003426, 0.03765229508280754,\n", + " 0.03357342258095741, 0.0018431750359013677, 0.03230319544672966,\n", + " 0.024983661249279976, 0.02752346731722355, -0.027390114963054657,\n", + " -0.01945030689239502, -0.05770668387413025, 0.046621184796094894,\n", + " -0.03308689966797829, 0.03985097259283066, -0.021250328049063683,\n", + " -0.001940526650287211, -0.06034174561500549, -0.05026412755250931,\n", + " 0.02385033667087555, -0.03279203176498413, 0.02966252714395523,\n", + " 0.01294293999671936, -0.009747475385665894, -0.07896383106708527,\n", + " -0.013269499875605106, 
-0.011228476651012897, 0.022224457934498787,\n", + " -0.018957728520035744, -0.05092151463031769, -0.043285638093948364,\n", + " 0.016826728358864784, 0.010665969923138618, 0.021219193935394287,\n", + " -0.08588971197605133, -0.038367897272109985, 0.012244532816112041,\n", + " 0.009497410617768764, 0.017629485577344894, 0.0013116559712216258,\n", + " -0.016468070447444916, -0.04423798993229866, -0.04043079912662506,\n", + " -0.05485917255282402, -0.007577189709991217, 0.028067218139767647,\n", + " -0.022974666208028793, 0.0006999042234383523, 0.009812192991375923,\n", + " -0.05387532711029053, -0.016531387344002724, -0.015153753571212292,\n", + " 0.03397523611783981, -0.0018232968868687749, 0.01200891938060522,\n", + " -0.013123664073646069, -0.043459296226501465, -0.01856262981891632,\n", + " 0.018269911408424377, 0.016155652701854706, -0.05597233399748802,\n", + " -0.05852395296096802, 0.020076945424079895, -0.033808667212724686,\n", + " -0.008225022815167904, -0.014589417725801468, -0.01408824510872364,\n", + " -0.06293410807847977, 0.026668129488825798, -0.01397104375064373,\n", + " -0.017627086490392685, -0.03409220278263092, -0.018559949472546577,\n", + " 0.07163946330547333, 0.015611495822668076, -0.034166790544986725,\n", + " -0.005098687019199133, 0.04163505882024765, -0.010681619867682457,\n", + " 0.027817489579319954, -0.031076539307832718, -0.006825212389230728,\n", + " -0.06810358166694641, -0.03793689236044884, -0.03981738165020943,\n", + " 0.09524374455213547, -0.03607913851737976, 0.003638653317466378,\n", + " 0.02828306518495083, 0.018808560445904732, -0.047244682908058167,\n", + " -0.06114668399095535, -0.02395530976355076, 0.036157332360744476,\n", + " 0.0422002375125885\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `VertexAIEmbeddings` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_vertexai.GoogleVertexAIEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `VertexAIEmbeddings` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_vertexai.GoogleVertexAIEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": 
"typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx b/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx index 765c926adb71..f5c469e3c228 100644 --- a/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/gradient_ai.mdx @@ -40,5 +40,5 @@ import GradientEmbeddingsExample from "@examples/embeddings/gradient_ai.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx b/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx index ae34073f0dbd..39a02829124c 100644 --- a/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/hugging_face_inference.mdx @@ -26,5 +26,5 @@ const embeddings = new HuggingFaceInferenceEmbeddings({ ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb b/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb index ecfb9ac677b4..dfe43b07c462 100644 --- a/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb @@ -1,389 +1,389 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: IBM watsonx.ai\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# IBM watsonx.ai\n", - "\n", - "\n", - "This will help you get started with IBM watsonx.ai [embedding models](/docs/concepts#embedding-models) using LangChain. 
For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/_langchain_community.embeddings_ibm.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/ibm_watsonx/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [`IBM watsonx.ai`](https://api.js.langchain.com/classes/_langchain_community.embeddings_ibm.WatsonxEmbeddings.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html)| ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access IBM WatsonxAI embeddings you'll need to create an IBM watsonx.ai account, get an API key or any other type of credentials, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "\n", - "Head to [IBM Cloud](https://cloud.ibm.com/login) to sign up to IBM watsonx.ai and generate an API key or provide any other authentication form as presented below.\n", - "\n", - "#### IAM authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=iam\n", - "export WATSONX_AI_APIKEY=\n", - "```\n", - "\n", - "#### Bearer token authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=bearertoken\n", - "export WATSONX_AI_BEARER_TOKEN=\n", - "```\n", - "\n", - "#### CP4D authentication\n", - "\n", - "```bash\n", - "export WATSONX_AI_AUTH_TYPE=cp4d\n", - "export WATSONX_AI_USERNAME=\n", - "export WATSONX_AI_PASSWORD=\n", - "export WATSONX_AI_URL=\n", - "```\n", - "\n", - "Once these are placed in your environment variables and object is initialized authentication will proceed automatically.\n", - "\n", - "Authentication can also be accomplished by passing these values as parameters to a new instance.\n", - "\n", - "## IAM authentication\n", - "\n", - "```typescript\n", - "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"iam\",\n", - " watsonxAIApikey: \"\",\n", - "};\n", - "const instance = new WatsonxEmbeddings(props);\n", - "```\n", - "\n", - "## Bearer token authentication\n", - "\n", - "```typescript\n", - "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"bearertoken\",\n", - " watsonxAIBearerToken: \"\",\n", - "};\n", - "const instance = new WatsonxEmbeddings(props);\n", - "```\n", - "\n", - "### CP4D authentication\n", - "\n", - "```typescript\n", - "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", - "\n", - "const props = {\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: \"\",\n", - " projectId: \"\",\n", - " watsonxAIAuthType: \"cp4d\",\n", - " watsonxAIUsername: \"\",\n", - " watsonxAIPassword: \"\",\n", - " watsonxAIUrl: \"\",\n", - "};\n", - "const instance = new WatsonxEmbeddings(props);\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your 
[LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain IBM watsonx.ai integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and embed text:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", - "\n", - "const embeddings = new WatsonxEmbeddings({\n", - " version: \"YYYY-MM-DD\",\n", - " serviceUrl: process.env.API_URL,\n", - " projectId: \"\",\n", - " spaceId: \"\",\n", - " idOrName: \"\",\n", - " model: \"\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ba7f5c8a", - "metadata": {}, - "source": [ - "Note:\n", - "\n", - "- You must provide `spaceId`, `projectId` or `idOrName`(deployment id) in order to proceed.\n", - "- Depending on the region of your provisioned service instance, use correct serviceUrl." - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: IBM watsonx.ai\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.017436018, -0.01469498, -0.015685871, -0.013543149, -0.0011519607,\n", - " -0.008123747, 0.015286108, -0.023845721, -0.02454774, 0.07235078,\n", - " -0.032333843, -0.0035843418, -0.015389036, 0.0455373, -0.021119863,\n", - " -0.022039745, 0.021746712, -0.017774817, -0.008232582, -0.036727764,\n", - " -0.015734928, 0.03606811, -0.005108186, -0.036052454, 0.024462992,\n", - " 0.02359307, 0.03273164, 0.009195497, -0.0077208397, -0.0127943,\n", - " -0.023869334, -0.029473905, -0.0080457395, -0.0021337876, 0.04949132,\n", - " 0.013950589, -0.010046689, 0.021029025, -0.031725302, 0.004251065,\n", - " -0.034171984, -0.03696642, -0.014253629, -0.017757406, -0.007531065,\n", - " 0.07187789, 0.009661725, 0.041889492, -0.04660478, 0.028036641,\n", - " 0.059334517, -0.04561291, 0.056029715, -0.00676024, 0.026493236,\n", - " 0.0116374, 0.050126843, -0.018036349, -0.013711887, 0.042252757,\n", - " -0.04453391, 0.04705777, -0.00044598224, -0.030227259, 0.029286578,\n", - " 0.0252211, 0.011694125, -0.031404093, 0.02951232, 0.08812359,\n", - " 0.023539362, -0.011082862, 0.008024676, 0.00084492035, -0.007984158,\n", - " -0.0005008702, -0.025189219, 0.021000557, -0.0065513053, 0.036524914,\n", - " 0.0015150858, -0.0042383806, 0.049065087, 0.000941666, 0.04447001,\n", - " 0.012942205, -0.078316726, -0.03004237, -0.025807172, -0.03446275,\n", - " -0.00932942, -0.044925686, 0.03190307, 0.010136769, -0.048854534,\n", - " 0.025738232, -0.017840309, 0.023738133, 0.014214792, 0.030452395\n", - "]\n" - ] - } - ], - "source": [ - " const 
singleVector = await embeddings.embedQuery(text);\n", - " singleVector.slice(0, 100);" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# IBM watsonx.ai\n", + "\n", + "\n", + "This will help you get started with IBM watsonx.ai [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/_langchain_community.embeddings_ibm.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/ibm_watsonx/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [`IBM watsonx.ai`](https://api.js.langchain.com/classes/_langchain_community.embeddings_ibm.WatsonxEmbeddings.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html)| ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access IBM WatsonxAI embeddings you'll need to create an IBM watsonx.ai account, get an API key or any other type of credentials, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "\n", + "Head to [IBM Cloud](https://cloud.ibm.com/login) to sign up to IBM watsonx.ai and generate an API key or provide any other authentication form as presented below.\n", + "\n", + "#### IAM authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=iam\n", + "export WATSONX_AI_APIKEY=\n", + "```\n", + "\n", + "#### Bearer token authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=bearertoken\n", + "export WATSONX_AI_BEARER_TOKEN=\n", + "```\n", + "\n", + "#### CP4D authentication\n", + "\n", + "```bash\n", + "export WATSONX_AI_AUTH_TYPE=cp4d\n", + "export WATSONX_AI_USERNAME=\n", + "export WATSONX_AI_PASSWORD=\n", + "export WATSONX_AI_URL=\n", + "```\n", + "\n", + "Once these are placed in your environment variables and object is initialized authentication will proceed automatically.\n", + "\n", + "Authentication can also be accomplished by passing these values as parameters to a new instance.\n", + "\n", + "## IAM authentication\n", + "\n", + "```typescript\n", + "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"iam\",\n", + " watsonxAIApikey: \"\",\n", + "};\n", + "const instance = new WatsonxEmbeddings(props);\n", + "```\n", + "\n", + "## Bearer token authentication\n", + "\n", + "```typescript\n", + "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: 
\"\",\n", + " watsonxAIAuthType: \"bearertoken\",\n", + " watsonxAIBearerToken: \"\",\n", + "};\n", + "const instance = new WatsonxEmbeddings(props);\n", + "```\n", + "\n", + "### CP4D authentication\n", + "\n", + "```typescript\n", + "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", + "\n", + "const props = {\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: \"\",\n", + " projectId: \"\",\n", + " watsonxAIAuthType: \"cp4d\",\n", + " watsonxAIUsername: \"\",\n", + " watsonxAIPassword: \"\",\n", + " watsonxAIUrl: \"\",\n", + "};\n", + "const instance = new WatsonxEmbeddings(props);\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain IBM watsonx.ai integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and embed text:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { WatsonxEmbeddings } from \"@langchain/community/embeddings/ibm\";\n", + "\n", + "const embeddings = new WatsonxEmbeddings({\n", + " version: \"YYYY-MM-DD\",\n", + " serviceUrl: process.env.API_URL,\n", + " projectId: \"\",\n", + " spaceId: \"\",\n", + " idOrName: \"\",\n", + " model: \"\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ba7f5c8a", + "metadata": {}, + "source": [ + "Note:\n", + "\n", + "- You must provide `spaceId`, `projectId` or `idOrName`(deployment id) in order to proceed.\n", + "- Depending on the region of your provisioned service instance, use correct serviceUrl." + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.017436024, -0.014695002, -0.01568589, -0.013543164, -0.001151976,\n", - " -0.008123703, 0.015286064, -0.023845702, -0.024547677, 0.07235076,\n", - " -0.032333862, -0.0035843418, -0.015389038, 0.045537304, -0.021119865,\n", - " -0.02203975, 0.021746716, -0.01777481, -0.008232588, -0.03672781,\n", - " -0.015734889, 0.036068108, -0.0051082, -0.036052432, 0.024462998,\n", - " 0.023593083, 0.03273162, 0.009195521, -0.007720828, -0.012794304,\n", - " -0.023869323, -0.029473891, -0.008045726, -0.002133793, 0.049491342,\n", - " 0.013950573, -0.010046691, 0.02102898, -0.03172528, 0.0042510596,\n", - " -0.034171965, -0.036966413, -0.014253668, -0.017757434, -0.007531062,\n", - " 0.07187787, 0.009661732, 0.041889492, -0.04660476, 0.028036654,\n", - " 0.059334517, -0.045612894, 0.056029722, -0.00676024, 0.026493296,\n", - " 0.0116374055, 0.050126873, -0.018036384, -0.013711868, 0.0422528,\n", - " -0.044533912, 0.047057763, -0.00044596897, -0.030227251, 0.029286569,\n", - " 0.025221113, 0.011694138, -0.03140413, 0.029512335, 0.08812357,\n", - " 0.023539348, -0.011082865, 0.008024677, 0.00084490055, -0.007984145,\n", - " -0.0005008745, -0.025189226, 0.021000564, -0.0065513197, 0.036524955,\n", - " 0.0015150585, -0.0042383634, 0.049065102, 0.000941638, 0.044469994,\n", - " 0.012942193, -0.078316696, -0.0300424, -0.025807157, -0.0344627,\n", - " -0.009329439, -0.04492573, 0.031903077, 0.010136808, -0.048854522,\n", - " 0.025738247, -0.01784033, 0.023738142, 0.014214801, 0.030452369\n", - "]\n", - "[\n", - " 0.03278884, -0.017893745, -0.0027520044, 0.016506646, 0.028271576,\n", - " -0.01284331, 0.014344065, -0.007968607, -0.03899479, 0.039327156,\n", - " -0.047726233, 0.009559004, -0.05302522, 0.011498492, -0.0055542476,\n", - " -0.0020940166, -0.029262392, -0.025919685, 0.024261741, -0.0010863725,\n", - " 0.0074619935, 0.014191284, -0.009054746, -0.038633537, 0.039744128,\n", - " 0.012625762, 0.030490868, 0.013526139, -0.024638629, -0.011268263,\n", - " -0.012759613, -0.04693565, -0.013087251, -0.01971696, 0.0125782555,\n", - " 0.024156926, -0.011638484, 0.017364893, -0.0405832, -0.0032466082,\n", - " -0.01611277, -0.022583133, 0.019492855, -0.03664484, -0.022627067,\n", - " 0.011026938, -0.014631298, 0.043255687, -0.029447634, 0.017212389,\n", - " 0.029366229, -0.041978795, 0.005347565, -0.0106230285, -0.008334342,\n", - " -0.008841154, 0.045096103, 0.03996879, -0.002039457, -0.0051824683,\n", - " -0.019464444, 0.092018366, -0.009283633, 
-0.020052811, 0.0043408144,\n", - " -0.029403884, 0.02587689, -0.027253918, 0.0159064, 0.0421537,\n", - " 0.05078811, -0.012380686, -0.018032575, 0.01711449, 0.03636163,\n", - " -0.014590949, -0.015076142, 0.00018201554, 0.002490666, 0.044776678,\n", - " 0.05301749, -0.007891316, 0.028668318, -0.0016632816, 0.04487743,\n", - " -0.032529455, -0.040372133, -0.020566158, -0.011109745, -0.01724949,\n", - " -0.0047519016, -0.041635286, 0.0068111843, 0.039498538, -0.02491227,\n", - " 0.016853934, -0.017926402, -0.006154979, 0.025893573, 0.015262395\n", - "]\n" - ] + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.017436018, -0.01469498, -0.015685871, -0.013543149, -0.0011519607,\n", + " -0.008123747, 0.015286108, -0.023845721, -0.02454774, 0.07235078,\n", + " -0.032333843, -0.0035843418, -0.015389036, 0.0455373, -0.021119863,\n", + " -0.022039745, 0.021746712, -0.017774817, -0.008232582, -0.036727764,\n", + " -0.015734928, 0.03606811, -0.005108186, -0.036052454, 0.024462992,\n", + " 0.02359307, 0.03273164, 0.009195497, -0.0077208397, -0.0127943,\n", + " -0.023869334, -0.029473905, -0.0080457395, -0.0021337876, 0.04949132,\n", + " 0.013950589, -0.010046689, 0.021029025, -0.031725302, 0.004251065,\n", + " -0.034171984, -0.03696642, -0.014253629, -0.017757406, -0.007531065,\n", + " 0.07187789, 0.009661725, 0.041889492, -0.04660478, 0.028036641,\n", + " 0.059334517, -0.04561291, 0.056029715, -0.00676024, 0.026493236,\n", + " 0.0116374, 0.050126843, -0.018036349, -0.013711887, 0.042252757,\n", + " -0.04453391, 0.04705777, -0.00044598224, -0.030227259, 0.029286578,\n", + " 0.0252211, 0.011694125, -0.031404093, 0.02951232, 0.08812359,\n", + " 0.023539362, -0.011082862, 0.008024676, 0.00084492035, -0.007984158,\n", + " -0.0005008702, -0.025189219, 0.021000557, -0.0065513053, 0.036524914,\n", + " 0.0015150858, -0.0042383806, 0.049065087, 0.000941666, 0.04447001,\n", + " 0.012942205, -0.078316726, -0.03004237, -0.025807172, -0.03446275,\n", + " -0.00932942, -0.044925686, 0.03190307, 0.010136769, -0.048854534,\n", + " 0.025738232, -0.017840309, 0.023738133, 0.014214792, 0.030452395\n", + "]\n" + ] + } + ], + "source": [ + " const singleVector = await embeddings.embedQuery(text);\n", + " singleVector.slice(0, 100);" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. 
The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.017436024, -0.014695002, -0.01568589, -0.013543164, -0.001151976,\n", + " -0.008123703, 0.015286064, -0.023845702, -0.024547677, 0.07235076,\n", + " -0.032333862, -0.0035843418, -0.015389038, 0.045537304, -0.021119865,\n", + " -0.02203975, 0.021746716, -0.01777481, -0.008232588, -0.03672781,\n", + " -0.015734889, 0.036068108, -0.0051082, -0.036052432, 0.024462998,\n", + " 0.023593083, 0.03273162, 0.009195521, -0.007720828, -0.012794304,\n", + " -0.023869323, -0.029473891, -0.008045726, -0.002133793, 0.049491342,\n", + " 0.013950573, -0.010046691, 0.02102898, -0.03172528, 0.0042510596,\n", + " -0.034171965, -0.036966413, -0.014253668, -0.017757434, -0.007531062,\n", + " 0.07187787, 0.009661732, 0.041889492, -0.04660476, 0.028036654,\n", + " 0.059334517, -0.045612894, 0.056029722, -0.00676024, 0.026493296,\n", + " 0.0116374055, 0.050126873, -0.018036384, -0.013711868, 0.0422528,\n", + " -0.044533912, 0.047057763, -0.00044596897, -0.030227251, 0.029286569,\n", + " 0.025221113, 0.011694138, -0.03140413, 0.029512335, 0.08812357,\n", + " 0.023539348, -0.011082865, 0.008024677, 0.00084490055, -0.007984145,\n", + " -0.0005008745, -0.025189226, 0.021000564, -0.0065513197, 0.036524955,\n", + " 0.0015150585, -0.0042383634, 0.049065102, 0.000941638, 0.044469994,\n", + " 0.012942193, -0.078316696, -0.0300424, -0.025807157, -0.0344627,\n", + " -0.009329439, -0.04492573, 0.031903077, 0.010136808, -0.048854522,\n", + " 0.025738247, -0.01784033, 0.023738142, 0.014214801, 0.030452369\n", + "]\n", + "[\n", + " 0.03278884, -0.017893745, -0.0027520044, 0.016506646, 0.028271576,\n", + " -0.01284331, 0.014344065, -0.007968607, -0.03899479, 0.039327156,\n", + " -0.047726233, 0.009559004, -0.05302522, 0.011498492, -0.0055542476,\n", + " -0.0020940166, -0.029262392, -0.025919685, 0.024261741, -0.0010863725,\n", + " 0.0074619935, 0.014191284, -0.009054746, -0.038633537, 0.039744128,\n", + " 0.012625762, 0.030490868, 0.013526139, -0.024638629, -0.011268263,\n", + " -0.012759613, -0.04693565, -0.013087251, -0.01971696, 0.0125782555,\n", + " 0.024156926, -0.011638484, 0.017364893, -0.0405832, -0.0032466082,\n", + " -0.01611277, -0.022583133, 0.019492855, -0.03664484, -0.022627067,\n", + " 0.011026938, -0.014631298, 0.043255687, -0.029447634, 0.017212389,\n", + " 0.029366229, -0.041978795, 0.005347565, -0.0106230285, -0.008334342,\n", + " -0.008841154, 0.045096103, 0.03996879, -0.002039457, -0.0051824683,\n", + " -0.019464444, 0.092018366, -0.009283633, -0.020052811, 0.0043408144,\n", + " -0.029403884, 0.02587689, -0.027253918, 0.0159064, 0.0421537,\n", + " 0.05078811, -0.012380686, -0.018032575, 0.01711449, 0.03636163,\n", + " -0.014590949, -0.015076142, 0.00018201554, 0.002490666, 0.044776678,\n", + " 0.05301749, -0.007891316, 0.028668318, -0.0016632816, 0.04487743,\n", + " -0.032529455, -0.040372133, -0.020566158, -0.011109745, -0.01724949,\n", + " -0.0047519016, -0.041635286, 0.0068111843, 0.039498538, -0.02491227,\n", + " 0.016853934, -0.017926402, -0.006154979, 0.025893573, 0.015262395\n", + "]\n" + ] + } + ], + "source": [ + "\n", + "\n", + " const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + " const vectors = await embeddings.embedDocuments([text, text2]);\n", + " \n", + 
" console.log(vectors[0].slice(0, 100));\n", + " console.log(vectors[1].slice(0, 100));\n", + " " + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all __module_name__ features and configurations head to the API reference: __api_ref_module__" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "JavaScript (Node.js)", + "language": "javascript", + "name": "javascript" + }, + "language_info": { + "file_extension": ".js", + "mimetype": "application/javascript", + "name": "javascript", + "version": "20.17.0" } - ], - "source": [ - "\n", - "\n", - " const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - " const vectors = await embeddings.embedDocuments([text, text2]);\n", - " \n", - " console.log(vectors[0].slice(0, 100));\n", - " console.log(vectors[1].slice(0, 100));\n", - " " - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all __module_name__ features and configurations head to the API reference: __api_ref_module__" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "JavaScript (Node.js)", - "language": "javascript", - "name": "javascript" }, - "language_info": { - "file_extension": ".js", - "mimetype": "application/javascript", - "name": "javascript", - "version": "20.17.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/index.mdx b/docs/core_docs/docs/integrations/text_embedding/index.mdx index 2e5b612041aa..3dab48bc7e3d 100644 --- a/docs/core_docs/docs/integrations/text_embedding/index.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/index.mdx @@ -5,7 +5,7 @@ sidebar_class_name: hidden # Embeddings -[Embedding models](/docs/concepts#embedding-models) create a vector representation of a piece of text. +[Embedding models](/docs/concepts/embedding_models) create a vector representation of a piece of text. This page documents integrations with various model providers that allow you to use embeddings in LangChain. diff --git a/docs/core_docs/docs/integrations/text_embedding/jina.mdx b/docs/core_docs/docs/integrations/text_embedding/jina.mdx index 1a8cd20d0bc9..e44100a91856 100644 --- a/docs/core_docs/docs/integrations/text_embedding/jina.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/jina.mdx @@ -111,5 +111,5 @@ For feedback or questions, please contact [support@jina.ai](mailto:support@jina. 
## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx b/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx index 59349a1e88bc..b4cb6868dd83 100644 --- a/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx @@ -53,5 +53,5 @@ import DocsExample from "@examples/embeddings/llama_cpp_docs.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/minimax.mdx b/docs/core_docs/docs/integrations/text_embedding/minimax.mdx index f6e1a881fc20..6fbcce8e685f 100644 --- a/docs/core_docs/docs/integrations/text_embedding/minimax.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/minimax.mdx @@ -27,5 +27,5 @@ export const run = async () => { ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb b/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb index 360e8fcfcdef..97660a2b1d90 100644 --- a/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/mistralai.ipynb @@ -1,344 +1,344 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: MistralAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# MistralAIEmbeddings\n", - "\n", - "This will help you get started with MistralAIEmbeddings [embedding models](/docs/concepts#embedding-models) using LangChain. For detailed documentation on `MistralAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.MistralAIEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/mistralai/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [MistralAIEmbeddings](https://api.js.langchain.com/classes/langchain_mistralai.MistralAIEmbeddings.html) | [@langchain/mistralai](https://api.js.langchain.com/modules/langchain_mistralai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access MistralAI embedding models you'll need to create a MistralAI account, get an API key, and install the `@langchain/mistralai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [console.mistral.ai](https://console.mistral.ai/) to sign up to `MistralAI` and generate an API key. 
Once you've done this set the `MISTRAL_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export MISTRAL_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain MistralAIEmbeddings integration lives in the `@langchain/mistralai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/mistralai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { MistralAIEmbeddings } from \"@langchain/mistralai\";\n", - "\n", - "const embeddings = new MistralAIEmbeddings({\n", - " model: \"mistral-embed\", // Default value\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: MistralAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.04443359375, 0.01885986328125, 0.018035888671875,\n", - " -0.00864410400390625, 0.049652099609375, -0.001190185546875,\n", - " 0.028900146484375, -0.035675048828125, -0.00702667236328125,\n", - " 0.00016105175018310547, -0.027587890625, 0.029388427734375,\n", - " -0.053253173828125, -0.0003020763397216797, -0.046112060546875,\n", - " 0.0258026123046875, -0.0010776519775390625, 0.02703857421875,\n", - " 0.040985107421875, -0.004547119140625, -0.020172119140625,\n", - " -0.02606201171875, -0.01457977294921875, 0.01220703125,\n", - " -0.0078582763671875, -0.0084228515625, -0.02056884765625,\n", - " -0.071044921875, -0.0404052734375, 0.00923919677734375,\n", - " 0.01407623291015625, -0.0210113525390625, 0.0006284713745117188,\n", - " -0.01465606689453125, 0.0186309814453125, -0.015838623046875,\n", - " 0.0007920265197753906, -0.04437255859375, 0.008758544921875,\n", - " -0.0172119140625, 0.01312255859375, -0.01358795166015625,\n", - " -0.0212860107421875, -0.000035822391510009766, -0.0226898193359375,\n", - " -0.01390838623046875, -0.007659912109375, -0.016021728515625,\n", - " 0.025909423828125, -0.034515380859375, -0.0372314453125,\n", - " 0.020355224609375, -0.02606201171875, -0.0158843994140625,\n", - " -0.037994384765625, 0.00450897216796875, 0.0142822265625,\n", - " -0.012725830078125, -0.0770263671875, 0.02630615234375,\n", - " -0.048614501953125, 0.006072998046875, 0.00417327880859375,\n", - " -0.005138397216796875, 0.02557373046875, 0.0311279296875,\n", - " 0.026519775390625, -0.0103607177734375, 
-0.0108489990234375,\n", - " -0.029510498046875, 0.022186279296875, 0.0256500244140625,\n", - " -0.0186309814453125, 0.0443115234375, -0.0304107666015625,\n", - " -0.03131103515625, 0.007427215576171875, 0.0234527587890625,\n", - " 0.0224761962890625, 0.00463104248046875, -0.0037021636962890625,\n", - " 0.0302581787109375, 0.0733642578125, -0.0121612548828125,\n", - " -0.0172576904296875, 0.019317626953125, 0.029052734375,\n", - " -0.0024871826171875, 0.0174713134765625, 0.026092529296875,\n", - " 0.04425048828125, -0.0004563331604003906, 0.0146026611328125,\n", - " -0.00748443603515625, 0.06146240234375, 0.02294921875,\n", - " -0.016845703125, -0.0014057159423828125, -0.01435089111328125,\n", - " 0.06097412109375\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# MistralAIEmbeddings\n", + "\n", + "This will help you get started with MistralAIEmbeddings [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `MistralAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.MistralAIEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/mistralai/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [MistralAIEmbeddings](https://api.js.langchain.com/classes/langchain_mistralai.MistralAIEmbeddings.html) | [@langchain/mistralai](https://api.js.langchain.com/modules/langchain_mistralai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access MistralAI embedding models you'll need to create a MistralAI account, get an API key, and install the `@langchain/mistralai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [console.mistral.ai](https://console.mistral.ai/) to sign up to `MistralAI` and generate an API key. 
Once you've done this set the `MISTRAL_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export MISTRAL_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain MistralAIEmbeddings integration lives in the `@langchain/mistralai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/mistralai @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { MistralAIEmbeddings } from \"@langchain/mistralai\";\n", + "\n", + "const embeddings = new MistralAIEmbeddings({\n", + " model: \"mistral-embed\", // Default value\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.04443359375, 0.01885986328125, 0.0180511474609375,\n", - " -0.0086517333984375, 0.049652099609375, -0.00121307373046875,\n", - " 0.0289154052734375, -0.03570556640625, -0.007015228271484375,\n", - " 0.0001499652862548828, -0.0276641845703125, 0.0294036865234375,\n", - " -0.05322265625, -0.0002808570861816406, -0.04608154296875,\n", - " 0.02581787109375, -0.0011072158813476562, 0.027099609375,\n", - " 0.040985107421875, -0.004547119140625, -0.0201873779296875,\n", - " -0.0260772705078125, -0.0146026611328125, 0.0121917724609375,\n", - " -0.007843017578125, -0.0084381103515625, -0.0205535888671875,\n", - " -0.07110595703125, -0.04046630859375, 0.00931549072265625,\n", - " 0.01409912109375, -0.02099609375, 0.0006232261657714844,\n", - " -0.014678955078125, 0.0186614990234375, -0.0158233642578125,\n", - " 0.000812530517578125, -0.04437255859375, 0.00873565673828125,\n", - " -0.0172119140625, 0.013092041015625, -0.0135498046875,\n", - " -0.0212860107421875, -0.000006735324859619141, -0.0226898193359375,\n", - " -0.01389312744140625, -0.0076751708984375, -0.0160064697265625,\n", - " 0.0259246826171875, -0.0345458984375, -0.037200927734375,\n", - " 0.020355224609375, -0.0260009765625, -0.0159149169921875,\n", - " -0.03802490234375, 0.004489898681640625, 0.0143280029296875,\n", - " -0.01274871826171875, -0.07708740234375, 0.0263214111328125,\n", - " -0.04864501953125, 0.00608062744140625, 0.004192352294921875,\n", - " -0.005115509033203125, 0.0255889892578125, 0.0311279296875,\n", - " 0.0265045166015625, -0.0103607177734375, -0.01084136962890625,\n", - " -0.0294952392578125, 0.022186279296875, 0.0256500244140625,\n", - " -0.0186767578125, 0.044342041015625, -0.030426025390625,\n", - " -0.03131103515625, 0.007396697998046875, 0.0234527587890625,\n", - " 0.0224609375, 0.004634857177734375, 
-0.003643035888671875,\n", - " 0.0302886962890625, 0.07342529296875, -0.01221466064453125,\n", - " -0.017303466796875, 0.0193023681640625, 0.029052734375,\n", - " -0.0024890899658203125, 0.0174407958984375, 0.026123046875,\n", - " 0.044219970703125, -0.0004944801330566406, 0.01462554931640625,\n", - " -0.007450103759765625, 0.06146240234375, 0.022979736328125,\n", - " -0.016845703125, -0.001445770263671875, -0.0143890380859375,\n", - " 0.06097412109375\n", - "]\n", - "[\n", - " -0.02032470703125, 0.02606201171875, 0.051605224609375,\n", - " -0.0281982421875, 0.055755615234375, 0.001987457275390625,\n", - " 0.031982421875, -0.0131378173828125, -0.0252685546875,\n", - " 0.001010894775390625, -0.024017333984375, 0.053375244140625,\n", - " -0.042816162109375, 0.005584716796875, -0.04132080078125,\n", - " 0.03021240234375, 0.01324462890625, 0.016876220703125,\n", - " 0.041961669921875, -0.004299163818359375, -0.0273895263671875,\n", - " -0.039642333984375, -0.021575927734375, 0.0309295654296875,\n", - " -0.0099945068359375, -0.0163726806640625, -0.00968170166015625,\n", - " -0.07733154296875, -0.030364990234375, -0.003864288330078125,\n", - " 0.016387939453125, -0.0389404296875, -0.0026702880859375,\n", - " -0.0176544189453125, 0.0264434814453125, -0.01226806640625,\n", - " -0.0022220611572265625, -0.039703369140625, -0.00907135009765625,\n", - " -0.0260467529296875, 0.03155517578125, -0.0004324913024902344,\n", - " -0.019500732421875, -0.0120697021484375, -0.008544921875,\n", - " -0.01654052734375, 0.00067138671875, -0.0134735107421875,\n", - " 0.01080322265625, -0.034759521484375, -0.06201171875,\n", - " 0.012359619140625, -0.006237030029296875, -0.0168914794921875,\n", - " -0.0183563232421875, 0.0236053466796875, -0.0021419525146484375,\n", - " -0.0164947509765625, -0.052581787109375, 0.022125244140625,\n", - " -0.045745849609375, -0.0009088516235351562, 0.0097808837890625,\n", - " -0.0009326934814453125, 0.041656494140625, 0.0269775390625,\n", - " 0.016845703125, -0.0022335052490234375, -0.0182342529296875,\n", - " -0.0245208740234375, 0.0036602020263671875, -0.0188751220703125,\n", - " -0.0023956298828125, 0.0238800048828125, -0.034942626953125,\n", - " -0.033782958984375, 0.0046234130859375, 0.0318603515625,\n", - " 0.0251007080078125, -0.0023288726806640625, -0.0225677490234375,\n", - " 0.0004394054412841797, 0.064208984375, -0.0254669189453125,\n", - " -0.0234222412109375, 0.0009264945983886719, 0.01464080810546875,\n", - " 0.006626129150390625, -0.007450103759765625, 0.02642822265625,\n", - " 0.0260009765625, 0.00536346435546875, 0.01479339599609375,\n", - " -0.0032253265380859375, 0.0498046875, 0.048248291015625,\n", - " -0.01519012451171875, 0.00605010986328125, 0.019744873046875,\n", - " 0.0296478271484375\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.04443359375, 0.01885986328125, 0.018035888671875,\n", + " -0.00864410400390625, 0.049652099609375, -0.001190185546875,\n", + " 0.028900146484375, -0.035675048828125, -0.00702667236328125,\n", + " 0.00016105175018310547, -0.027587890625, 0.029388427734375,\n", + " -0.053253173828125, -0.0003020763397216797, -0.046112060546875,\n", + " 0.0258026123046875, -0.0010776519775390625, 0.02703857421875,\n", + " 0.040985107421875, -0.004547119140625, -0.020172119140625,\n", + " -0.02606201171875, -0.01457977294921875, 0.01220703125,\n", + " -0.0078582763671875, -0.0084228515625, 
-0.02056884765625,\n", + " -0.071044921875, -0.0404052734375, 0.00923919677734375,\n", + " 0.01407623291015625, -0.0210113525390625, 0.0006284713745117188,\n", + " -0.01465606689453125, 0.0186309814453125, -0.015838623046875,\n", + " 0.0007920265197753906, -0.04437255859375, 0.008758544921875,\n", + " -0.0172119140625, 0.01312255859375, -0.01358795166015625,\n", + " -0.0212860107421875, -0.000035822391510009766, -0.0226898193359375,\n", + " -0.01390838623046875, -0.007659912109375, -0.016021728515625,\n", + " 0.025909423828125, -0.034515380859375, -0.0372314453125,\n", + " 0.020355224609375, -0.02606201171875, -0.0158843994140625,\n", + " -0.037994384765625, 0.00450897216796875, 0.0142822265625,\n", + " -0.012725830078125, -0.0770263671875, 0.02630615234375,\n", + " -0.048614501953125, 0.006072998046875, 0.00417327880859375,\n", + " -0.005138397216796875, 0.02557373046875, 0.0311279296875,\n", + " 0.026519775390625, -0.0103607177734375, -0.0108489990234375,\n", + " -0.029510498046875, 0.022186279296875, 0.0256500244140625,\n", + " -0.0186309814453125, 0.0443115234375, -0.0304107666015625,\n", + " -0.03131103515625, 0.007427215576171875, 0.0234527587890625,\n", + " 0.0224761962890625, 0.00463104248046875, -0.0037021636962890625,\n", + " 0.0302581787109375, 0.0733642578125, -0.0121612548828125,\n", + " -0.0172576904296875, 0.019317626953125, 0.029052734375,\n", + " -0.0024871826171875, 0.0174713134765625, 0.026092529296875,\n", + " 0.04425048828125, -0.0004563331604003906, 0.0146026611328125,\n", + " -0.00748443603515625, 0.06146240234375, 0.02294921875,\n", + " -0.016845703125, -0.0014057159423828125, -0.01435089111328125,\n", + " 0.06097412109375\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. 
The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.04443359375, 0.01885986328125, 0.0180511474609375,\n", + " -0.0086517333984375, 0.049652099609375, -0.00121307373046875,\n", + " 0.0289154052734375, -0.03570556640625, -0.007015228271484375,\n", + " 0.0001499652862548828, -0.0276641845703125, 0.0294036865234375,\n", + " -0.05322265625, -0.0002808570861816406, -0.04608154296875,\n", + " 0.02581787109375, -0.0011072158813476562, 0.027099609375,\n", + " 0.040985107421875, -0.004547119140625, -0.0201873779296875,\n", + " -0.0260772705078125, -0.0146026611328125, 0.0121917724609375,\n", + " -0.007843017578125, -0.0084381103515625, -0.0205535888671875,\n", + " -0.07110595703125, -0.04046630859375, 0.00931549072265625,\n", + " 0.01409912109375, -0.02099609375, 0.0006232261657714844,\n", + " -0.014678955078125, 0.0186614990234375, -0.0158233642578125,\n", + " 0.000812530517578125, -0.04437255859375, 0.00873565673828125,\n", + " -0.0172119140625, 0.013092041015625, -0.0135498046875,\n", + " -0.0212860107421875, -0.000006735324859619141, -0.0226898193359375,\n", + " -0.01389312744140625, -0.0076751708984375, -0.0160064697265625,\n", + " 0.0259246826171875, -0.0345458984375, -0.037200927734375,\n", + " 0.020355224609375, -0.0260009765625, -0.0159149169921875,\n", + " -0.03802490234375, 0.004489898681640625, 0.0143280029296875,\n", + " -0.01274871826171875, -0.07708740234375, 0.0263214111328125,\n", + " -0.04864501953125, 0.00608062744140625, 0.004192352294921875,\n", + " -0.005115509033203125, 0.0255889892578125, 0.0311279296875,\n", + " 0.0265045166015625, -0.0103607177734375, -0.01084136962890625,\n", + " -0.0294952392578125, 0.022186279296875, 0.0256500244140625,\n", + " -0.0186767578125, 0.044342041015625, -0.030426025390625,\n", + " -0.03131103515625, 0.007396697998046875, 0.0234527587890625,\n", + " 0.0224609375, 0.004634857177734375, -0.003643035888671875,\n", + " 0.0302886962890625, 0.07342529296875, -0.01221466064453125,\n", + " -0.017303466796875, 0.0193023681640625, 0.029052734375,\n", + " -0.0024890899658203125, 0.0174407958984375, 0.026123046875,\n", + " 0.044219970703125, -0.0004944801330566406, 0.01462554931640625,\n", + " -0.007450103759765625, 0.06146240234375, 0.022979736328125,\n", + " -0.016845703125, -0.001445770263671875, -0.0143890380859375,\n", + " 0.06097412109375\n", + "]\n", + "[\n", + " -0.02032470703125, 0.02606201171875, 0.051605224609375,\n", + " -0.0281982421875, 0.055755615234375, 0.001987457275390625,\n", + " 0.031982421875, -0.0131378173828125, -0.0252685546875,\n", + " 0.001010894775390625, -0.024017333984375, 0.053375244140625,\n", + " -0.042816162109375, 0.005584716796875, -0.04132080078125,\n", + " 0.03021240234375, 0.01324462890625, 0.016876220703125,\n", + " 0.041961669921875, -0.004299163818359375, -0.0273895263671875,\n", + " -0.039642333984375, -0.021575927734375, 0.0309295654296875,\n", + " -0.0099945068359375, -0.0163726806640625, -0.00968170166015625,\n", + " -0.07733154296875, -0.030364990234375, -0.003864288330078125,\n", + " 0.016387939453125, -0.0389404296875, -0.0026702880859375,\n", + " -0.0176544189453125, 0.0264434814453125, -0.01226806640625,\n", + " -0.0022220611572265625, -0.039703369140625, -0.00907135009765625,\n", + " -0.0260467529296875, 0.03155517578125, -0.0004324913024902344,\n", + " -0.019500732421875, 
-0.0120697021484375, -0.008544921875,\n", + " -0.01654052734375, 0.00067138671875, -0.0134735107421875,\n", + " 0.01080322265625, -0.034759521484375, -0.06201171875,\n", + " 0.012359619140625, -0.006237030029296875, -0.0168914794921875,\n", + " -0.0183563232421875, 0.0236053466796875, -0.0021419525146484375,\n", + " -0.0164947509765625, -0.052581787109375, 0.022125244140625,\n", + " -0.045745849609375, -0.0009088516235351562, 0.0097808837890625,\n", + " -0.0009326934814453125, 0.041656494140625, 0.0269775390625,\n", + " 0.016845703125, -0.0022335052490234375, -0.0182342529296875,\n", + " -0.0245208740234375, 0.0036602020263671875, -0.0188751220703125,\n", + " -0.0023956298828125, 0.0238800048828125, -0.034942626953125,\n", + " -0.033782958984375, 0.0046234130859375, 0.0318603515625,\n", + " 0.0251007080078125, -0.0023288726806640625, -0.0225677490234375,\n", + " 0.0004394054412841797, 0.064208984375, -0.0254669189453125,\n", + " -0.0234222412109375, 0.0009264945983886719, 0.01464080810546875,\n", + " 0.006626129150390625, -0.007450103759765625, 0.02642822265625,\n", + " 0.0260009765625, 0.00536346435546875, 0.01479339599609375,\n", + " -0.0032253265380859375, 0.0498046875, 0.048248291015625,\n", + " -0.01519012451171875, 0.00605010986328125, 0.019744873046875,\n", + " 0.0296478271484375\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all MistralAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_mistralai.MistralAIEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all MistralAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_mistralai.MistralAIEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx b/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx index 
4fe65d0a8439..27d116ce1fa0 100644 --- a/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/mixedbread_ai.mdx @@ -86,5 +86,5 @@ try { ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/nomic.mdx b/docs/core_docs/docs/integrations/text_embedding/nomic.mdx index c80ad74ff673..ce931c0440d1 100644 --- a/docs/core_docs/docs/integrations/text_embedding/nomic.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/nomic.mdx @@ -30,5 +30,5 @@ import NomicExample from "@examples/models/embeddings/nomic.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb b/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb index 86e59ebf8b53..e1aa8cfa7771 100644 --- a/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/ollama.ipynb @@ -1,334 +1,334 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Ollama\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# OllamaEmbeddings\n", - "\n", - "This will help you get started with Ollama [embedding models](/docs/concepts#embedding-models) using LangChain. 
For detailed documentation on `OllamaEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.OllamaEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/ollama/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [`OllamaEmbeddings`](https://api.js.langchain.com/classes/langchain_ollama.OllamaEmbeddings.html) | [`@langchain/ollama`](https://npmjs.com/@langchain/ollama) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access Ollama embedding models you'll need to follow [these instructions](https://github.com/jmorganca/ollama) to install Ollama, and install the `@langchain/ollama` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain OllamaEmbeddings integration lives in the `@langchain/ollama` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/ollama @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and embed text:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { OllamaEmbeddings } from \"@langchain/ollama\";\n", - "\n", - "const embeddings = new OllamaEmbeddings({\n", - " model: \"mxbai-embed-large\", // Default value\n", - " baseUrl: \"http://localhost:11434\", // Default value\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
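For reference, the indexing and retrieval flow described above can be condensed into a minimal sketch. It assumes a locally running Ollama server on the default port with the `mxbai-embed-large` model already pulled:

```typescript
import { OllamaEmbeddings } from "@langchain/ollama";
import { MemoryVectorStore } from "langchain/vectorstores/memory";

// Embedding model backed by a local Ollama server (default host and port assumed)
const embeddings = new OllamaEmbeddings({
  model: "mxbai-embed-large",
  baseUrl: "http://localhost:11434",
});

// Index one sample document in the in-memory demo vector store
const vectorStore = await MemoryVectorStore.fromDocuments(
  [
    {
      pageContent:
        "LangChain is the framework for building context-aware reasoning applications",
      metadata: {},
    },
  ],
  embeddings
);

// Retrieve the single most similar document for a query
const retriever = vectorStore.asRetriever(1);
const [topDocument] = await retriever.invoke("What is LangChain?");
console.log(topDocument.pageContent);
```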
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Ollama\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.026051683, 0.029081265, -0.040726297, -0.015116953, -0.010691089,\n", - " 0.030181013, -0.0065084146, -0.02079503, 0.013575795, 0.03452527,\n", - " 0.009578291, 0.007026421, -0.030110886, 0.013489622, -0.04294787,\n", - " 0.011141899, -0.043768786, -0.00362867, -0.0081198225, -0.03426076,\n", - " 0.010075142, 0.027787417, -0.09052663, -0.06039698, -0.009462592,\n", - " 0.06232288, 0.051121354, 0.011977532, 0.089046724, 0.059000008,\n", - " 0.031860664, -0.034242127, 0.020339863, 0.011483523, -0.05429335,\n", - " -0.04963588, 0.03263794, -0.05581542, 0.013908403, -0.012356067,\n", - " -0.007802118, -0.010027855, 0.00281217, -0.101886116, -0.079341754,\n", - " 0.011269771, 0.0035983133, -0.027667878, 0.032092705, -0.052843474,\n", - " -0.045283325, 0.0382421, 0.0193055, 0.011050924, 0.021132186,\n", - " -0.037696265, 0.0006107435, 0.0043520257, -0.028798066, 0.049155913,\n", - " 0.03590549, -0.0040995986, 0.019772101, -0.076119535, 0.0031298609,\n", - " 0.03368174, 0.039398745, -0.011813277, -0.019313531, -0.013108803,\n", - " -0.044905286, -0.022326004, -0.01656178, -0.06658457, 0.016789088,\n", - " 0.049952697, 0.006615693, -0.01694402, -0.018105473, 0.0049101883,\n", - " -0.004966945, 0.049762275, -0.03556957, -0.015986584, -0.03190983,\n", - " -0.05336687, -0.0020468342, -0.0016106658, -0.035291273, -0.029783724,\n", - " -0.010153295, 0.052100364, 0.05528949, 0.01379487, -0.024542747,\n", - " 0.028773975, 0.010087022, 0.030448131, -0.042391222, 0.016596776\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = 
await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# OllamaEmbeddings\n", + "\n", + "This will help you get started with Ollama [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `OllamaEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.OllamaEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/ollama/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [`OllamaEmbeddings`](https://api.js.langchain.com/classes/langchain_ollama.OllamaEmbeddings.html) | [`@langchain/ollama`](https://npmjs.com/@langchain/ollama) | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Ollama embedding models you'll need to follow [these instructions](https://github.com/jmorganca/ollama) to install Ollama, and install the `@langchain/ollama` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain OllamaEmbeddings integration lives in the `@langchain/ollama` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/ollama @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and embed text:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { OllamaEmbeddings } from \"@langchain/ollama\";\n", + "\n", + "const embeddings = new OllamaEmbeddings({\n", + " model: \"mxbai-embed-large\", // Default value\n", + " baseUrl: \"http://localhost:11434\", // Default value\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.026051683, 0.029081265, -0.040726297, -0.015116953, -0.010691089,\n", - " 0.030181013, -0.0065084146, -0.02079503, 0.013575795, 0.03452527,\n", - " 0.009578291, 0.007026421, -0.030110886, 0.013489622, -0.04294787,\n", - " 0.011141899, -0.043768786, -0.00362867, -0.0081198225, -0.03426076,\n", - " 0.010075142, 0.027787417, -0.09052663, -0.06039698, -0.009462592,\n", - " 
0.06232288, 0.051121354, 0.011977532, 0.089046724, 0.059000008,\n", - " 0.031860664, -0.034242127, 0.020339863, 0.011483523, -0.05429335,\n", - " -0.04963588, 0.03263794, -0.05581542, 0.013908403, -0.012356067,\n", - " -0.007802118, -0.010027855, 0.00281217, -0.101886116, -0.079341754,\n", - " 0.011269771, 0.0035983133, -0.027667878, 0.032092705, -0.052843474,\n", - " -0.045283325, 0.0382421, 0.0193055, 0.011050924, 0.021132186,\n", - " -0.037696265, 0.0006107435, 0.0043520257, -0.028798066, 0.049155913,\n", - " 0.03590549, -0.0040995986, 0.019772101, -0.076119535, 0.0031298609,\n", - " 0.03368174, 0.039398745, -0.011813277, -0.019313531, -0.013108803,\n", - " -0.044905286, -0.022326004, -0.01656178, -0.06658457, 0.016789088,\n", - " 0.049952697, 0.006615693, -0.01694402, -0.018105473, 0.0049101883,\n", - " -0.004966945, 0.049762275, -0.03556957, -0.015986584, -0.03190983,\n", - " -0.05336687, -0.0020468342, -0.0016106658, -0.035291273, -0.029783724,\n", - " -0.010153295, 0.052100364, 0.05528949, 0.01379487, -0.024542747,\n", - " 0.028773975, 0.010087022, 0.030448131, -0.042391222, 0.016596776\n", - "]\n", - "[\n", - " 0.0558515, 0.028698817, -0.037476595, 0.0048659276, -0.019229038,\n", - " -0.04713716, -0.020947812, -0.017550547, 0.01205507, 0.027693441,\n", - " -0.011791304, 0.009862203, 0.019662278, -0.037511427, -0.022662448,\n", - " 0.036224432, -0.051760387, -0.030165697, -0.008899774, -0.024518963,\n", - " 0.010077767, 0.032209765, -0.0854303, -0.038666975, -0.036021013,\n", - " 0.060899545, 0.045867186, 0.003365381, 0.09387081, 0.038216405,\n", - " 0.011449426, -0.016495887, 0.020602569, -0.02368503, -0.014733645,\n", - " -0.065408126, -0.0065152845, -0.027103946, 0.00038956117, -0.08648814,\n", - " 0.029316466, -0.054449145, 0.034129277, -0.055225655, -0.043182302,\n", - " 0.0011148591, 0.044116337, -0.046552557, 0.032423045, -0.03269365,\n", - " -0.05062933, 0.021473562, -0.011019348, -0.019621233, -0.0003149565,\n", - " -0.0046085776, 0.0052610254, -0.0029293327, -0.035793293, 0.034469575,\n", - " 0.037724957, 0.009572597, 0.014198464, -0.0878237, 0.0056973165,\n", - " 0.023563445, 0.030928325, 0.025520306, 0.01836824, -0.016456697,\n", - " -0.061934732, 0.009764942, -0.035812028, -0.04429064, 0.031323086,\n", - " 0.056027107, -0.0019782048, -0.015204176, -0.008684945, -0.0010460864,\n", - " 0.054642987, 0.044149086, -0.032964867, -0.012044753, -0.019075096,\n", - " -0.027932597, 0.018542245, -0.02602878, -0.04645578, -0.020976603,\n", - " 0.018999187, 0.050663687, 0.016725155, 0.0076955976, 0.011448177,\n", - " 0.053931057, -0.03234989, 0.024429373, -0.023123834, 0.02197912\n", - "]\n" - ] + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
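The direct-usage calls exercised further below, `embedQuery` for a single string and `embedDocuments` for a batch, can be sketched as follows (a minimal sketch, assuming the same local Ollama setup as above):

```typescript
import { OllamaEmbeddings } from "@langchain/ollama";

const embeddings = new OllamaEmbeddings({ model: "mxbai-embed-large" });

// Single query string -> one vector (number[])
const queryVector = await embeddings.embedQuery("What is LangChain?");

// Batch of documents -> one vector per input string (number[][]);
// providers may route this through a different path than query embedding
const documentVectors = await embeddings.embedDocuments([
  "LangChain is the framework for building context-aware reasoning applications",
  "LangGraph is a library for building stateful, multi-actor applications with LLMs",
]);

console.log(queryVector.length); // embedding dimension
console.log(documentVectors.length); // number of input documents
```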
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.026051683, 0.029081265, -0.040726297, -0.015116953, -0.010691089,\n", + " 0.030181013, -0.0065084146, -0.02079503, 0.013575795, 0.03452527,\n", + " 0.009578291, 0.007026421, -0.030110886, 0.013489622, -0.04294787,\n", + " 0.011141899, -0.043768786, -0.00362867, -0.0081198225, -0.03426076,\n", + " 0.010075142, 0.027787417, -0.09052663, -0.06039698, -0.009462592,\n", + " 0.06232288, 0.051121354, 0.011977532, 0.089046724, 0.059000008,\n", + " 0.031860664, -0.034242127, 0.020339863, 0.011483523, -0.05429335,\n", + " -0.04963588, 0.03263794, -0.05581542, 0.013908403, -0.012356067,\n", + " -0.007802118, -0.010027855, 0.00281217, -0.101886116, -0.079341754,\n", + " 0.011269771, 0.0035983133, -0.027667878, 0.032092705, -0.052843474,\n", + " -0.045283325, 0.0382421, 0.0193055, 0.011050924, 0.021132186,\n", + " -0.037696265, 0.0006107435, 0.0043520257, -0.028798066, 0.049155913,\n", + " 0.03590549, -0.0040995986, 0.019772101, -0.076119535, 0.0031298609,\n", + " 0.03368174, 0.039398745, -0.011813277, -0.019313531, -0.013108803,\n", + " -0.044905286, -0.022326004, -0.01656178, -0.06658457, 0.016789088,\n", + " 0.049952697, 0.006615693, -0.01694402, -0.018105473, 0.0049101883,\n", + " -0.004966945, 0.049762275, -0.03556957, -0.015986584, -0.03190983,\n", + " -0.05336687, -0.0020468342, -0.0016106658, -0.035291273, -0.029783724,\n", + " -0.010153295, 0.052100364, 0.05528949, 0.01379487, -0.024542747,\n", + " 0.028773975, 0.010087022, 0.030448131, -0.042391222, 0.016596776\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + 
"### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.026051683, 0.029081265, -0.040726297, -0.015116953, -0.010691089,\n", + " 0.030181013, -0.0065084146, -0.02079503, 0.013575795, 0.03452527,\n", + " 0.009578291, 0.007026421, -0.030110886, 0.013489622, -0.04294787,\n", + " 0.011141899, -0.043768786, -0.00362867, -0.0081198225, -0.03426076,\n", + " 0.010075142, 0.027787417, -0.09052663, -0.06039698, -0.009462592,\n", + " 0.06232288, 0.051121354, 0.011977532, 0.089046724, 0.059000008,\n", + " 0.031860664, -0.034242127, 0.020339863, 0.011483523, -0.05429335,\n", + " -0.04963588, 0.03263794, -0.05581542, 0.013908403, -0.012356067,\n", + " -0.007802118, -0.010027855, 0.00281217, -0.101886116, -0.079341754,\n", + " 0.011269771, 0.0035983133, -0.027667878, 0.032092705, -0.052843474,\n", + " -0.045283325, 0.0382421, 0.0193055, 0.011050924, 0.021132186,\n", + " -0.037696265, 0.0006107435, 0.0043520257, -0.028798066, 0.049155913,\n", + " 0.03590549, -0.0040995986, 0.019772101, -0.076119535, 0.0031298609,\n", + " 0.03368174, 0.039398745, -0.011813277, -0.019313531, -0.013108803,\n", + " -0.044905286, -0.022326004, -0.01656178, -0.06658457, 0.016789088,\n", + " 0.049952697, 0.006615693, -0.01694402, -0.018105473, 0.0049101883,\n", + " -0.004966945, 0.049762275, -0.03556957, -0.015986584, -0.03190983,\n", + " -0.05336687, -0.0020468342, -0.0016106658, -0.035291273, -0.029783724,\n", + " -0.010153295, 0.052100364, 0.05528949, 0.01379487, -0.024542747,\n", + " 0.028773975, 0.010087022, 0.030448131, -0.042391222, 0.016596776\n", + "]\n", + "[\n", + " 0.0558515, 0.028698817, -0.037476595, 0.0048659276, -0.019229038,\n", + " -0.04713716, -0.020947812, -0.017550547, 0.01205507, 0.027693441,\n", + " -0.011791304, 0.009862203, 0.019662278, -0.037511427, -0.022662448,\n", + " 0.036224432, -0.051760387, -0.030165697, -0.008899774, -0.024518963,\n", + " 0.010077767, 0.032209765, -0.0854303, -0.038666975, -0.036021013,\n", + " 0.060899545, 0.045867186, 0.003365381, 0.09387081, 0.038216405,\n", + " 0.011449426, -0.016495887, 0.020602569, -0.02368503, -0.014733645,\n", + " -0.065408126, -0.0065152845, -0.027103946, 0.00038956117, -0.08648814,\n", + " 0.029316466, -0.054449145, 0.034129277, -0.055225655, -0.043182302,\n", + " 0.0011148591, 0.044116337, -0.046552557, 0.032423045, -0.03269365,\n", + " -0.05062933, 0.021473562, -0.011019348, -0.019621233, -0.0003149565,\n", + " -0.0046085776, 0.0052610254, -0.0029293327, -0.035793293, 0.034469575,\n", + " 0.037724957, 0.009572597, 0.014198464, -0.0878237, 0.0056973165,\n", + " 0.023563445, 0.030928325, 0.025520306, 0.01836824, -0.016456697,\n", + " -0.061934732, 0.009764942, -0.035812028, -0.04429064, 0.031323086,\n", + " 0.056027107, -0.0019782048, -0.015204176, -0.008684945, -0.0010460864,\n", + " 0.054642987, 0.044149086, -0.032964867, -0.012044753, -0.019075096,\n", + " -0.027932597, 0.018542245, -0.02602878, -0.04645578, -0.020976603,\n", + " 0.018999187, 0.050663687, 0.016725155, 0.0076955976, 0.011448177,\n", + " 0.053931057, -0.03234989, 0.024429373, -0.023123834, 0.02197912\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + 
"const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "ecbe9d2c", + "metadata": {}, + "source": [ + "Ollama [model parameters](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values) are also supported:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "c3ddbce9", + "metadata": {}, + "outputs": [], + "source": [ + "import { OllamaEmbeddings } from \"@langchain/ollama\";\n", + "\n", + "const embeddingsCustomParams = new OllamaEmbeddings({\n", + " requestOptions: {\n", + " useMmap: true, // use_mmap 1\n", + " numThread: 6, // num_thread 6\n", + " numGpu: 1, // num_gpu 1\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "b48d41bb", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "- Embedding model [conceptual guide](/docs/concepts/embedding_models)\n", + "- Embedding model [how-to guides](/docs/how_to/#embedding-models)" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `OllamaEmbeddings` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.OllamaEmbeddings.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "ecbe9d2c", - "metadata": {}, - "source": [ - "Ollama [model parameters](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values) are also supported:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "c3ddbce9", - "metadata": {}, - "outputs": [], - "source": [ - "import { OllamaEmbeddings } from \"@langchain/ollama\";\n", - "\n", - "const embeddingsCustomParams = new OllamaEmbeddings({\n", - " requestOptions: {\n", - " useMmap: true, // use_mmap 1\n", - " numThread: 6, // num_thread 6\n", - " numGpu: 1, // num_gpu 1\n", - " },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "b48d41bb", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "- Embedding model [conceptual guide](/docs/concepts/#embedding-models)\n", - "- Embedding model [how-to guides](/docs/how_to/#embedding-models)" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `OllamaEmbeddings` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.OllamaEmbeddings.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - 
"mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/openai.ipynb b/docs/core_docs/docs/integrations/text_embedding/openai.ipynb index 4bd007ed6bba..14001b233f6c 100644 --- a/docs/core_docs/docs/integrations/text_embedding/openai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/openai.ipynb @@ -1,418 +1,418 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: OpenAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# OpenAI\n", - "\n", - "This will help you get started with OpenAIEmbeddings [embedding models](/docs/concepts#embedding-models) using LangChain. For detailed documentation on `OpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.OpenAIEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/openai/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [OpenAIEmbeddings](https://api.js.langchain.com/classes/langchain_openai.OpenAIEmbeddings.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access OpenAIEmbeddings embedding models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "Head to [platform.openai.com](https://platform.openai.com) to sign up to OpenAI and generate an API key. 
Once you've done this set the `OPENAI_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export OPENAI_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain OpenAIEmbeddings integration lives in the `@langchain/openai` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/openai @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " apiKey: \"YOUR-API-KEY\", // In Node.js defaults to process.env.OPENAI_API_KEY\n", - " batchSize: 512, // Default value if omitted is 512. Max is 2048\n", - " model: \"text-embedding-3-large\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "fb4153d3", - "metadata": {}, - "source": [ - "If you're part of an organization, you can set `process.env.OPENAI_ORGANIZATION` to your OpenAI organization id, or pass it in as `organization` when\n", - "initializing the model." - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
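As an aside on how the retriever described above ranks results: the query vector is compared against each stored document vector, typically by cosine similarity. A purely illustrative helper, not taken from the notebook, might look like this (it assumes `OPENAI_API_KEY` is set):

```typescript
import { OpenAIEmbeddings } from "@langchain/openai";

// Illustrative only: cosine similarity between two equal-length vectors
const cosineSimilarity = (a: number[], b: number[]): number => {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i += 1) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
};

const embeddings = new OpenAIEmbeddings({ model: "text-embedding-3-large" });

const queryVector = await embeddings.embedQuery("What is LangChain?");
const [onTopic, offTopic] = await embeddings.embedDocuments([
  "LangChain is the framework for building context-aware reasoning applications",
  "A recipe for sourdough bread",
]);

// The on-topic document should score noticeably higher than the unrelated one
console.log(cosineSimilarity(queryVector, onTopic));
console.log(cosineSimilarity(queryVector, offTopic));
```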
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: OpenAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.01927683, 0.0037708976, -0.032942563, 0.0037671267, 0.008175306,\n", - " -0.012511838, -0.009713832, 0.021403614, -0.015377721, 0.0018684798,\n", - " 0.020574018, 0.022399133, -0.02322873, -0.01524951, -0.00504169,\n", - " -0.007375876, -0.03448109, 0.00015130726, 0.021388533, -0.012564631,\n", - " -0.020031009, 0.027406884, -0.039217334, 0.03036327, 0.030393435,\n", - " -0.021750538, 0.032610722, -0.021162277, -0.025898525, 0.018869571,\n", - " 0.034179416, -0.013371604, 0.0037652412, -0.02146395, 0.0012641934,\n", - " -0.055688616, 0.05104287, 0.0024982197, -0.019095825, 0.0037369595,\n", - " 0.00088757504, 0.025189597, -0.018779071, 0.024978427, 0.016833287,\n", - " -0.0025868358, -0.011727491, -0.0021154736, -0.017738303, 0.0013839195,\n", - " -0.0131151825, -0.05405959, 0.029729757, -0.003393808, 0.019774588,\n", - " 0.028885076, 0.004355387, 0.026094612, 0.06479911, 0.038040817,\n", - " -0.03478276, -0.012594799, -0.024767255, -0.0031430433, 0.017874055,\n", - " -0.015294761, 0.005709139, 0.025355516, 0.044798266, 0.02549127,\n", - " -0.02524993, 0.00014553308, -0.019427665, -0.023545485, 0.008748483,\n", - " 0.019850006, -0.028417485, -0.001860938, -0.02318348, -0.010799851,\n", - " 0.04793565, -0.0048983963, 0.02193154, -0.026411368, 0.026426451,\n", - " -0.012149832, 0.035355937, -0.047814984, -0.027165547, -0.008228099,\n", - " -0.007737882, 0.023726488, -0.046487626, -0.007783133, -0.019638835,\n", - " 0.01793439, -0.018024892, 0.0030336871, -0.019578502, 0.0042837397\n", - "]\n" - ] - } - ], - "source": [ - 
"const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# OpenAI\n", + "\n", + "This will help you get started with OpenAIEmbeddings [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `OpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.OpenAIEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/openai/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [OpenAIEmbeddings](https://api.js.langchain.com/classes/langchain_openai.OpenAIEmbeddings.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access OpenAIEmbeddings embedding models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [platform.openai.com](https://platform.openai.com) to sign up to OpenAI and generate an API key. 
Once you've done this set the `OPENAI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain OpenAIEmbeddings integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai @langchain/core\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " -0.01927683, 0.0037708976, -0.032942563, 0.0037671267, 0.008175306,\n", - " -0.012511838, -0.009713832, 0.021403614, -0.015377721, 0.0018684798,\n", - " 0.020574018, 0.022399133, -0.02322873, -0.01524951, -0.00504169,\n", - " -0.007375876, -0.03448109, 0.00015130726, 0.021388533, -0.012564631,\n", - " -0.020031009, 0.027406884, -0.039217334, 0.03036327, 0.030393435,\n", - " -0.021750538, 0.032610722, -0.021162277, -0.025898525, 0.018869571,\n", - " 0.034179416, -0.013371604, 0.0037652412, -0.02146395, 0.0012641934,\n", - " -0.055688616, 0.05104287, 0.0024982197, -0.019095825, 0.0037369595,\n", - " 0.00088757504, 0.025189597, -0.018779071, 0.024978427, 0.016833287,\n", - " -0.0025868358, -0.011727491, -0.0021154736, -0.017738303, 0.0013839195,\n", - " -0.0131151825, -0.05405959, 0.029729757, -0.003393808, 0.019774588,\n", - " 0.028885076, 0.004355387, 0.026094612, 0.06479911, 0.038040817,\n", - " -0.03478276, -0.012594799, -0.024767255, -0.0031430433, 0.017874055,\n", - " -0.015294761, 0.005709139, 0.025355516, 0.044798266, 0.02549127,\n", - " -0.02524993, 0.00014553308, -0.019427665, -0.023545485, 0.008748483,\n", - " 0.019850006, -0.028417485, -0.001860938, -0.02318348, -0.010799851,\n", - " 0.04793565, -0.0048983963, 0.02193154, -0.026411368, 0.026426451,\n", - " -0.012149832, 0.035355937, -0.047814984, -0.027165547, -0.008228099,\n", - " -0.007737882, 0.023726488, -0.046487626, -0.007783133, -0.019638835,\n", - " 0.01793439, -0.018024892, 0.0030336871, -0.019578502, 0.0042837397\n", - "]\n", - "[\n", - " -0.010181213, 0.023419594, -0.04215527, -0.0015320902, -0.023573855,\n", - " -0.0091644935, -0.014893179, 0.019016149, -0.023475688, 0.0010219777,\n", - " 0.009255648, 0.03996757, -0.04366983, -0.01640774, -0.020194141,\n", - " 0.019408813, -0.027977299, -0.022017224, 0.013539891, -0.007769135,\n", - " 0.032647192, -0.015089511, -0.022900717, 0.023798235, 0.026084099,\n", - " -0.024625633, 0.035003178, -0.017978394, -0.049615882, 0.013364594,\n", - " 0.031132633, 0.019142363, 0.023195215, -0.038396914, 0.005584942,\n", - " -0.031946007, 0.053682756, -0.0036356465, 0.011240003, 0.0056690844,\n", - " -0.0062791156, 0.044146635, -0.037387207, 0.01300699, 0.018946031,\n", - " 0.0050415234, 0.029618073, -0.021750772, -0.000649473, 0.00026951815,\n", - " -0.014710871, -0.029814405, 0.04204308, -0.014710871, 0.0039616977,\n", - " -0.021512369, 0.054608323, 0.021484323, 0.02790718, -0.010573876,\n", - " -0.023952495, -0.035143413, -0.048802506, -0.0075798146, 0.023279356,\n", - " -0.022690361, -0.016590048, 0.0060477243, 0.014100839, 0.005476258,\n", - " -0.017221114, 
-0.0100059165, -0.017922299, -0.021989176, 0.01830094,\n", - " 0.05516927, 0.001033372, 0.0017310516, -0.00960624, -0.037864015,\n", - " 0.013063084, 0.006591143, -0.010160177, 0.0011394264, 0.04953174,\n", - " 0.004806626, 0.029421741, -0.037751824, 0.003618117, 0.007162609,\n", - " 0.027696826, -0.0021070621, -0.024485396, -0.0042141243, -0.02801937,\n", - " -0.019605145, 0.016281527, -0.035143413, 0.01640774, 0.042323552\n", - "]\n" - ] - } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "2b1a3527", - "metadata": {}, - "source": [ - "## Specifying dimensions\n", - "\n", - "With the `text-embedding-3` class of models, you can specify the size of the embeddings you want returned. For example by default `text-embedding-3-large` returns embeddings of dimension 3072:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a611fe1a", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "3072\n" - ] - } - ], - "source": [ - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddingsDefaultDimensions = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-large\",\n", - "});\n", - "\n", - "const vectorsDefaultDimensions = await embeddingsDefaultDimensions.embedDocuments([\"some text\"]);\n", - "console.log(vectorsDefaultDimensions[0].length);" - ] - }, - { - "cell_type": "markdown", - "id": "08efe771", - "metadata": {}, - "source": [ - "But by passing in `dimensions: 1024` we can reduce the size of our embeddings to 1024:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "19667fdb", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " apiKey: \"YOUR-API-KEY\", // In Node.js defaults to process.env.OPENAI_API_KEY\n", + " batchSize: 512, // Default value if omitted is 512. Max is 2048\n", + " model: \"text-embedding-3-large\",\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "1024\n" - ] + "cell_type": "markdown", + "id": "fb4153d3", + "metadata": {}, + "source": [ + "If you're part of an organization, you can set `process.env.OPENAI_ORGANIZATION` to your OpenAI organization id, or pass it in as `organization` when\n", + "initializing the model." + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. 
In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.01927683, 0.0037708976, -0.032942563, 0.0037671267, 0.008175306,\n", + " -0.012511838, -0.009713832, 0.021403614, -0.015377721, 0.0018684798,\n", + " 0.020574018, 0.022399133, -0.02322873, -0.01524951, -0.00504169,\n", + " -0.007375876, -0.03448109, 0.00015130726, 0.021388533, -0.012564631,\n", + " -0.020031009, 0.027406884, -0.039217334, 0.03036327, 0.030393435,\n", + " -0.021750538, 0.032610722, -0.021162277, -0.025898525, 0.018869571,\n", + " 0.034179416, -0.013371604, 0.0037652412, -0.02146395, 0.0012641934,\n", + " -0.055688616, 0.05104287, 0.0024982197, -0.019095825, 0.0037369595,\n", + " 0.00088757504, 0.025189597, -0.018779071, 0.024978427, 0.016833287,\n", + " -0.0025868358, -0.011727491, -0.0021154736, -0.017738303, 0.0013839195,\n", + " -0.0131151825, -0.05405959, 0.029729757, -0.003393808, 0.019774588,\n", + " 0.028885076, 0.004355387, 0.026094612, 0.06479911, 0.038040817,\n", + " -0.03478276, -0.012594799, -0.024767255, -0.0031430433, 0.017874055,\n", + " -0.015294761, 0.005709139, 0.025355516, 0.044798266, 0.02549127,\n", + " -0.02524993, 0.00014553308, -0.019427665, -0.023545485, 0.008748483,\n", + " 0.019850006, -0.028417485, -0.001860938, -0.02318348, -0.010799851,\n", + " 0.04793565, -0.0048983963, 0.02193154, -0.026411368, 0.026426451,\n", + " -0.012149832, 0.035355937, -0.047814984, -0.027165547, -0.008228099,\n", + " -0.007737882, 0.023726488, -0.046487626, -0.007783133, -0.019638835,\n", + " 0.01793439, -0.018024892, 0.0030336871, -0.019578502, 0.0042837397\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await 
embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " -0.01927683, 0.0037708976, -0.032942563, 0.0037671267, 0.008175306,\n", + " -0.012511838, -0.009713832, 0.021403614, -0.015377721, 0.0018684798,\n", + " 0.020574018, 0.022399133, -0.02322873, -0.01524951, -0.00504169,\n", + " -0.007375876, -0.03448109, 0.00015130726, 0.021388533, -0.012564631,\n", + " -0.020031009, 0.027406884, -0.039217334, 0.03036327, 0.030393435,\n", + " -0.021750538, 0.032610722, -0.021162277, -0.025898525, 0.018869571,\n", + " 0.034179416, -0.013371604, 0.0037652412, -0.02146395, 0.0012641934,\n", + " -0.055688616, 0.05104287, 0.0024982197, -0.019095825, 0.0037369595,\n", + " 0.00088757504, 0.025189597, -0.018779071, 0.024978427, 0.016833287,\n", + " -0.0025868358, -0.011727491, -0.0021154736, -0.017738303, 0.0013839195,\n", + " -0.0131151825, -0.05405959, 0.029729757, -0.003393808, 0.019774588,\n", + " 0.028885076, 0.004355387, 0.026094612, 0.06479911, 0.038040817,\n", + " -0.03478276, -0.012594799, -0.024767255, -0.0031430433, 0.017874055,\n", + " -0.015294761, 0.005709139, 0.025355516, 0.044798266, 0.02549127,\n", + " -0.02524993, 0.00014553308, -0.019427665, -0.023545485, 0.008748483,\n", + " 0.019850006, -0.028417485, -0.001860938, -0.02318348, -0.010799851,\n", + " 0.04793565, -0.0048983963, 0.02193154, -0.026411368, 0.026426451,\n", + " -0.012149832, 0.035355937, -0.047814984, -0.027165547, -0.008228099,\n", + " -0.007737882, 0.023726488, -0.046487626, -0.007783133, -0.019638835,\n", + " 0.01793439, -0.018024892, 0.0030336871, -0.019578502, 0.0042837397\n", + "]\n", + "[\n", + " -0.010181213, 0.023419594, -0.04215527, -0.0015320902, -0.023573855,\n", + " -0.0091644935, -0.014893179, 0.019016149, -0.023475688, 0.0010219777,\n", + " 0.009255648, 0.03996757, -0.04366983, -0.01640774, -0.020194141,\n", + " 0.019408813, -0.027977299, -0.022017224, 0.013539891, -0.007769135,\n", + " 0.032647192, -0.015089511, -0.022900717, 0.023798235, 0.026084099,\n", + " -0.024625633, 0.035003178, -0.017978394, -0.049615882, 0.013364594,\n", + " 0.031132633, 0.019142363, 0.023195215, -0.038396914, 0.005584942,\n", + " -0.031946007, 0.053682756, -0.0036356465, 0.011240003, 0.0056690844,\n", + " -0.0062791156, 0.044146635, -0.037387207, 0.01300699, 0.018946031,\n", + " 0.0050415234, 0.029618073, -0.021750772, -0.000649473, 0.00026951815,\n", + " -0.014710871, -0.029814405, 0.04204308, -0.014710871, 0.0039616977,\n", + " -0.021512369, 0.054608323, 0.021484323, 0.02790718, -0.010573876,\n", + " -0.023952495, -0.035143413, -0.048802506, -0.0075798146, 0.023279356,\n", + " -0.022690361, -0.016590048, 0.0060477243, 0.014100839, 0.005476258,\n", + " -0.017221114, -0.0100059165, -0.017922299, -0.021989176, 0.01830094,\n", + " 0.05516927, 0.001033372, 0.0017310516, -0.00960624, -0.037864015,\n", + " 0.013063084, 0.006591143, -0.010160177, 0.0011394264, 0.04953174,\n", + " 0.004806626, 0.029421741, -0.037751824, 0.003618117, 0.007162609,\n", + " 0.027696826, -0.0021070621, -0.024485396, -0.0042141243, -0.02801937,\n", + " -0.019605145, 
0.016281527, -0.035143413, 0.01640774, 0.042323552\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "2b1a3527", + "metadata": {}, + "source": [ + "## Specifying dimensions\n", + "\n", + "With the `text-embedding-3` class of models, you can specify the size of the embeddings you want returned. For example by default `text-embedding-3-large` returns embeddings of dimension 3072:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a611fe1a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3072\n" + ] + } + ], + "source": [ + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddingsDefaultDimensions = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-large\",\n", + "});\n", + "\n", + "const vectorsDefaultDimensions = await embeddingsDefaultDimensions.embedDocuments([\"some text\"]);\n", + "console.log(vectorsDefaultDimensions[0].length);" + ] + }, + { + "cell_type": "markdown", + "id": "08efe771", + "metadata": {}, + "source": [ + "But by passing in `dimensions: 1024` we can reduce the size of our embeddings to 1024:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "19667fdb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1024\n" + ] + } + ], + "source": [ + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings1024 = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-large\",\n", + " dimensions: 1024,\n", + "});\n", + "\n", + "const vectors1024 = await embeddings1024.embedDocuments([\"some text\"]);\n", + "console.log(vectors1024[0].length);" + ] + }, + { + "cell_type": "markdown", + "id": "6b84c0df", + "metadata": {}, + "source": [ + "## Custom URLs\n", + "\n", + "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3bfa20a6", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const model = new OpenAIEmbeddings({\n", + " configuration: {\n", + " baseURL: \"https://your_custom_url.com\",\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ac3cac9b", + "metadata": {}, + "source": [ + "You can also pass other `ClientOptions` parameters accepted by the official SDK.\n", + "\n", + "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/text_embedding/azure_openai)." 
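Putting the options above together, a single configuration sketch might look like the following; the base URL and organization id are placeholders rather than values from this document:

```typescript
import { OpenAIEmbeddings } from "@langchain/openai";

// Placeholder values: substitute your own proxy URL and organization id if you use them
const embeddings = new OpenAIEmbeddings({
  model: "text-embedding-3-large",
  dimensions: 1024, // shrink the returned vectors from the default 3072
  organization: "org-your-org-id",
  configuration: {
    baseURL: "https://your_custom_url.com", // passed through to the underlying OpenAI SDK client
  },
});

const [vector] = await embeddings.embedDocuments(["some text"]);
console.log(vector.length); // 1024 with the dimensions option above
```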
+ ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all OpenAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.OpenAIEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings1024 = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-large\",\n", - " dimensions: 1024,\n", - "});\n", - "\n", - "const vectors1024 = await embeddings1024.embedDocuments([\"some text\"]);\n", - "console.log(vectors1024[0].length);" - ] - }, - { - "cell_type": "markdown", - "id": "6b84c0df", - "metadata": {}, - "source": [ - "## Custom URLs\n", - "\n", - "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bfa20a6", - "metadata": {}, - "outputs": [], - "source": [ - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const model = new OpenAIEmbeddings({\n", - " configuration: {\n", - " baseURL: \"https://your_custom_url.com\",\n", - " },\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac3cac9b", - "metadata": {}, - "source": [ - "You can also pass other `ClientOptions` parameters accepted by the official SDK.\n", - "\n", - "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/text_embedding/azure_openai)." 
- ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all OpenAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.OpenAIEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/premai.mdx b/docs/core_docs/docs/integrations/text_embedding/premai.mdx index 8c6c1a3d2b4d..5b96a138d7ad 100644 --- a/docs/core_docs/docs/integrations/text_embedding/premai.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/premai.mdx @@ -29,5 +29,5 @@ import PremExample from "@examples/embeddings/premai.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx b/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx index dfec6524c77e..4e364762f52f 100644 --- a/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/tencent_hunyuan.mdx @@ -39,5 +39,5 @@ import TencentHunyuan from "@examples/models/embeddings/tencent_hunyuan.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx b/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx index 7a83f5a36bf8..6778824dbb3b 100644 --- a/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx @@ -17,5 +17,5 @@ This example uses the CPU backend, which works in any JS environment. However, y ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb b/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb index 508a098f8688..029ab2877261 100644 --- a/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/togetherai.ipynb @@ -1,302 +1,302 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: TogetherAI\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# TogetherAIEmbeddings\n", - "\n", - "This will help you get started with TogetherAIEmbeddings [embedding models](/docs/concepts#embedding-models) using LangChain. 
For detailed documentation on `TogetherAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_embeddings_togetherai.TogetherAIEmbeddings.html).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/together/) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [TogetherAIEmbeddings](https://api.js.langchain.com/classes/langchain_community_embeddings_togetherai.TogetherAIEmbeddings.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_embeddings_togetherai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "To access TogetherAI embedding models you'll need to create a TogetherAI account, get an API key, and install the `@langchain/community` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "You can sign up for a Together account and create an API key [here](https://api.together.xyz/). Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", - "\n", - "```bash\n", - "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain TogetherAIEmbeddings integration lives in the `@langchain/community` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { TogetherAIEmbeddings } from \"@langchain/community/embeddings/togetherai\";\n", - "\n", - "const embeddings = new TogetherAIEmbeddings({\n", - " model: \"togethercomputer/m2-bert-80M-8k-retrieval\", // Default value\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d817716b", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "LangChain is the framework for building context-aware reasoning applications\n" - ] - } - ], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "0d2befcd", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: TogetherAI\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.3812227, -0.052848946, -0.10564975, 0.03480297, 0.2878488,\n", - " 0.0084609175, 0.11605915, 0.05303011, 0.14711718, -0.14407106,\n", - " -0.29865336, -0.15807179, -0.068397366, -0.2708063, 0.056596708,\n", - " -0.07656515, 0.052995138, -0.11275427, 0.028096694, 0.123501234,\n", - " -0.039519835, 0.12148692, -0.12820457, 0.15691335, 0.033519063,\n", - " -0.27026987, -0.08460162, -0.23792154, -0.234982, -0.05786798,\n", - " 0.016467346, -0.17168592, -0.060787182, 0.038752213, -0.08169927,\n", - " 0.09327062, 0.29490772, 0.0167866, -0.32224452, -0.2037822,\n", - " -0.10284172, -0.124050565, 0.25344968, -0.06275548, -0.14180769,\n", - " 0.0046709594, 0.073105976, 0.12004031, 0.19224276, -0.022589967,\n", - " 0.102790825, 0.1138286, -0.057701062, -0.050010648, -0.1632584,\n", - " -0.18942119, -0.12018798, 0.15288158, 0.07941474, 0.10440051,\n", - " -0.13257962, -0.19282033, 0.044656333, 0.13560675, -0.068929024,\n", - " 0.028590716, 0.055663664, 0.04652713, 0.014936657, 0.120679885,\n", - " 0.053866718, -0.16296014, 0.119450666, -0.29559663, 0.008097747,\n", - " 0.07380408, -0.09010084, -0.0687739, -0.08575685, -0.07202606,\n", - " 0.18868081, -0.08392917, 0.014016109, 0.15435852, -0.030115498,\n", - " -0.16927013, 0.02836557, -0.050763763, 0.0840437, -0.22718845,\n", - " 0.111397505, 0.033395614, -0.123287566, -0.2111604, -0.1580479,\n", - " 0.05520573, -0.1422921, 0.08828953, 0.051058788, -0.13312188\n", - "]\n" - ] - } - ], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - 
"console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# TogetherAIEmbeddings\n", + "\n", + "This will help you get started with TogetherAIEmbeddings [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `TogetherAIEmbeddings` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_embeddings_togetherai.TogetherAIEmbeddings.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/together/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [TogetherAIEmbeddings](https://api.js.langchain.com/classes/langchain_community_embeddings_togetherai.TogetherAIEmbeddings.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_embeddings_togetherai.html) | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access TogetherAI embedding models you'll need to create a TogetherAI account, get an API key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "You can sign up for a Together account and create an API key [here](https://api.together.xyz/). 
Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain TogetherAIEmbeddings integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { TogetherAIEmbeddings } from \"@langchain/community/embeddings/togetherai\";\n", + "\n", + "const embeddings = new TogetherAIEmbeddings({\n", + " model: \"togethercomputer/m2-bert-80M-8k-retrieval\", // Default value\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d817716b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LangChain is the framework for building context-aware reasoning applications\n" + ] + } + ], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " 0.3812227, -0.052848946, -0.10564975, 0.03480297, 0.2878488,\n", - " 0.0084609175, 0.11605915, 0.05303011, 0.14711718, -0.14407106,\n", - " -0.29865336, -0.15807179, -0.068397366, -0.2708063, 0.056596708,\n", - " -0.07656515, 0.052995138, -0.11275427, 0.028096694, 0.123501234,\n", - " -0.039519835, 0.12148692, -0.12820457, 0.15691335, 0.033519063,\n", - " -0.27026987, -0.08460162, -0.23792154, -0.234982, -0.05786798,\n", - " 0.016467346, -0.17168592, -0.060787182, 0.038752213, -0.08169927,\n", - " 0.09327062, 0.29490772, 0.0167866, -0.32224452, -0.2037822,\n", - " -0.10284172, -0.124050565, 0.25344968, -0.06275548, -0.14180769,\n", - " 0.0046709594, 0.073105976, 0.12004031, 0.19224276, -0.022589967,\n", - " 0.102790825, 0.1138286, -0.057701062, -0.050010648, -0.1632584,\n", - " -0.18942119, -0.12018798, 0.15288158, 0.07941474, 0.10440051,\n", - " -0.13257962, -0.19282033, 0.044656333, 0.13560675, -0.068929024,\n", - " 0.028590716, 0.055663664, 0.04652713, 0.014936657, 0.120679885,\n", - " 0.053866718, -0.16296014, 0.119450666, -0.29559663, 0.008097747,\n", - " 0.07380408, -0.09010084, -0.0687739, -0.08575685, -0.07202606,\n", - " 0.18868081, -0.08392917, 0.014016109, 0.15435852, -0.030115498,\n", - " -0.16927013, 0.02836557, -0.050763763, 0.0840437, -0.22718845,\n", - " 0.111397505, 0.033395614, -0.123287566, -0.2111604, -0.1580479,\n", - " 0.05520573, -0.1422921, 0.08828953, 0.051058788, -0.13312188\n", - "]\n", - "[\n", - " 0.066308185, -0.032866564, 0.115751594, 0.19082588, 0.14017,\n", - " -0.26976448, -0.056340694, -0.26923394, 0.2548541, -0.27271318,\n", - " -0.2244126, 0.07949589, -0.27710953, -0.17993368, 0.09681616,\n", - " -0.08692256, 0.22127126, -0.14512022, -0.18016525, 0.14892976,\n", - " -0.0526347, -0.008140617, -0.2916987, 0.23706906, -0.38488507,\n", - " -0.35881752, 0.09276949, 
-0.07051063, -0.07778231, 0.12552947,\n", - " 0.06256748, -0.25832427, 0.025054429, -0.1451448, -0.2662871,\n", - " 0.13676351, -0.07413256, 0.14966589, -0.39968985, 0.15542287,\n", - " -0.13107607, 0.02761394, 0.108077586, -0.12076956, 0.128296,\n", - " -0.05625126, 0.15723586, -0.056932643, 0.23720805, 0.23993455,\n", - " -0.035553705, -0.053907514, -0.11852807, 0.07005695, -0.06317475,\n", - " 0.070009425, 0.284697, 0.2212059, 0.018890115, 0.16924675,\n", - " 0.21651487, 0.07259682, 0.1328156, 0.3261852, 0.1914124,\n", - " -0.10120423, 0.03450111, -0.22588971, -0.04458192, 0.24116798,\n", - " -0.021830376, -0.30731413, 0.08586451, -0.058835756, 0.0010347435,\n", - " 0.0031927782, -0.09403646, -0.22608931, 0.15865424, 0.15738021,\n", - " 0.23582733, 0.1714161, 0.1585189, -0.18085755, 0.019376995,\n", - " -0.026587496, -0.017079154, -0.04588549, -0.047336094, -0.082413346,\n", - " -0.1114185, -0.05403556, 0.12438637, -0.20476522, 0.073182,\n", - " -0.12210378, -0.010543863, -0.09767598, 0.1057683, -0.050204434\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 3, + "id": "0d2befcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.3812227, -0.052848946, -0.10564975, 0.03480297, 0.2878488,\n", + " 0.0084609175, 0.11605915, 0.05303011, 0.14711718, -0.14407106,\n", + " -0.29865336, -0.15807179, -0.068397366, -0.2708063, 0.056596708,\n", + " -0.07656515, 0.052995138, -0.11275427, 0.028096694, 0.123501234,\n", + " -0.039519835, 0.12148692, -0.12820457, 0.15691335, 0.033519063,\n", + " -0.27026987, -0.08460162, -0.23792154, -0.234982, -0.05786798,\n", + " 0.016467346, -0.17168592, -0.060787182, 0.038752213, -0.08169927,\n", + " 0.09327062, 0.29490772, 0.0167866, -0.32224452, -0.2037822,\n", + " -0.10284172, -0.124050565, 0.25344968, -0.06275548, -0.14180769,\n", + " 0.0046709594, 0.073105976, 0.12004031, 0.19224276, -0.022589967,\n", + " 0.102790825, 0.1138286, -0.057701062, -0.050010648, -0.1632584,\n", + " -0.18942119, -0.12018798, 0.15288158, 0.07941474, 0.10440051,\n", + " -0.13257962, -0.19282033, 0.044656333, 0.13560675, -0.068929024,\n", + " 0.028590716, 0.055663664, 0.04652713, 0.014936657, 0.120679885,\n", + " 0.053866718, -0.16296014, 0.119450666, -0.29559663, 0.008097747,\n", + " 0.07380408, -0.09010084, -0.0687739, -0.08575685, -0.07202606,\n", + " 0.18868081, -0.08392917, 0.014016109, 0.15435852, -0.030115498,\n", + " -0.16927013, 0.02836557, -0.050763763, 0.0840437, -0.22718845,\n", + " 0.111397505, 0.033395614, -0.123287566, -0.2111604, -0.1580479,\n", + " 0.05520573, -0.1422921, 0.08828953, 0.051058788, -0.13312188\n", + "]\n" + ] + } + ], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. 
The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " 0.3812227, -0.052848946, -0.10564975, 0.03480297, 0.2878488,\n", + " 0.0084609175, 0.11605915, 0.05303011, 0.14711718, -0.14407106,\n", + " -0.29865336, -0.15807179, -0.068397366, -0.2708063, 0.056596708,\n", + " -0.07656515, 0.052995138, -0.11275427, 0.028096694, 0.123501234,\n", + " -0.039519835, 0.12148692, -0.12820457, 0.15691335, 0.033519063,\n", + " -0.27026987, -0.08460162, -0.23792154, -0.234982, -0.05786798,\n", + " 0.016467346, -0.17168592, -0.060787182, 0.038752213, -0.08169927,\n", + " 0.09327062, 0.29490772, 0.0167866, -0.32224452, -0.2037822,\n", + " -0.10284172, -0.124050565, 0.25344968, -0.06275548, -0.14180769,\n", + " 0.0046709594, 0.073105976, 0.12004031, 0.19224276, -0.022589967,\n", + " 0.102790825, 0.1138286, -0.057701062, -0.050010648, -0.1632584,\n", + " -0.18942119, -0.12018798, 0.15288158, 0.07941474, 0.10440051,\n", + " -0.13257962, -0.19282033, 0.044656333, 0.13560675, -0.068929024,\n", + " 0.028590716, 0.055663664, 0.04652713, 0.014936657, 0.120679885,\n", + " 0.053866718, -0.16296014, 0.119450666, -0.29559663, 0.008097747,\n", + " 0.07380408, -0.09010084, -0.0687739, -0.08575685, -0.07202606,\n", + " 0.18868081, -0.08392917, 0.014016109, 0.15435852, -0.030115498,\n", + " -0.16927013, 0.02836557, -0.050763763, 0.0840437, -0.22718845,\n", + " 0.111397505, 0.033395614, -0.123287566, -0.2111604, -0.1580479,\n", + " 0.05520573, -0.1422921, 0.08828953, 0.051058788, -0.13312188\n", + "]\n", + "[\n", + " 0.066308185, -0.032866564, 0.115751594, 0.19082588, 0.14017,\n", + " -0.26976448, -0.056340694, -0.26923394, 0.2548541, -0.27271318,\n", + " -0.2244126, 0.07949589, -0.27710953, -0.17993368, 0.09681616,\n", + " -0.08692256, 0.22127126, -0.14512022, -0.18016525, 0.14892976,\n", + " -0.0526347, -0.008140617, -0.2916987, 0.23706906, -0.38488507,\n", + " -0.35881752, 0.09276949, -0.07051063, -0.07778231, 0.12552947,\n", + " 0.06256748, -0.25832427, 0.025054429, -0.1451448, -0.2662871,\n", + " 0.13676351, -0.07413256, 0.14966589, -0.39968985, 0.15542287,\n", + " -0.13107607, 0.02761394, 0.108077586, -0.12076956, 0.128296,\n", + " -0.05625126, 0.15723586, -0.056932643, 0.23720805, 0.23993455,\n", + " -0.035553705, -0.053907514, -0.11852807, 0.07005695, -0.06317475,\n", + " 0.070009425, 0.284697, 0.2212059, 0.018890115, 0.16924675,\n", + " 0.21651487, 0.07259682, 0.1328156, 0.3261852, 0.1914124,\n", + " -0.10120423, 0.03450111, -0.22588971, -0.04458192, 0.24116798,\n", + " -0.021830376, -0.30731413, 0.08586451, -0.058835756, 0.0010347435,\n", + " 0.0031927782, -0.09403646, -0.22608931, 0.15865424, 0.15738021,\n", + " 0.23582733, 0.1714161, 0.1585189, -0.18085755, 0.019376995,\n", + " -0.026587496, -0.017079154, -0.04588549, -0.047336094, -0.082413346,\n", + " -0.1114185, -0.05403556, 0.12438637, -0.20476522, 0.073182,\n", + " -0.12210378, -0.010543863, -0.09767598, 0.1057683, -0.050204434\n", + "]\n" + ] + } + ], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + 
"## API reference\n", + "\n", + "For detailed documentation of all TogetherAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_embeddings_togetherai.TogetherAIEmbeddings.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all TogetherAIEmbeddings features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_embeddings_togetherai.TogetherAIEmbeddings.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/text_embedding/transformers.mdx b/docs/core_docs/docs/integrations/text_embedding/transformers.mdx index da7bda4c47cb..dc75291a39de 100644 --- a/docs/core_docs/docs/integrations/text_embedding/transformers.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/transformers.mdx @@ -36,5 +36,5 @@ import HFTransformersExample from "@examples/models/embeddings/hf_transformers.t ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx b/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx index 3ebbb6cc621c..ae3d3a5cb543 100644 --- a/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/voyageai.mdx @@ -19,5 +19,5 @@ const embeddings = new VoyageEmbeddings({ ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx b/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx index 284a13020b12..ab4e198b3a8a 100644 --- a/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/zhipuai.mdx @@ -31,5 +31,5 @@ import ZhipuAIExample from "@examples/embeddings/zhipuai.ts"; ## Related -- Embedding model [conceptual guide](/docs/concepts/#embedding-models) +- Embedding model [conceptual guide](/docs/concepts/embedding_models) - Embedding model [how-to 
guides](/docs/how_to/#embedding-models) diff --git a/docs/core_docs/docs/integrations/toolkits/openapi.ipynb b/docs/core_docs/docs/integrations/toolkits/openapi.ipynb index 95ead5b1bdc0..afbc84478784 100644 --- a/docs/core_docs/docs/integrations/toolkits/openapi.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/openapi.ipynb @@ -1,322 +1,322 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: OpenApi Toolkit\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# OpenApiToolkit\n", - "\n", - "```{=mdx}\n", - "\n", - ":::caution Disclaimer ⚠️\n", - "\n", - "This agent can make requests to external APIs. Use with caution, especially when granting access to users.\n", - "\n", - "Be aware that this agent could theoretically send requests with provided credentials or other sensitive data to unverified or potentially malicious URLs --although it should never in theory.\n", - "\n", - "Consider adding limitations to what actions can be performed via the agent, what APIs it can access, what headers can be passed, and more.\n", - "\n", - "In addition, consider implementing measures to validate URLs before sending requests, and to securely handle and protect sensitive data such as credentials.\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "This will help you getting started with the [OpenApiToolkit](/docs/concepts/#toolkits). For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.OpenApiToolkit.html).\n", - "\n", - "The `OpenAPIToolkit` has access to the following tools:\n", - "\n", - "| Name | Description |\n", - "|-----------------|-------------|\n", - "| `requests_get` | A portal to the internet. Use this when you need to get specific content from a website. Input should be a url string (i.e. \"https://www.google.com\"). The output will be the text response of the GET request. |\n", - "| `requests_post` | Use this when you want to POST to a website. Input should be a json string with two keys: \"url\" and \"data\". The value of \"url\" should be a string, and the value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body. Be careful to always use double quotes for strings in the json string. The output will be the text response of the POST request. |\n", - "| `json_explorer` | Can be used to answer questions about the openapi spec for the API. Always use this tool before trying to make a request. Example inputs to this tool: 'What are the required query parameters for a GET request to the /bar endpoint?' 'What are the required parameters in the request body for a POST request to the /foo endpoint?' Always give this tool a specific question. |\n", - "\n", - "## Setup\n", - "\n", - "This toolkit requires an OpenAPI spec file. The LangChain.js repository has a [sample OpenAPI spec file in the `examples` directory](https://github.com/langchain-ai/langchainjs/blob/cc21aa29102571204f4443a40b53d28581a12e30/examples/openai_openapi.yaml). 
You can use this file to test the toolkit.\n", - "\n", - "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "This toolkit lives in the `langchain` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " langchain @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our toolkit. First, we need to define the LLM we would like to use in the toolkit.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "63aec3e6", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { OpenApiToolkit } from \"langchain/agents/toolkits\"\n", - "import * as fs from \"fs\";\n", - "import * as yaml from \"js-yaml\";\n", - "import { JsonSpec, JsonObject } from \"langchain/tools\";\n", - "\n", - "// Load & convert the OpenAPI spec from YAML to JSON.\n", - "const yamlFile = fs.readFileSync(\"../../../../../examples/openai_openapi.yaml\", \"utf8\");\n", - "const data = yaml.load(yamlFile) as JsonObject;\n", - "if (!data) {\n", - " throw new Error(\"Failed to load OpenAPI spec\");\n", - "}\n", - "\n", - "// Define headers for the API requests.\n", - "const headers = {\n", - " \"Content-Type\": \"application/json\",\n", - " Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,\n", - "};\n", - "\n", - "const toolkit = new OpenApiToolkit(new JsonSpec(data), llm, headers);" - ] - }, - { - "cell_type": "markdown", - "id": "5c5f2839-4020-424e-9fc9-07777eede442", - "metadata": {}, - "source": [ - "## Tools\n", - "\n", - "View available tools:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'requests_get',\n", - " description: 'A portal to the internet. Use this when you need to get specific content from a website.\\n' +\n", - " ' Input should be a url string (i.e. \"https://www.google.com\"). 
The output will be the text response of the GET request.'\n", - " },\n", - " {\n", - " name: 'requests_post',\n", - " description: 'Use this when you want to POST to a website.\\n' +\n", - " ' Input should be a json string with two keys: \"url\" and \"data\".\\n' +\n", - " ' The value of \"url\" should be a string, and the value of \"data\" should be a dictionary of\\n' +\n", - " ' key-value pairs you want to POST to the url as a JSON body.\\n' +\n", - " ' Be careful to always use double quotes for strings in the json string\\n' +\n", - " ' The output will be the text response of the POST request.'\n", - " },\n", - " {\n", - " name: 'json_explorer',\n", - " description: '\\n' +\n", - " 'Can be used to answer questions about the openapi spec for the API. Always use this tool before trying to make a request. \\n' +\n", - " 'Example inputs to this tool: \\n' +\n", - " \" 'What are the required query parameters for a GET request to the /bar endpoint?'\\n\" +\n", - " \" 'What are the required parameters in the request body for a POST request to the /foo endpoint?'\\n\" +\n", - " 'Always give this tool a specific question.'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const tools = toolkit.getTools();\n", - "\n", - "console.log(tools.map((tool) => ({\n", - " name: tool.name,\n", - " description: tool.description,\n", - "})))" - ] - }, - { - "cell_type": "markdown", - "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", - "metadata": {}, - "source": [ - "## Use within an agent\n", - "\n", - "First, ensure you have LangGraph installed:\n", - "\n", - "```{=mdx}\n", - "\n", - " @langchain/langgraph\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d", - "metadata": {}, - "outputs": [], - "source": [ - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", - "\n", - "const agentExecutor = createReactAgent({ llm, tools });" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: OpenApi Toolkit\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'requests_post',\n", - " args: {\n", - " input: '{\"url\":\"https://api.openai.com/v1/chat/completions\",\"data\":{\"model\":\"gpt-4o-mini\",\"messages\":[{\"role\":\"user\",\"content\":\"tell me a joke.\"}]}}'\n", - " },\n", - " type: 'tool_call',\n", - " id: 'call_1HqyZrbYgKFwQRfAtsZA2uL5'\n", - " }\n", - "]\n", - "{\n", - " \"id\": \"chatcmpl-9t36IIuRCs0WGMEy69HUqPcKvOc1w\",\n", - " \"object\": \"chat.completion\",\n", - " \"created\": 1722906986,\n", - " \"model\": \"gpt-4o-mini-2024-07-18\",\n", - " \"choices\": [\n", - " {\n", - " \"index\": 0,\n", - " \"message\": {\n", - " \"role\": \"assistant\",\n", - " \"content\": \"Why don't skeletons fight each other? 
\\n\\nThey don't have the guts!\"\n", - " },\n", - " \"logprobs\": null,\n", - " \"finish_reason\": \"stop\"\n", - " }\n", - " ],\n", - " \"usage\": {\n", - " \"prompt_tokens\": 12,\n", - " \"completion_tokens\": 15,\n", - " \"total_tokens\": 27\n", - " },\n", - " \"system_fingerprint\": \"fp_48196bc67a\"\n", - "}\n", - "\n", - "Here's a joke for you:\n", - "\n", - "**Why don't skeletons fight each other?** \n", - "They don't have the guts!\n" - ] + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# OpenApiToolkit\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution Disclaimer ⚠️\n", + "\n", + "This agent can make requests to external APIs. Use with caution, especially when granting access to users.\n", + "\n", + "Be aware that this agent could theoretically send requests with provided credentials or other sensitive data to unverified or potentially malicious URLs --although it should never in theory.\n", + "\n", + "Consider adding limitations to what actions can be performed via the agent, what APIs it can access, what headers can be passed, and more.\n", + "\n", + "In addition, consider implementing measures to validate URLs before sending requests, and to securely handle and protect sensitive data such as credentials.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you getting started with the [OpenApiToolkit](/docs/concepts/tools/#toolkits). For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.OpenApiToolkit.html).\n", + "\n", + "The `OpenAPIToolkit` has access to the following tools:\n", + "\n", + "| Name | Description |\n", + "|-----------------|-------------|\n", + "| `requests_get` | A portal to the internet. Use this when you need to get specific content from a website. Input should be a url string (i.e. \"https://www.google.com\"). The output will be the text response of the GET request. |\n", + "| `requests_post` | Use this when you want to POST to a website. Input should be a json string with two keys: \"url\" and \"data\". The value of \"url\" should be a string, and the value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body. Be careful to always use double quotes for strings in the json string. The output will be the text response of the POST request. |\n", + "| `json_explorer` | Can be used to answer questions about the openapi spec for the API. Always use this tool before trying to make a request. Example inputs to this tool: 'What are the required query parameters for a GET request to the /bar endpoint?' 'What are the required parameters in the request body for a POST request to the /foo endpoint?' Always give this tool a specific question. |\n", + "\n", + "## Setup\n", + "\n", + "This toolkit requires an OpenAPI spec file. The LangChain.js repository has a [sample OpenAPI spec file in the `examples` directory](https://github.com/langchain-ai/langchainjs/blob/cc21aa29102571204f4443a40b53d28581a12e30/examples/openai_openapi.yaml). 
You can use this file to test the toolkit.\n", + "\n", + "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "This toolkit lives in the `langchain` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " langchain @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our toolkit. First, we need to define the LLM we would like to use in the toolkit.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "63aec3e6", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenApiToolkit } from \"langchain/agents/toolkits\"\n", + "import * as fs from \"fs\";\n", + "import * as yaml from \"js-yaml\";\n", + "import { JsonSpec, JsonObject } from \"langchain/tools\";\n", + "\n", + "// Load & convert the OpenAPI spec from YAML to JSON.\n", + "const yamlFile = fs.readFileSync(\"../../../../../examples/openai_openapi.yaml\", \"utf8\");\n", + "const data = yaml.load(yamlFile) as JsonObject;\n", + "if (!data) {\n", + " throw new Error(\"Failed to load OpenAPI spec\");\n", + "}\n", + "\n", + "// Define headers for the API requests.\n", + "const headers = {\n", + " \"Content-Type\": \"application/json\",\n", + " Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,\n", + "};\n", + "\n", + "const toolkit = new OpenApiToolkit(new JsonSpec(data), llm, headers);" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f2839-4020-424e-9fc9-07777eede442", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "View available tools:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'requests_get',\n", + " description: 'A portal to the internet. Use this when you need to get specific content from a website.\\n' +\n", + " ' Input should be a url string (i.e. \"https://www.google.com\"). 
The output will be the text response of the GET request.'\n", + " },\n", + " {\n", + " name: 'requests_post',\n", + " description: 'Use this when you want to POST to a website.\\n' +\n", + " ' Input should be a json string with two keys: \"url\" and \"data\".\\n' +\n", + " ' The value of \"url\" should be a string, and the value of \"data\" should be a dictionary of\\n' +\n", + " ' key-value pairs you want to POST to the url as a JSON body.\\n' +\n", + " ' Be careful to always use double quotes for strings in the json string\\n' +\n", + " ' The output will be the text response of the POST request.'\n", + " },\n", + " {\n", + " name: 'json_explorer',\n", + " description: '\\n' +\n", + " 'Can be used to answer questions about the openapi spec for the API. Always use this tool before trying to make a request. \\n' +\n", + " 'Example inputs to this tool: \\n' +\n", + " \" 'What are the required query parameters for a GET request to the /bar endpoint?'\\n\" +\n", + " \" 'What are the required parameters in the request body for a POST request to the /foo endpoint?'\\n\" +\n", + " 'Always give this tool a specific question.'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const tools = toolkit.getTools();\n", + "\n", + "console.log(tools.map((tool) => ({\n", + " name: tool.name,\n", + " description: tool.description,\n", + "})))" + ] + }, + { + "cell_type": "markdown", + "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", + "metadata": {}, + "source": [ + "## Use within an agent\n", + "\n", + "First, ensure you have LangGraph installed:\n", + "\n", + "```{=mdx}\n", + "\n", + " @langchain/langgraph\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d", + "metadata": {}, + "outputs": [], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", + "\n", + "const agentExecutor = createReactAgent({ llm, tools });" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'requests_post',\n", + " args: {\n", + " input: '{\"url\":\"https://api.openai.com/v1/chat/completions\",\"data\":{\"model\":\"gpt-4o-mini\",\"messages\":[{\"role\":\"user\",\"content\":\"tell me a joke.\"}]}}'\n", + " },\n", + " type: 'tool_call',\n", + " id: 'call_1HqyZrbYgKFwQRfAtsZA2uL5'\n", + " }\n", + "]\n", + "{\n", + " \"id\": \"chatcmpl-9t36IIuRCs0WGMEy69HUqPcKvOc1w\",\n", + " \"object\": \"chat.completion\",\n", + " \"created\": 1722906986,\n", + " \"model\": \"gpt-4o-mini-2024-07-18\",\n", + " \"choices\": [\n", + " {\n", + " \"index\": 0,\n", + " \"message\": {\n", + " \"role\": \"assistant\",\n", + " \"content\": \"Why don't skeletons fight each other? \\n\\nThey don't have the guts!\"\n", + " },\n", + " \"logprobs\": null,\n", + " \"finish_reason\": \"stop\"\n", + " }\n", + " ],\n", + " \"usage\": {\n", + " \"prompt_tokens\": 12,\n", + " \"completion_tokens\": 15,\n", + " \"total_tokens\": 27\n", + " },\n", + " \"system_fingerprint\": \"fp_48196bc67a\"\n", + "}\n", + "\n", + "Here's a joke for you:\n", + "\n", + "**Why don't skeletons fight each other?** \n", + "They don't have the guts!\n" + ] + } + ], + "source": [ + "const exampleQuery = \"Make a POST request to openai /chat/completions. The prompt should be 'tell me a joke.'. 
Ensure you use the model 'gpt-4o-mini'.\"\n", + "\n", + "const events = await agentExecutor.stream(\n", + " { messages: [[\"user\", exampleQuery]]},\n", + " { streamMode: \"values\", }\n", + ")\n", + "\n", + "for await (const event of events) {\n", + " const lastMsg = event.messages[event.messages.length - 1];\n", + " if (lastMsg.tool_calls?.length) {\n", + " console.dir(lastMsg.tool_calls, { depth: null });\n", + " } else if (lastMsg.content) {\n", + " console.log(lastMsg.content);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.OpenApiToolkit.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const exampleQuery = \"Make a POST request to openai /chat/completions. The prompt should be 'tell me a joke.'. Ensure you use the model 'gpt-4o-mini'.\"\n", - "\n", - "const events = await agentExecutor.stream(\n", - " { messages: [[\"user\", exampleQuery]]},\n", - " { streamMode: \"values\", }\n", - ")\n", - "\n", - "for await (const event of events) {\n", - " const lastMsg = event.messages[event.messages.length - 1];\n", - " if (lastMsg.tool_calls?.length) {\n", - " console.dir(lastMsg.tool_calls, { depth: null });\n", - " } else if (lastMsg.content) {\n", - " console.log(lastMsg.content);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all OpenApiToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.OpenApiToolkit.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/toolkits/sql.ipynb b/docs/core_docs/docs/integrations/toolkits/sql.ipynb index 5f52a0f08b6a..fd8e6c949306 100644 --- a/docs/core_docs/docs/integrations/toolkits/sql.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/sql.ipynb @@ -1,316 +1,316 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Sql Toolkit\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# SqlToolkit\n", - "\n", - "This will help you getting started with the [SqlToolkit](/docs/concepts/#toolkits). For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents_toolkits_sql.SqlToolkit.html). 
You can also find the documentation for the Python equivalent [here](https://python.langchain.com/docs/integrations/toolkits/sql_database/).\n", - "\n", - "This toolkit contains a the following tools:\n", - "\n", - "| Name | Description |\n", - "|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n", - "| `query-sql` | Input to this tool is a detailed and correct SQL query, output is a result from the database. If the query is not correct, an error message will be returned. If an error is returned, rewrite the query, check the query, and try again. |\n", - "| `info-sql` | Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables. Be sure that the tables actually exist by calling list-tables-sql first! Example Input: \"table1, table2, table3\". |\n", - "| `list-tables-sql` | Input is an empty string, output is a comma-separated list of tables in the database. |\n", - "| `query-checker` | Use this tool to double check if your query is correct before executing it. Always use this tool before executing a query with query-sql! |\n", - "\n", - "This toolkit is useful for asking questions, performing queries, validating queries and more on a SQL database.\n", - "\n", - "## Setup\n", - "\n", - "This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc. To set it up, follow [these instructions](https://database.guide/2-sample-databases-sqlite/), placing the `.db` file in the directory where your code lives.\n", - "\n", - "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "This toolkit lives in the `langchain` package. 
You'll also need to install the `typeorm` peer dependency.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " langchain @langchain/core typeorm\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "First, we need to define our LLM to be used in the toolkit.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "d1002b65", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { SqlToolkit } from \"langchain/agents/toolkits/sql\"\n", - "import { DataSource } from \"typeorm\";\n", - "import { SqlDatabase } from \"langchain/sql_db\";\n", - "\n", - "const datasource = new DataSource({\n", - " type: \"sqlite\",\n", - " database: \"../../../../../../Chinook.db\", // Replace with the link to your database\n", - "});\n", - "const db = await SqlDatabase.fromDataSourceParams({\n", - " appDataSource: datasource,\n", - "});\n", - "\n", - "const toolkit = new SqlToolkit(db, llm);" - ] - }, - { - "cell_type": "markdown", - "id": "5c5f2839-4020-424e-9fc9-07777eede442", - "metadata": {}, - "source": [ - "## Tools\n", - "\n", - "View available tools:" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'query-sql',\n", - " description: 'Input to this tool is a detailed and correct SQL query, output is a result from the database.\\n' +\n", - " ' If the query is not correct, an error message will be returned.\\n' +\n", - " ' If an error is returned, rewrite the query, check the query, and try again.'\n", - " },\n", - " {\n", - " name: 'info-sql',\n", - " description: 'Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables.\\n' +\n", - " ' Be sure that the tables actually exist by calling list-tables-sql first!\\n' +\n", - " '\\n' +\n", - " ' Example Input: \"table1, table2, table3.'\n", - " },\n", - " {\n", - " name: 'list-tables-sql',\n", - " description: 'Input is an empty string, output is a comma-separated list of tables in the database.'\n", - " },\n", - " {\n", - " name: 'query-checker',\n", - " description: 'Use this tool to double check if your query is correct before executing it.\\n' +\n", - " ' Always use this tool before executing a query with query-sql!'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const tools = toolkit.getTools();\n", - "\n", - "console.log(tools.map((tool) => ({\n", - " name: tool.name,\n", - " description: tool.description,\n", - "})))" - ] - }, - { - "cell_type": "markdown", - "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", - "metadata": {}, - "source": [ - "## Use within an agent\n", - "\n", - "First, ensure you have LangGraph installed:\n", - "\n", - "```{=mdx}\n", - 
"\n", - " @langchain/langgraph\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d", - "metadata": {}, - "outputs": [], - "source": [ - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", - "\n", - "const agentExecutor = createReactAgent({ llm, tools });" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Sql Toolkit\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'list-tables-sql',\n", - " args: {},\n", - " type: 'tool_call',\n", - " id: 'call_LqsRA86SsKmzhRfSRekIQtff'\n", - " }\n", - "]\n", - "Album, Artist, Customer, Employee, Genre, Invoice, InvoiceLine, MediaType, Playlist, PlaylistTrack, Track\n", - "[\n", - " {\n", - " name: 'query-checker',\n", - " args: { input: 'SELECT * FROM Artist LIMIT 10;' },\n", - " type: 'tool_call',\n", - " id: 'call_MKBCjt4gKhl5UpnjsMHmDrBH'\n", - " }\n", - "]\n", - "The SQL query you provided is:\n", - "\n", - "```sql\n", - "SELECT * FROM Artist LIMIT 10;\n", - "```\n", - "\n", - "This query is straightforward and does not contain any of the common mistakes listed. It simply selects all columns from the `Artist` table and limits the result to 10 rows. \n", - "\n", - "Therefore, there are no mistakes to correct, and the original query can be reproduced as is:\n", - "\n", - "```sql\n", - "SELECT * FROM Artist LIMIT 10;\n", - "```\n", - "[\n", - " {\n", - " name: 'query-sql',\n", - " args: { input: 'SELECT * FROM Artist LIMIT 10;' },\n", - " type: 'tool_call',\n", - " id: 'call_a8MPiqXPMaN6yjN9i7rJctJo'\n", - " }\n", - "]\n", - "[{\"ArtistId\":1,\"Name\":\"AC/DC\"},{\"ArtistId\":2,\"Name\":\"Accept\"},{\"ArtistId\":3,\"Name\":\"Aerosmith\"},{\"ArtistId\":4,\"Name\":\"Alanis Morissette\"},{\"ArtistId\":5,\"Name\":\"Alice In Chains\"},{\"ArtistId\":6,\"Name\":\"Antônio Carlos Jobim\"},{\"ArtistId\":7,\"Name\":\"Apocalyptica\"},{\"ArtistId\":8,\"Name\":\"Audioslave\"},{\"ArtistId\":9,\"Name\":\"BackBeat\"},{\"ArtistId\":10,\"Name\":\"Billy Cobham\"}]\n", - "Here are 10 artists from your database:\n", - "\n", - "1. AC/DC\n", - "2. Accept\n", - "3. Aerosmith\n", - "4. Alanis Morissette\n", - "5. Alice In Chains\n", - "6. Antônio Carlos Jobim\n", - "7. Apocalyptica\n", - "8. Audioslave\n", - "9. BackBeat\n", - "10. Billy Cobham\n" - ] + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# SqlToolkit\n", + "\n", + "This will help you getting started with the [SqlToolkit](/docs/concepts/tools/#toolkits). For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents_toolkits_sql.SqlToolkit.html). 
You can also find the documentation for the Python equivalent [here](https://python.langchain.com/docs/integrations/toolkits/sql_database/).\n",
+        "\n",
+        "This toolkit contains the following tools:\n",
+        "\n",
+        "| Name | Description |\n",
+        "|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
+        "| `query-sql` | Input to this tool is a detailed and correct SQL query, output is a result from the database. If the query is not correct, an error message will be returned. If an error is returned, rewrite the query, check the query, and try again. |\n",
+        "| `info-sql` | Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables. Be sure that the tables actually exist by calling list-tables-sql first! Example Input: \"table1, table2, table3\". |\n",
+        "| `list-tables-sql` | Input is an empty string, output is a comma-separated list of tables in the database. |\n",
+        "| `query-checker` | Use this tool to double check if your query is correct before executing it. Always use this tool before executing a query with query-sql! |\n",
+        "\n",
+        "This toolkit is useful for asking questions, performing queries, validating queries and more on a SQL database.\n",
+        "\n",
+        "## Setup\n",
+        "\n",
+        "This example uses the Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc. To set it up, follow [these instructions](https://database.guide/2-sample-databases-sqlite/), placing the `.db` file in the directory where your code lives.\n",
+        "\n",
+        "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n",
+        "\n",
+        "```typescript\n",
+        "process.env.LANGCHAIN_TRACING_V2=\"true\"\n",
+        "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n",
+        "```\n",
+        "\n",
+        "### Installation\n",
+        "\n",
+        "This toolkit lives in the `langchain` package. 
You'll also need to install the `typeorm` peer dependency.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " langchain @langchain/core typeorm\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "First, we need to define our LLM to be used in the toolkit.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "d1002b65", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { SqlToolkit } from \"langchain/agents/toolkits/sql\"\n", + "import { DataSource } from \"typeorm\";\n", + "import { SqlDatabase } from \"langchain/sql_db\";\n", + "\n", + "const datasource = new DataSource({\n", + " type: \"sqlite\",\n", + " database: \"../../../../../../Chinook.db\", // Replace with the link to your database\n", + "});\n", + "const db = await SqlDatabase.fromDataSourceParams({\n", + " appDataSource: datasource,\n", + "});\n", + "\n", + "const toolkit = new SqlToolkit(db, llm);" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f2839-4020-424e-9fc9-07777eede442", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "View available tools:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'query-sql',\n", + " description: 'Input to this tool is a detailed and correct SQL query, output is a result from the database.\\n' +\n", + " ' If the query is not correct, an error message will be returned.\\n' +\n", + " ' If an error is returned, rewrite the query, check the query, and try again.'\n", + " },\n", + " {\n", + " name: 'info-sql',\n", + " description: 'Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables.\\n' +\n", + " ' Be sure that the tables actually exist by calling list-tables-sql first!\\n' +\n", + " '\\n' +\n", + " ' Example Input: \"table1, table2, table3.'\n", + " },\n", + " {\n", + " name: 'list-tables-sql',\n", + " description: 'Input is an empty string, output is a comma-separated list of tables in the database.'\n", + " },\n", + " {\n", + " name: 'query-checker',\n", + " description: 'Use this tool to double check if your query is correct before executing it.\\n' +\n", + " ' Always use this tool before executing a query with query-sql!'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const tools = toolkit.getTools();\n", + "\n", + "console.log(tools.map((tool) => ({\n", + " name: tool.name,\n", + " description: tool.description,\n", + "})))" + ] + }, + { + "cell_type": "markdown", + "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", + "metadata": {}, + "source": [ + "## Use within an agent\n", + "\n", + "First, ensure you have LangGraph installed:\n", + "\n", + "```{=mdx}\n", + "\n", + " 
@langchain/langgraph\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d", + "metadata": {}, + "outputs": [], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", + "\n", + "const agentExecutor = createReactAgent({ llm, tools });" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'list-tables-sql',\n", + " args: {},\n", + " type: 'tool_call',\n", + " id: 'call_LqsRA86SsKmzhRfSRekIQtff'\n", + " }\n", + "]\n", + "Album, Artist, Customer, Employee, Genre, Invoice, InvoiceLine, MediaType, Playlist, PlaylistTrack, Track\n", + "[\n", + " {\n", + " name: 'query-checker',\n", + " args: { input: 'SELECT * FROM Artist LIMIT 10;' },\n", + " type: 'tool_call',\n", + " id: 'call_MKBCjt4gKhl5UpnjsMHmDrBH'\n", + " }\n", + "]\n", + "The SQL query you provided is:\n", + "\n", + "```sql\n", + "SELECT * FROM Artist LIMIT 10;\n", + "```\n", + "\n", + "This query is straightforward and does not contain any of the common mistakes listed. It simply selects all columns from the `Artist` table and limits the result to 10 rows. \n", + "\n", + "Therefore, there are no mistakes to correct, and the original query can be reproduced as is:\n", + "\n", + "```sql\n", + "SELECT * FROM Artist LIMIT 10;\n", + "```\n", + "[\n", + " {\n", + " name: 'query-sql',\n", + " args: { input: 'SELECT * FROM Artist LIMIT 10;' },\n", + " type: 'tool_call',\n", + " id: 'call_a8MPiqXPMaN6yjN9i7rJctJo'\n", + " }\n", + "]\n", + "[{\"ArtistId\":1,\"Name\":\"AC/DC\"},{\"ArtistId\":2,\"Name\":\"Accept\"},{\"ArtistId\":3,\"Name\":\"Aerosmith\"},{\"ArtistId\":4,\"Name\":\"Alanis Morissette\"},{\"ArtistId\":5,\"Name\":\"Alice In Chains\"},{\"ArtistId\":6,\"Name\":\"Antônio Carlos Jobim\"},{\"ArtistId\":7,\"Name\":\"Apocalyptica\"},{\"ArtistId\":8,\"Name\":\"Audioslave\"},{\"ArtistId\":9,\"Name\":\"BackBeat\"},{\"ArtistId\":10,\"Name\":\"Billy Cobham\"}]\n", + "Here are 10 artists from your database:\n", + "\n", + "1. AC/DC\n", + "2. Accept\n", + "3. Aerosmith\n", + "4. Alanis Morissette\n", + "5. Alice In Chains\n", + "6. Antônio Carlos Jobim\n", + "7. Apocalyptica\n", + "8. Audioslave\n", + "9. BackBeat\n", + "10. Billy Cobham\n" + ] + } + ], + "source": [ + "const exampleQuery = \"Can you list 10 artists from my database?\"\n", + "\n", + "const events = await agentExecutor.stream(\n", + " { messages: [[\"user\", exampleQuery]]},\n", + " { streamMode: \"values\", }\n", + ")\n", + "\n", + "for await (const event of events) {\n", + " const lastMsg = event.messages[event.messages.length - 1];\n", + " if (lastMsg.tool_calls?.length) {\n", + " console.dir(lastMsg.tool_calls, { depth: null });\n", + " } else if (lastMsg.content) {\n", + " console.log(lastMsg.content);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents_toolkits_sql.SqlToolkit.html)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const exampleQuery = \"Can you list 10 artists from my database?\"\n", - "\n", - "const events = await agentExecutor.stream(\n", - " { messages: [[\"user\", exampleQuery]]},\n", - " { streamMode: \"values\", }\n", - ")\n", - "\n", - "for await (const event of events) {\n", - " const lastMsg = event.messages[event.messages.length - 1];\n", - " if (lastMsg.tool_calls?.length) {\n", - " console.dir(lastMsg.tool_calls, { depth: null });\n", - " } else if (lastMsg.content) {\n", - " console.log(lastMsg.content);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all SqlToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents_toolkits_sql.SqlToolkit.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb b/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb index 9eb7c1fce715..fb233438253d 100644 --- a/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb +++ b/docs/core_docs/docs/integrations/toolkits/vectorstore.ipynb @@ -1,259 +1,259 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: VectorStore Toolkit\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "e49f1e0d", - "metadata": {}, - "source": [ - "# VectorStoreToolkit\n", - "\n", - "This will help you getting started with the [VectorStoreToolkit](/docs/concepts/#toolkits). 
For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.VectorStoreToolkit.html).\n", - "\n", - "The `VectorStoreToolkit` is a toolkit which takes in a vector store, and converts it to a tool which can then be invoked, passed to LLMs, agents and more.\n", - "\n", - "## Setup\n", - "\n", - "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "This toolkit lives in the `langchain` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " langchain @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "a38cde65-254d-4219-a441-068766c0d4b5", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our toolkit. First, we need to define the LLM we'll use in the toolkit.\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "79d116f5", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - " temperature: 0,\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", - "metadata": {}, - "outputs": [], - "source": [ - "import { VectorStoreToolkit, VectorStoreInfo } from \"langchain/agents/toolkits\"\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\"\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", - "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", - "import fs from \"fs\";\n", - "\n", - "// Load a text file to use as our data source.\n", - "const text = fs.readFileSync(\"../../../../../examples/state_of_the_union.txt\", \"utf8\");\n", - "\n", - "// Split the text into chunks before inserting to our store\n", - "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });\n", - "const docs = await textSplitter.createDocuments([text]);\n", - "\n", - "const vectorStore = await MemoryVectorStore.fromDocuments(docs, new OpenAIEmbeddings());\n", - "\n", - "const vectorStoreInfo: VectorStoreInfo = {\n", - " name: \"state_of_union_address\",\n", - " description: \"the most recent state of the Union address\",\n", - " vectorStore,\n", - "};\n", - "\n", - "const toolkit = new VectorStoreToolkit(vectorStoreInfo, llm);" - ] - }, - { - "cell_type": "markdown", - "id": "5c5f2839-4020-424e-9fc9-07777eede442", - "metadata": {}, - "source": [ - "## Tools\n", - "\n", - "Here, we can see it converts our vector store into a tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'state_of_union_address',\n", - " description: 'Useful for when you need to answer 
questions about state_of_union_address. Whenever you need information about the most recent state of the Union address you should ALWAYS use this. Input should be a fully formed question.'\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const tools = toolkit.getTools();\n", - "\n", - "console.log(tools.map((tool) => ({\n", - " name: tool.name,\n", - " description: tool.description,\n", - "})))" - ] - }, - { - "cell_type": "markdown", - "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", - "metadata": {}, - "source": [ - "## Use within an agent\n", - "\n", - "First, ensure you have LangGraph installed:\n", - "\n", - "```{=mdx}\n", - "\n", - " @langchain/langgraph\n", - "\n", - "```\n", - "\n", - "Then, instantiate the agent:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d", - "metadata": {}, - "outputs": [], - "source": [ - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", - "\n", - "const agentExecutor = createReactAgent({ llm, tools });" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: VectorStore Toolkit\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'state_of_union_address',\n", - " args: {\n", - " input: 'What did Biden say about Ketanji Brown Jackson in the State of the Union address?'\n", - " },\n", - " type: 'tool_call',\n", - " id: 'call_glJSWLNrftKHa92A6j8x4jhd'\n", - " }\n", - "]\n", - "In the State of the Union address, Biden mentioned that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, describing her as one of the nation’s top legal minds who will continue Justice Breyer’s legacy of excellence. He highlighted her background as a former top litigator in private practice, a former federal public defender, and noted that she comes from a family of public school educators and police officers. He also pointed out that she has received a broad range of support since her nomination.\n", - "In the State of the Union address, President Biden spoke about Ketanji Brown Jackson, stating that he nominated her as one of the nation’s top legal minds who will continue Justice Breyer’s legacy of excellence. He highlighted her experience as a former top litigator in private practice and a federal public defender, as well as her background coming from a family of public school educators and police officers. Biden also noted that she has received a broad range of support since her nomination.\n" - ] + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# VectorStoreToolkit\n", + "\n", + "This will help you getting started with the [VectorStoreToolkit](/docs/concepts/tools/#toolkits). 
For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.VectorStoreToolkit.html).\n", + "\n", + "The `VectorStoreToolkit` is a toolkit which takes in a vector store, and converts it to a tool which can then be invoked, passed to LLMs, agents and more.\n", + "\n", + "## Setup\n", + "\n", + "If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "This toolkit lives in the `langchain` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " langchain @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our toolkit. First, we need to define the LLM we'll use in the toolkit.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "79d116f5", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + " temperature: 0,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { VectorStoreToolkit, VectorStoreInfo } from \"langchain/agents/toolkits\"\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\"\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", + "import fs from \"fs\";\n", + "\n", + "// Load a text file to use as our data source.\n", + "const text = fs.readFileSync(\"../../../../../examples/state_of_the_union.txt\", \"utf8\");\n", + "\n", + "// Split the text into chunks before inserting to our store\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });\n", + "const docs = await textSplitter.createDocuments([text]);\n", + "\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(docs, new OpenAIEmbeddings());\n", + "\n", + "const vectorStoreInfo: VectorStoreInfo = {\n", + " name: \"state_of_union_address\",\n", + " description: \"the most recent state of the Union address\",\n", + " vectorStore,\n", + "};\n", + "\n", + "const toolkit = new VectorStoreToolkit(vectorStoreInfo, llm);" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f2839-4020-424e-9fc9-07777eede442", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "Here, we can see it converts our vector store into a tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "51a60dbe-9f2e-4e04-bb62-23968f17164a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'state_of_union_address',\n", + " description: 'Useful for when you need to answer questions about 
state_of_union_address. Whenever you need information about the most recent state of the Union address you should ALWAYS use this. Input should be a fully formed question.'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const tools = toolkit.getTools();\n", + "\n", + "console.log(tools.map((tool) => ({\n", + " name: tool.name,\n", + " description: tool.description,\n", + "})))" + ] + }, + { + "cell_type": "markdown", + "id": "dfe8aad4-8626-4330-98a9-7ea1ca5d2e0e", + "metadata": {}, + "source": [ + "## Use within an agent\n", + "\n", + "First, ensure you have LangGraph installed:\n", + "\n", + "```{=mdx}\n", + "\n", + " @langchain/langgraph\n", + "\n", + "```\n", + "\n", + "Then, instantiate the agent:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "310bf18e-6c9a-4072-b86e-47bc1fcca29d", + "metadata": {}, + "outputs": [], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\"\n", + "\n", + "const agentExecutor = createReactAgent({ llm, tools });" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "23e11cc9-abd6-4855-a7eb-799f45ca01ae", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'state_of_union_address',\n", + " args: {\n", + " input: 'What did Biden say about Ketanji Brown Jackson in the State of the Union address?'\n", + " },\n", + " type: 'tool_call',\n", + " id: 'call_glJSWLNrftKHa92A6j8x4jhd'\n", + " }\n", + "]\n", + "In the State of the Union address, Biden mentioned that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, describing her as one of the nation’s top legal minds who will continue Justice Breyer’s legacy of excellence. He highlighted her background as a former top litigator in private practice, a former federal public defender, and noted that she comes from a family of public school educators and police officers. He also pointed out that she has received a broad range of support since her nomination.\n", + "In the State of the Union address, President Biden spoke about Ketanji Brown Jackson, stating that he nominated her as one of the nation’s top legal minds who will continue Justice Breyer’s legacy of excellence. He highlighted her experience as a former top litigator in private practice and a federal public defender, as well as her background coming from a family of public school educators and police officers. Biden also noted that she has received a broad range of support since her nomination.\n" + ] + } + ], + "source": [ + "const exampleQuery = \"What did biden say about Ketanji Brown Jackson is the state of the union address?\"\n", + "\n", + "const events = await agentExecutor.stream(\n", + " { messages: [[\"user\", exampleQuery]]},\n", + " { streamMode: \"values\", }\n", + ")\n", + "\n", + "for await (const event of events) {\n", + " const lastMsg = event.messages[event.messages.length - 1];\n", + " if (lastMsg.tool_calls?.length) {\n", + " console.dir(lastMsg.tool_calls, { depth: null });\n", + " } else if (lastMsg.content) {\n", + " console.log(lastMsg.content);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.VectorStoreToolkit.html)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const exampleQuery = \"What did biden say about Ketanji Brown Jackson is the state of the union address?\"\n", - "\n", - "const events = await agentExecutor.stream(\n", - " { messages: [[\"user\", exampleQuery]]},\n", - " { streamMode: \"values\", }\n", - ")\n", - "\n", - "for await (const event of events) {\n", - " const lastMsg = event.messages[event.messages.length - 1];\n", - " if (lastMsg.tool_calls?.length) {\n", - " console.dir(lastMsg.tool_calls, { depth: null });\n", - " } else if (lastMsg.content) {\n", - " console.log(lastMsg.content);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all VectorStoreToolkit features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.agents.VectorStoreToolkit.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx b/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx index 453e6fc1cb89..d6e216d1f253 100644 --- a/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx +++ b/docs/core_docs/docs/integrations/tools/aiplugin-tool.mdx @@ -68,5 +68,5 @@ Finished chain. 
## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx b/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx index fade937440e7..d3daa3e14d97 100644 --- a/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx +++ b/docs/core_docs/docs/integrations/tools/azure_dynamic_sessions.mdx @@ -43,5 +43,5 @@ import AgentExample from "@examples/tools/azure_dynamic_sessions/azure_dynamic_s ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/connery.mdx b/docs/core_docs/docs/integrations/tools/connery.mdx index 8454f626c157..906744a4b811 100644 --- a/docs/core_docs/docs/integrations/tools/connery.mdx +++ b/docs/core_docs/docs/integrations/tools/connery.mdx @@ -63,5 +63,5 @@ Connery Action is a structured tool, so you can only use it in the agents suppor ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/dalle.mdx b/docs/core_docs/docs/integrations/tools/dalle.mdx index 6c50daf19721..1ddc656f0486 100644 --- a/docs/core_docs/docs/integrations/tools/dalle.mdx +++ b/docs/core_docs/docs/integrations/tools/dalle.mdx @@ -28,5 +28,5 @@ npm install @langchain/openai @langchain/core ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/discord.mdx b/docs/core_docs/docs/integrations/tools/discord.mdx index 5699e30d88a9..8a1372449188 100644 --- a/docs/core_docs/docs/integrations/tools/discord.mdx +++ b/docs/core_docs/docs/integrations/tools/discord.mdx @@ -39,5 +39,5 @@ import AgentExample from "@examples/agents/discord.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb b/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb index 5f5ea4896ac5..574447b86877 100644 --- a/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb +++ b/docs/core_docs/docs/integrations/tools/duckduckgo_search.ipynb @@ -1,291 +1,291 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "10238e62-3465-4973-9279-606cbb7ccf16", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: DuckDuckGoSearch\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "a6f91f20", - "metadata": {}, - "source": [ - "# DuckDuckGoSearch\n", - "\n", - "This notebook provides a quick overview for getting started with [DuckDuckGoSearch](/docs/integrations/tools/). For detailed documentation of all DuckDuckGoSearch features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html).\n", - "\n", - "DuckDuckGoSearch offers a privacy-focused search API designed for LLM Agents. 
It provides seamless integration with a wide range of data sources, prioritizing user privacy and relevant search results.\n", - "\n", - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/tools/ddg/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [DuckDuckGoSearch](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "The integration lives in the `@langchain/community` package, along with the `duck-duck-scrape` dependency:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core duck-duck-scrape\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", - "\n", - "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "You can instantiate an instance of the `DuckDuckGoSearch` tool like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64", - "metadata": {}, - "outputs": [], - "source": [ - "import { DuckDuckGoSearch } from \"@langchain/community/tools/duckduckgo_search\"\n", - "\n", - "const tool = new DuckDuckGoSearch({ maxResults: 1 })" - ] - }, - { - "cell_type": "markdown", - "id": "74147a1a", - "metadata": {}, - "source": [ - "## Invocation\n", - "\n", - "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{\"title\":\"San Francisco, CA Current Weather | AccuWeather\",\"link\":\"https://www.accuweather.com/en/us/san-francisco/94103/current-weather/347629\",\"snippet\":\"Current weather in San Francisco, CA. 
Check current conditions in San Francisco, CA with radar, hourly, and more.\"}]\n" - ] - } - ], - "source": [ - "await tool.invoke(\"what is the current weather in sf?\")" - ] - }, - { - "cell_type": "markdown", - "id": "d6e73897", - "metadata": {}, - "source": [ - "### [Invoke with ToolCall](/docs/concepts/#invoke-with-toolcall)\n", - "\n", - "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "f90e33a7", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "10238e62-3465-4973-9279-606cbb7ccf16", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: DuckDuckGoSearch\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "ToolMessage {\n", - " \"content\": \"[{\\\"title\\\":\\\"San Francisco, CA Weather Conditions | Weather Underground\\\",\\\"link\\\":\\\"https://www.wunderground.com/weather/us/ca/san-francisco\\\",\\\"snippet\\\":\\\"San Francisco Weather Forecasts. Weather Underground provides local & long-range weather forecasts, weatherreports, maps & tropical weather conditions for the San Francisco area.\\\"}]\",\n", - " \"name\": \"duckduckgo-search\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"tool_call_id\"\n", - "}\n" - ] - } - ], - "source": [ - "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n", - "const modelGeneratedToolCall = {\n", - " args: {\n", - " input: \"what is the current weather in sf?\"\n", - " },\n", - " id: \"tool_call_id\",\n", - " name: tool.name,\n", - " type: \"tool_call\",\n", - "}\n", - "await tool.invoke(modelGeneratedToolCall)" - ] - }, - { - "cell_type": "markdown", - "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\"\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95", - "metadata": {}, - "outputs": [], - "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", \"You are a helpful assistant.\"],\n", - " [\"placeholder\", \"{messages}\"],\n", - " ]\n", - ")\n", - "\n", - "const llmWithTools = llm.bindTools([tool]);\n", - "\n", - "const chain = prompt.pipe(llmWithTools);\n", - "\n", - "const toolChain = RunnableLambda.from(\n", - " async (userInput: string, config) => {\n", - " const humanMessage = new HumanMessage(userInput,);\n", - " const aiMsg = await chain.invoke({\n", - " messages: [new HumanMessage(userInput)],\n", - " }, config);\n", - " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n", - " 
return chain.invoke({\n", - " messages: [humanMessage, aiMsg, ...toolMsgs],\n", - " }, config);\n", - " }\n", - ");\n", - "\n", - "const toolChainResult = await toolChain.invoke(\"how many people have climbed mount everest?\");" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "28448fe2", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a6f91f20", + "metadata": {}, + "source": [ + "# DuckDuckGoSearch\n", + "\n", + "This notebook provides a quick overview for getting started with [DuckDuckGoSearch](/docs/integrations/tools/). For detailed documentation of all DuckDuckGoSearch features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html).\n", + "\n", + "DuckDuckGoSearch offers a privacy-focused search API designed for LLM Agents. It provides seamless integration with a wide range of data sources, prioritizing user privacy and relevant search results.\n", + "\n", + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/tools/ddg/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [DuckDuckGoSearch](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "The integration lives in the `@langchain/community` package, along with the `duck-duck-scrape` dependency:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core duck-duck-scrape\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", + "\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"tool_calls\": [],\n", - " \"content\": \"As of December 2023, a total of 6,664 different people have reached the summit of Mount Everest.\"\n", - "}\n" - ] + "cell_type": "markdown", + "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "You can instantiate an instance of the `DuckDuckGoSearch` tool like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64", + "metadata": {}, + "outputs": [], + "source": [ + "import { DuckDuckGoSearch } from \"@langchain/community/tools/duckduckgo_search\"\n", + "\n", + "const tool = new DuckDuckGoSearch({ maxResults: 1 })" + ] + }, + { + "cell_type": "markdown", + "id": "74147a1a", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "### [Invoke directly with args](/docs/concepts/tools)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{\"title\":\"San Francisco, CA Current Weather | 
AccuWeather\",\"link\":\"https://www.accuweather.com/en/us/san-francisco/94103/current-weather/347629\",\"snippet\":\"Current weather in San Francisco, CA. Check current conditions in San Francisco, CA with radar, hourly, and more.\"}]\n" + ] + } + ], + "source": [ + "await tool.invoke(\"what is the current weather in sf?\")" + ] + }, + { + "cell_type": "markdown", + "id": "d6e73897", + "metadata": {}, + "source": [ + "### [Invoke with ToolCall](/docs/concepts/tools)\n", + "\n", + "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "f90e33a7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMessage {\n", + " \"content\": \"[{\\\"title\\\":\\\"San Francisco, CA Weather Conditions | Weather Underground\\\",\\\"link\\\":\\\"https://www.wunderground.com/weather/us/ca/san-francisco\\\",\\\"snippet\\\":\\\"San Francisco Weather Forecasts. Weather Underground provides local & long-range weather forecasts, weatherreports, maps & tropical weather conditions for the San Francisco area.\\\"}]\",\n", + " \"name\": \"duckduckgo-search\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"tool_call_id\"\n", + "}\n" + ] + } + ], + "source": [ + "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n", + "const modelGeneratedToolCall = {\n", + " args: {\n", + " input: \"what is the current weather in sf?\"\n", + " },\n", + " id: \"tool_call_id\",\n", + " name: tool.name,\n", + " type: \"tool_call\",\n", + "}\n", + "await tool.invoke(modelGeneratedToolCall)" + ] + }, + { + "cell_type": "markdown", + "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95", + "metadata": {}, + "outputs": [], + "source": [ + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"You are a helpful assistant.\"],\n", + " [\"placeholder\", \"{messages}\"],\n", + " ]\n", + ")\n", + "\n", + "const llmWithTools = llm.bindTools([tool]);\n", + "\n", + "const chain = prompt.pipe(llmWithTools);\n", + "\n", + "const toolChain = RunnableLambda.from(\n", + " async (userInput: string, config) => {\n", + " const humanMessage = new HumanMessage(userInput,);\n", + " const aiMsg = await chain.invoke({\n", + " messages: [new HumanMessage(userInput)],\n", + " }, config);\n", + " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n", + " return chain.invoke({\n", + " messages: [humanMessage, aiMsg, 
...toolMsgs],\n", + " }, config);\n", + " }\n", + ");\n", + "\n", + "const toolChainResult = await toolChain.invoke(\"how many people have climbed mount everest?\");" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "28448fe2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"tool_calls\": [],\n", + " \"content\": \"As of December 2023, a total of 6,664 different people have reached the summit of Mount Everest.\"\n", + "}\n" + ] + } + ], + "source": [ + "const { tool_calls, content } = toolChainResult;\n", + "\n", + "console.log(\"AIMessage\", JSON.stringify({\n", + " tool_calls,\n", + " content,\n", + "}, null, 2));" + ] + }, + { + "cell_type": "markdown", + "id": "570f4662", + "metadata": {}, + "source": [ + "## Agents\n", + "\n", + "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs." + ] + }, + { + "cell_type": "markdown", + "id": "4ac8146c", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all DuckDuckGoSearch features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const { tool_calls, content } = toolChainResult;\n", - "\n", - "console.log(\"AIMessage\", JSON.stringify({\n", - " tool_calls,\n", - " content,\n", - "}, null, 2));" - ] - }, - { - "cell_type": "markdown", - "id": "570f4662", - "metadata": {}, - "source": [ - "## Agents\n", - "\n", - "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs." - ] - }, - { - "cell_type": "markdown", - "id": "4ac8146c", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all DuckDuckGoSearch features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_tools_duckduckgo_search.DuckDuckGoSearch.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/tools/exa_search.ipynb b/docs/core_docs/docs/integrations/tools/exa_search.ipynb index 74b89aa56eb5..b48ee2d3831f 100644 --- a/docs/core_docs/docs/integrations/tools/exa_search.ipynb +++ b/docs/core_docs/docs/integrations/tools/exa_search.ipynb @@ -1,426 +1,426 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# ExaSearchResults\n", - "\n", - "Exa (formerly Metaphor Search) is a search engine fully designed for use by LLMs. 
Search for documents on the internet using natural language queries, then retrieve cleaned HTML content from desired documents.\n", - "\n", - "Unlike keyword-based search (Google), Exa's neural search capabilities allow it to semantically understand queries and return relevant documents. For example, we could search `\"fascinating article about cats\"` and compare the search results from Google and Exa. Google gives us SEO-optimized listicles based on the keyword “fascinating”. Exa just works.\n", - "\n", - "This page goes over how to use `ExaSearchResults` with LangChain.\n", - "\n", - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | Serializable | [PY support](https://python.langchain.com/docs/integrations/tools/exa_search/) | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: |\n", - "| [ExaSearchResults](https://api.js.langchain.com/classes/langchain_exa.ExaSearchResults.html) | [@langchain/exa](https://npmjs.com/package/@langchain/exa) | ❌ | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/exa?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "The integration lives in the `@langchain/exa` package.\n", - "\n", - "```{=mdx}\n", - "\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/exa @langchain/core\n", - "\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "First, get an Exa API key and add it as an environment variable. Get 1000 free searches/month by signing up [here](https://dashboard.exa.ai/login).\n", - "\n", - "```typescript\n", - "process.env.EXASEARCH_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "It's also helpful (but not needed) to set up LangSmith for best-in-class observability:\n", - "\n", - "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Here we show how to insatiate an instance of the `ExaSearchResults` tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import { ExaSearchResults } from \"@langchain/exa\"\n", - "import Exa from \"exa-js\";\n", - "\n", - "// @lc-ts-ignore\n", - "const client = new Exa(process.env.EXASEARCH_API_KEY)\n", - "\n", - "const tool = new ExaSearchResults({\n", - " // @lc-ts-ignore\n", - " client,\n", - " searchArgs: {\n", - " numResults: 2,\n", - " }\n", - "})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Invocation\n", - "\n", - "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\"results\":[{\"score\":0.16085544228553772,\"title\":\"Hawaii Weather Forecast\",\"id\":\"https://www.willyweather.com/hi/hawaii.html\",\"url\":\"https://www.willyweather.com/hi/hawaii.html\",\"publishedDate\":\"2023-01-01\",\"author\":\"\",\"text\":\"Get an account to remove ads View More Real-Time Extremes Nation State County Hottest 78.8 °FFaleolo Intl / Apia, Samoa, HI Coldest 51.6 °FBradshaw Army Air Field / Hawaii, HI Windiest 12.7mphBradshaw Army Air Field / Hawaii, HI Most Humid 100%Hilo, Hilo International Airport, HI Least Humid 
73.32%Kailua / Kona, Keahole Airport, HI Highest Pressure 1030.5 hPaBradshaw Army Air Field / Hawaii, HI Lowest Pressure 1008 hPaFaleolo Intl / Apia, Samoa, HI\"},{\"score\":0.1591680943965912,\"title\":\"The Hawaii Climate To Prepare For Your Maui Wedding\",\"id\":\"https://mymauiwedding.weebly.com/blog6/the-hawaii-climate-to-prepare-for-your-maui-wedding\",\"url\":\"https://mymauiwedding.weebly.com/blog6/the-hawaii-climate-to-prepare-for-your-maui-wedding\",\"publishedDate\":\"2012-04-26\",\"author\":\"\",\"text\":\"Since the The hawaiian islands environment is very constant throughout the season with only slight heat range changes, you can travel there any season. While the moisture is very high, the continuous exotic sea breezes keep the circumstances very relaxed throughout the season. During the day you will be relaxed in a T-shirt or an Aloha clothing and a couple of shoes. Once the sun places you will probably want to wear a light coat since the circumstances can fall around ten levels. The protecting impact of the hills and the variations in climate at various levels make a variety of environment areas. The unique micro-climates are specific for the internal valleys, hill hills and seashores in The hawaiian islands. Located at the side of the exotic location and due to year-round heated sea exterior circumstances, which keep the overlying environment heated, The hawaiian islands has only two circumstances, both of them heated and one with a little bit more rain. Hawaii Climate During Summer Between the several weeks of Apr and Nov the environment is more dry and hotter with the conditions including 75-88. In the summer time the northern eastern business gusts of wind carry most of the rain to the destinations leeward part, which delivers a welcome comfort from the hot and dry climate.The conditions you will encounter will be proportional to where you are on the destinations. If you are on the edges that are protected from the gusts of wind, the the southeast part of and European factors, you will encounters hot and dry circumstances. If you are on the windward factors, northern or eastern, you will obtain the complete power of the gusts of wind and encounter moister and shade circumstances. Go windward for exotic circumstances and leeward for an dry environment. Hawaii Climate During Winter From Dec to Apr it is just a little bit chilly, with conditions between 68-80 F. Winter season is regarded rain. The biggest down pours come between Oct and Apr (the hoo'ilo season). Though stormy weather may be common, they usually complete through the destinations quickly and without event. There are more dark times to mess up your laying in the sun, but it hardly ever down pours more than 3 times in a row in one identify. Winter is search period, so if you're a search participant, come to the Northern Coast in Explore to get the ideal trend. Also, whale viewing period is at the end of winter, during Jan to Apr, so make sure you are here if you want to see these spectacular creatures! Hawaii Climate is Greatly Influenced by the Mountains The hills around the destinations are accountable for the large variety of circumstances. As an example, Kauai's Mt. Waialele is one of the rainiest destinations on the world. Mt. Waialele gets over 420 inches large of rainfall each season, but just a few kilometers down the line, Waimea Canyn is absolutely dry and has been nicknamed the \\\"Grand Canyn of the Pacific\\\". 
On Big Isle The hawaiian destinations, Hilo is one of the rainiest places in the nation, with 180 inches large of rainfall a season. But Puako, only 60 kilometers away, gets less than 6 inches large of rainfall. If you choose to discover the organic charm discovered at greater levels such as Mauna Kea, use long jeans and several levels of awesome climate outfits. The heat variety in the greater destinations falls 3.5 levels for every 1,000 toes above sea level.Watching the dawn from Mt Haleakala's peak is a incredible concept, but be sure to package up with neckties and work gloves that will keep you comfortable. The circumstances at the peak can fall to 30 F!. Also know that there is less security from the sun at greater levels so be sure to utilize the sun display liberally and use eyewear and a hat. The environment can modify greatly in just a few time when you are in the hills. The exclusive The hawaiian destinations environment makes it possible to sun shower on the Kona Shore and ski on Mauna Kea in the same day.\"}],\"requestId\":\"2145d8de65373c70250400c2c9e8eb13\"}\n" - ] - } - ], - "source": [ - "await tool.invoke(\"what is the weather in wailea?\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### [Invoke with ToolCall](/docs/concepts/#invoke-with-toolcall)\n", - "\n", - "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ExaSearchResults\n", + "\n", + "Exa (formerly Metaphor Search) is a search engine fully designed for use by LLMs. Search for documents on the internet using natural language queries, then retrieve cleaned HTML content from desired documents.\n", + "\n", + "Unlike keyword-based search (Google), Exa's neural search capabilities allow it to semantically understand queries and return relevant documents. For example, we could search `\"fascinating article about cats\"` and compare the search results from Google and Exa. Google gives us SEO-optimized listicles based on the keyword “fascinating”. Exa just works.\n", + "\n", + "This page goes over how to use `ExaSearchResults` with LangChain.\n", + "\n", + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | Serializable | [PY support](https://python.langchain.com/docs/integrations/tools/exa_search/) | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [ExaSearchResults](https://api.js.langchain.com/classes/langchain_exa.ExaSearchResults.html) | [@langchain/exa](https://npmjs.com/package/@langchain/exa) | ❌ | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/exa?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "The integration lives in the `@langchain/exa` package.\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/exa @langchain/core\n", + "\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "First, get an Exa API key and add it as an environment variable. 
Get 1000 free searches/month by signing up [here](https://dashboard.exa.ai/login).\n", + "\n", + "```typescript\n", + "process.env.EXASEARCH_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "It's also helpful (but not needed) to set up LangSmith for best-in-class observability:\n", + "\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "ToolMessage {\n", - " \"content\": \"{\\\"results\\\":[{\\\"score\\\":0.12955062091350555,\\\"title\\\":\\\"Urban Dictionary: Waianae\\\",\\\"id\\\":\\\"https://www.urbandictionary.com/define.php?term=Waianae\\\",\\\"url\\\":\\\"https://www.urbandictionary.com/define.php?term=Waianae\\\",\\\"publishedDate\\\":\\\"2006-04-19\\\",\\\"author\\\":\\\"\\\",\\\"text\\\":\\\"Hot but good time for go beach ,in this part of Hawaii you HAVE to have respect ,with people and their stuff, but Some people like act dumb and stupid so that’s the only thing that make Waianae look bad , but foreal kine in this part of Hawaii we have respect and if we don’t get that respect you gon expect no respect back . Get the Waianae mug. Advertise here for $5/day Located on the west end of Oahu. Waianae gets a bad reputation for being poor, dirty, scary, etc. Its hot and dry out west and the beaches are super nice. Makaha, Yokes, and Pray for Sex are some great beaches to name a few. Mostly locals and the majority of the homeless live out here. Even though its a little rough, the people have alot of aloha who live out here. Most important thing here is to have respect for other people and their stuff. Get the WAIANAE mug. Advertise here for $5/day When going too the island of Honolulu if you go to an amazing part for the island called Waianae, say ho sole u know where can find 1 top banggahh like get 1 Waianae special. Then say shoots boto by August 1, 2021 Get the Waianae special mug.\\\"},{\\\"score\\\":0.12563708424568176,\\\"title\\\":\\\"Mount Waialeale: One of the Wettest Spots on Earth | Hawaii.com\\\",\\\"id\\\":\\\"https://www.hawaii.com/trip-ideas/mount-waialeale-one-of-the-wettest-spots-on-earth/\\\",\\\"url\\\":\\\"https://www.hawaii.com/trip-ideas/mount-waialeale-one-of-the-wettest-spots-on-earth/\\\",\\\"publishedDate\\\":\\\"2022-01-18\\\",\\\"author\\\":\\\"Matthew Jones\\\",\\\"text\\\":\\\"Wai’ale’ale, Kauai without much cloud cover. Photo: WiseTim . \\\\nMount Wai‘ale‘ale on the gorgeous island of Kaua‘i is often referred to as the wettest spot on earth. While the more than 5,000-foot tall mountain that’s often enshrouded in clouds does receive a tremendous amount of rainfall each year, it’s more accurately “one of” the wettest spots on earth. The average annual rainfall is around 500 inches but some spots on the planet, such as “Big Bog” on Maui, typically acquire even more moisture.\\\\nLegend Has It\\\\n Road to Waialeale Basin, Kauai. Photo: Bryce Edwards . \\\\nMany legends surround this mystical peak that includes native inhabitants climbing to the top to make offerings to the Hawaiian god, Kane. Remains of a heiau (place of worship constructed from rocks) at the summit confirm that some kind of ancient activity took place here, even though getting to the water-logged location seems nearly impossible.\\\\nWai‘ale‘ale, which is actually a dormant shield volcano, means “rippling or overflowing water” in Hawaiian. 
Consider yourself lucky if you capture a glimpse of the top of the sky-high summit during your vacation. The best opportunity is during crisp, early mornings before clouds form. But you also need to be in the proper location – Līhu‘e, Kapa‘a, and Wailua offer some of the best vantage points for Wai‘ale‘ale.\\\\nAs Seen From Kuilau Ridge\\\\n Views of Mount Waialeale from Kuilau Ridge, Kauai. Photo: Martin Bravenboer . \\\\nTo get even closer to the second-highest peak on the island you can traverse the Kuilau Ridge Trail in Wailua, located near the end of Kuamo‘o Road. About midway through the easy 2-mile roundtrip hike is a great spot for viewing the mountain.\\\\nWeeping Wall\\\\n Mount Waialeale “Wall of Tears” from the air. Photo: FH . \\\\nFurther down the road and well beyond the paved portion is another hike that takes daring souls to the basin of Wai‘ale‘ale called the “Weeping Wall” where numerous ribbons of waterfalls cascade from the summit. But don’t even consider this adventure unless you’re accompanied by an experienced local guide, as you can easily get lost since there is no maintained trail and there is always a high risk for flash flooding that creates dangerous encounters with rushing water.\\\\nViews from the Alakai Swamp Trail\\\\n Kilohana Overlook of Hanalei Bay. Photo: Hawaii Savvy . \\\\nThat said, there is another safer way to get close to this magical mountain – via the Alaka‘i Swamp Trail located in Koke‘e State Park. The difficult hike is about 8 miles roundtrip and you must start out extremely early to get to the midway point in time to see the vista before fog settles in. But those who see Wai‘ale‘ale uncovered at this prime vantage point, along with Hanalei Bay below, are in for a tremendous treat.\\\"}],\\\"requestId\\\":\\\"37fb09f547148c664026aa61f19c27ed\\\"}\",\n", - " \"name\": \"exa_search_results_json\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"1\"\n", - "}\n" - ] - } - ], - "source": [ - "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n", - "const modelGeneratedToolCall = {\n", - " args: {\n", - " input: \"what is the weather in wailea\"\n", - " },\n", - " id: \"1\",\n", - " name: tool.name,\n", - " type: \"tool_call\",\n", - "}\n", - "await tool.invoke(modelGeneratedToolCall)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling) and then calling it:\n", - "\n", - "```{=mdx}\n", - "\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\"\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", - "import { RunnableConfig } from \"@langchain/core/runnables\"\n", - "import { AIMessage } from \"@langchain/core/messages\"\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", \"You are a helpful assistant.\"],\n", - " [\"human\", \"{user_input}\"],\n", - " [\"placeholder\", \"{messages}\"],\n", - " ]\n", - ")\n", - "\n", - 
"// specifying tool_choice will force the model to call this tool.\n", - "const llmWithTools = llm.bindTools([tool], {\n", - " tool_choice: tool.name\n", - "})\n", - "\n", - "const llmChain = prompt.pipe(llmWithTools);\n", - "\n", - "const toolChain = async (userInput: string, config?: RunnableConfig): Promise => {\n", - " const input_ = { user_input: userInput };\n", - " const aiMsg = await llmChain.invoke(input_, config);\n", - " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n", - " return llmChain.invoke({ ...input_, messages: [aiMsg, ...toolMsgs] }, config);\n", - "};\n", - "\n", - "const toolChainResult = await toolChain(\"What is Anthropic's estimated revenue for 2024?\");" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Here we show how to insatiate an instance of the `ExaSearchResults` tool:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"exa_search_results_json\",\n", - " \"args\": {\n", - " \"input\": \"Anthropic revenue 2024 projections\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_cgC1G9vjXIjHub0TkVfxiDcr\"\n", - " }\n", - " ],\n", - " \"content\": \"\"\n", - "}\n" - ] - } - ], - "source": [ - "const { tool_calls, content } = toolChainResult;\n", - "\n", - "console.log(\"AIMessage\", JSON.stringify({\n", - " tool_calls,\n", - " content\n", - "}, null, 2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## With an Agent\n", - "\n", - "We can create LangChain tools which use the `ExaRetriever` and the `createRetrieverTool` Using these tools we can construct a simple search agent that can answer questions about any topic.\n", - "\n", - "We'll use LangGraph to create the agent. 
Make sure you have `@langchain/langgraph` installed:\n", - "\n", - "```{=mdx}\n", - "\n", - " @langchain/langgraph\n", - "\n", - "\n", - "Then, define the LLM to use with the agent\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llmForAgent = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "import Exa from \"exa-js\";\n", - "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", - "import { ExaRetriever } from \"@langchain/exa\";\n", - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", - "\n", - "// @lc-ts-ignore\n", - "const agentClient = new Exa(process.env.EXASEARCH_API_KEY);\n", - "\n", - "const exaRetrieverForAgent = new ExaRetriever({\n", - " // @lc-ts-ignore\n", - " client: agentClient,\n", - " searchArgs: {\n", - " numResults: 2,\n", - " },\n", - "});\n", - "\n", - "// Convert the ExaRetriever into a tool\n", - "const searchToolForAgent = createRetrieverTool(exaRetrieverForAgent, {\n", - " name: \"search\",\n", - " description: \"Get the contents of a webpage given a string search query.\",\n", - "});\n", - "\n", - "const toolsForAgent = [searchToolForAgent];\n", - "\n", - "const agentExecutor = createReactAgent({\n", - " llm: llmForAgent,\n", - " tools: toolsForAgent,\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { ExaSearchResults } from \"@langchain/exa\"\n", + "import Exa from \"exa-js\";\n", + "\n", + "// @lc-ts-ignore\n", + "const client = new Exa(process.env.EXASEARCH_API_KEY)\n", + "\n", + "const tool = new ExaSearchResults({\n", + " // @lc-ts-ignore\n", + " client,\n", + " searchArgs: {\n", + " numResults: 2,\n", + " }\n", + "})" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " name: 'search',\n", - " args: { query: 'fascinating article about cats' },\n", - " type: 'tool_call',\n", - " id: 'call_EcA0tmWsyNktO7HAsGQnqLVt'\n", - " }\n", - "]\n", - "No one seems to think brushing kitty's teeth is worth the hassle. Tyler Comrie / The Atlantic; Getty On the list of perfect pet parents, Mikel Delgado, a professional feline-behavior consultant, probably ranks high. The Ph.D. expert in animal cognition spends half an hour each evening playing with her three torbie cats, Ruby, Coriander, and Professor Scribbles. She’s trained them to take pills in gelatin capsules, just in case they eventually need meds. She even commissioned a screened-in backyard catio so that the girls can safely venture outside. Delgado would do anything for her cats—well, almost anything. “Guilty as charged,” Delgado told me. “I do not brush my cats’ teeth.” To be fair, most cat owners don’t—probably because they’re well aware that it’s weird, if not downright terrifying, to stick one’s fingers inside an ornery cat’s mouth. Reliable stats are scarce, but informal surveys suggest that less than 5 percent of owners give their cats the dental scrub-a-dub-dub—an estimate that the vets I spoke with endorse. “I’m always very shocked if someone says they brush their cat’s teeth,” says Anson Tsugawa, a veterinary dentist in California. 
When Steve Valeika, a vet in North Carolina, suggests the practice to his clients, many of them “look at me like I’ve totally lost it,” he told me. (This is where I out myself as one of the loons: My cats, Calvin and Hobbes, get their teeth brushed thrice weekly.) There certainly is an element of absurdity to all of this. Lions, after all, aren’t skulking the savannas for Oral-Bs. But our pets don’t share the diets and lifestyles of their wild counterparts, and their teeth are quite susceptible to the buildup of bacteria that can eventually invade the gums to trigger prolonged, painful disease. Studies suggest that most domestic cats older than four end up developing some sort of gum affliction; several experts told me that the rates of periodontal disease in household felines can exceed 80 percent. Left untreated, these ailments can cost a cat one or more teeth, or even spread their effects throughout the body, potentially compromising organs such as the kidneys, liver, and heart. To stave off kitty gum disease, veterinary guidelines and professionals generally recommend that owners clean their cat’s chompers daily, ideally for at least a minute, hitting every tooth. “That’s the gold standard,” says Santiago Peralta, a veterinary dentist at Cornell University. Even a gap of two or three days can leave enough time for tartar to cement, Jeanne Perrone, a veterinary-dentistry trainer in Florida, told me. But brushing feline teeth is also really, really, really hard. Most cats aren’t keen on having things shoved into their mouth, especially not bristly, sludge-covered sticks. (Dogs don’t always love cleanings either, but they’re at least used to engaging their owners with their mouths.) My old cat, Luna, was once so desperate to escape a brushing that she shrieked in my face, then peed all over the floor. Read: Why we think cats are psychopaths A niche industry has sprouted to ease the ordeal for hygiene-conscious humans: poultry-flavored toothpastes, cat-size toothbrushes, silicone scrubbers that fit on fingers. Sometimes the gear helps; when Chin-Sun Lee, a New Orleans–based writer, purchased malt-flavored toothpaste for her cat, Tuesday, he went bonkers for the stuff. Every morning, he comes trotting over just so he can lick the brush. Krissy Lyon, a neuroscientist at the Salk Institute, told me that one of her cats, Cocchi, is so crazy for his toothpaste that she and her partner have to “restrain him or lock him in a different room” while they’re brushing the teeth of their other cat, Noma. Tuesday (left) and Calvin (right) getting their teeth brushed. (Courtesy of Chin-Sun Lee and Katherine J. Wu) But tasty toothpaste isn’t a sufficient lure for all. Valeika, who extols the virtues of feline oral health, admitted that even his own cat, Boocat, doesn’t reap the benefits of his brushing expertise. He “tried hard-core for a couple weeks” when he adopted her seven years ago. But Boocat was too feisty to stand for such a thing. “She can be a real terror,” Valeika told me. “We once saw her chase a bear out of our yard.” Maybe Boocat is picking up on how odd the whole toothbrushing ritual can be. Even most American people weren’t regularly scrubbing their dentition until around the time of World War II. Vet dentistry, which borrowed principles from its human analogue, “is a relatively new discipline,” Peralta told me. 
“Thirty years ago, nobody was even thinking about dog or cat teeth.” Nor was it all that long ago that people across the country routinely let their pets sleep outside, eat only table scraps, and run hog wild through the streets. Now pets have become overly pampered, their accessories Gooped. Experts told me that they’ve seen all kinds of snake-oil hacks that purport to functionally replace feline toothbrushing—sprays, gels, toys, water additives, even calls to rub cat teeth with coconut oil. A lot of these products end up just cosmetically whitening teeth, temporarily freshening breath, or accomplishing nothing at all. If a super-simple, once-a-month magic bullet for dental hygiene existed, Tsugawa told me, “we’d be doing it for our own teeth.” There are probably a lot of un-toothbrushed cats out there who could be s-l-o-w-l-y taught to accept the process and maybe even enjoy it. Mary Berg, the president of Beyond the Crown Veterinary Education, told me that one of her colleagues trained her pet to relish the process so much that “she could just say ‘Brusha brusha brusha’ and the cat would come running.” But getting to that point can require weeks or months of conditioning. Berg recommends taking it day by day, introducing cats first to the toothpaste, then to getting one or two teeth touched, and on and on until they’re comfy with the whole set—always “with lots of praise and reward afterward,” she said. And that’s all before “you introduce that scary plastic thing.” Read: An offbeat approach to bonding with cats That’s a big ask for many owners, especially those who went the cat route because of the creatures’ rep for being low-maintenance. The consequences of skipping toothbrushing are also subtle because they don’t directly affect humans, Delgado told me. Miss a nail trimming, and the couch might pay the price. But cat teeth aren’t often glimpsed. Boocat, defender of the realm (Courtesy of Steve Valeika) The potential downsides of brushing, meanwhile, can be screamingly clear. On cat forums and Twitter, the cat-toothbrushing-phobic joke about losing their fingers. But what a lot of people are really afraid of sacrificing is their cat’s love. Broken trust can mar the relationship between owner and pet, Perrone said; people simply can’t communicate to skittish animals that this act of apparent torture is for their own good. Some cats never learn to deal. Even among veterinary experts, toothbrushing rituals are rare. Peralta and his wife just try to clear the bar of “at least once a week” with their own cat, Kit Kat; Berg and Perrone don’t brush their felines’ teeth at all. (Tsugawa does not currently own a cat, but he wasn’t a brusher when he did.) I’m no pro, but I feel a bit torn too. I never took the time to teach Calvin and Hobbes to see toothbrushing as a treat, and they can get pretty grumpy during the ritual itself. Valeika, the North Carolina vet, told me that seeing Boocat’s horrified reactions was the main thing that prompted him to quit the brush. “She would hate it if we were always doing that to her,” he said. “She really would just not be our pet anymore.” Feline-health experts know they’re losing this fight. “A lot of us are not even talking about toothbrushing anymore, because nobody’s doing it,” Berg said. Luckily, a few well-vetted alternatives to toothbrushing do exist. Berg and Delgado use specialty kibble that can cut down on plaque, and Perrone’s cat, Noriko, is into Greenies dental treats—both options that many pets may be more receptive to. 
Scientifically, nothing beats bona fide brushing. But realistically, this young art may already be obsolete. The best interventions, Delgado told me, will be the ones that people actually use. “If someone in my profession doesn’t brush their pet’s teeth,” Berg said, “I can’t blame anybody else.”\n", - "\n", - "Last night I watched the Netflix documentary, Inside the Mind of a Cat . It was a good show that demonstrated cats are thoughtful creatures and amazing predators and that they may have intellectual capacities on par with dogs.\n", - "In addition to learning about the research, I watched the show from my perspective as a mental behaviorist. A mental behavioral position is one that bridges and resolves the old divide in psychology between the behaviorists (who say that the mind is not a scientific construct and thus they just study behavior) and the mentalists (who say they study overt behaviors and then infer mental processes that are presumed to cause the behaviors).\n", - "The mental behavioral view says that animals like cats are “minded” creatures, and that they exhibit mental behaviors. To see this, imagine three cats in a tree; one is dead, one is anesthetized, and one is alive and well. Now drop the cats. The mental behaviorist says all three cats behave, but they exhibit different kinds of behaviors as they fall. The first cat falls through the air and lands on the ground. Its behavior is “physical” in that it is caused by the laws and forces of the material world as mapped by the physical sciences. The second cat also falls much like the first. However, if we were to peer inside the cat, we would see that its physiology is very active in maintaining its complex organization. The behaviors of the cat’s cells and organ systems that keep it alive are living behaviors studied by the biological sciences.\n", - "The third cat rotates, lands on its feet, and takes off. This is a different kind of behavior that cannot be well-described as either physical or biological. Rather, the proper description is mental. Mental behavior is a particular kind of functional awareness and responsivity that animals exhibit. Such behaviors are actions mediated by the brain and nervous system and the complex active body of the cat. More specifically, mental behaviors are a pattern of activity that emerges as function of a complex adaptive, sensory-motor looping system. \n", - "Just as we consider entities like cells that exhibit living behaviors to be alive, we should consider creatures like cats that exhibit mental behaviors to be “minded.” The mental behaviorist argues that mindedness is one of the most important concepts that both science and most modern people are blind to. I say “modern people” because, historically, most cultures have seen clearly that animals behave very differently when compared to plants or bacteria, and most cultures have had some kind of category for specifying this difference. For example, Aristotle divided the \"soul\" into the vegetative, animal, and human layers. In addition, the Great Chain of Being differentiated animals from the rest of the living world. However, our modern scientific system does not have a word for the way animals are in the world that makes them so different. We just call it \"animal behavior.\" And this gap is a major blind spot in our grammar for understanding the world around us.\n", - "Returning to the documentary, if we did not really look inside \"the mind\" of cats, what did the documentary actually show? 
It showed the mental behavioral investment patterns of cats. That is, it showed how cats demonstrate functional awareness and responsivity to various kinds of situations and stimuli. For example, it showed they clearly recognize and respond to their names, it showed they prefer their owners to strangers, and it showed they really do have a unique skill set in their capacity to land on their feet. In other words, it showed cats are minded creatures that exhibit complex adaptive patterns of mental behavior.\n", - "Although it becomes obvious when you know how to see the world this way (i.e., when I go for a walk in the woods, the mindedness of the squirrels, birds, and bees is as blatantly apparent to me as the living behaviors of the trees and mushrooms), it nevertheless takes practice to learn how to see mindedness in the world. However, we need to make the effort because failing to see mindedness in the world results in much blindness.\n", - "I found a fascinating article from The Atlantic that delves into the often-overlooked aspect of feline care: brushing cats' teeth. Despite being a crucial part of maintaining a cat's health, very few cat owners actually brush their pets' teeth. The article highlights the challenges and absurdities of this task, noting that less than 5% of cat owners engage in this practice. \n", - "\n", - "Veterinary experts emphasize the importance of dental hygiene for cats, as most domestic cats over the age of four develop some form of gum disease, which can lead to severe health issues if left untreated. The article discusses various tools and techniques, such as poultry-flavored toothpaste and cat-sized toothbrushes, that can make the process easier. However, it also acknowledges the difficulties and resistance many cats show towards toothbrushing.\n", - "\n", - "Interestingly, the article also touches on the broader context of pet care, noting how the expectations and practices around pet maintenance have evolved over time. 
It suggests that while brushing a cat's teeth is ideal, alternative methods like dental treats and specialized kibble can also be effective.\n", - "\n", - "For more details, you can read the full article [here](https://www.theatlantic.com/health/archive/2023/10/cat-dental-care-toothbrushing/675123/).\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "### [Invoke directly with args](/docs/concepts/tools)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\"results\":[{\"score\":0.16085544228553772,\"title\":\"Hawaii Weather Forecast\",\"id\":\"https://www.willyweather.com/hi/hawaii.html\",\"url\":\"https://www.willyweather.com/hi/hawaii.html\",\"publishedDate\":\"2023-01-01\",\"author\":\"\",\"text\":\"Get an account to remove ads View More Real-Time Extremes Nation State County Hottest 78.8 °FFaleolo Intl / Apia, Samoa, HI Coldest 51.6 °FBradshaw Army Air Field / Hawaii, HI Windiest 12.7mphBradshaw Army Air Field / Hawaii, HI Most Humid 100%Hilo, Hilo International Airport, HI Least Humid 73.32%Kailua / Kona, Keahole Airport, HI Highest Pressure 1030.5 hPaBradshaw Army Air Field / Hawaii, HI Lowest Pressure 1008 hPaFaleolo Intl / Apia, Samoa, HI\"},{\"score\":0.1591680943965912,\"title\":\"The Hawaii Climate To Prepare For Your Maui Wedding\",\"id\":\"https://mymauiwedding.weebly.com/blog6/the-hawaii-climate-to-prepare-for-your-maui-wedding\",\"url\":\"https://mymauiwedding.weebly.com/blog6/the-hawaii-climate-to-prepare-for-your-maui-wedding\",\"publishedDate\":\"2012-04-26\",\"author\":\"\",\"text\":\"Since the The hawaiian islands environment is very constant throughout the season with only slight heat range changes, you can travel there any season. While the moisture is very high, the continuous exotic sea breezes keep the circumstances very relaxed throughout the season. During the day you will be relaxed in a T-shirt or an Aloha clothing and a couple of shoes. Once the sun places you will probably want to wear a light coat since the circumstances can fall around ten levels. The protecting impact of the hills and the variations in climate at various levels make a variety of environment areas. The unique micro-climates are specific for the internal valleys, hill hills and seashores in The hawaiian islands. Located at the side of the exotic location and due to year-round heated sea exterior circumstances, which keep the overlying environment heated, The hawaiian islands has only two circumstances, both of them heated and one with a little bit more rain. Hawaii Climate During Summer Between the several weeks of Apr and Nov the environment is more dry and hotter with the conditions including 75-88. In the summer time the northern eastern business gusts of wind carry most of the rain to the destinations leeward part, which delivers a welcome comfort from the hot and dry climate.The conditions you will encounter will be proportional to where you are on the destinations. If you are on the edges that are protected from the gusts of wind, the the southeast part of and European factors, you will encounters hot and dry circumstances. If you are on the windward factors, northern or eastern, you will obtain the complete power of the gusts of wind and encounter moister and shade circumstances. Go windward for exotic circumstances and leeward for an dry environment. 
Hawaii Climate During Winter From Dec to Apr it is just a little bit chilly, with conditions between 68-80 F. Winter season is regarded rain. The biggest down pours come between Oct and Apr (the hoo'ilo season). Though stormy weather may be common, they usually complete through the destinations quickly and without event. There are more dark times to mess up your laying in the sun, but it hardly ever down pours more than 3 times in a row in one identify. Winter is search period, so if you're a search participant, come to the Northern Coast in Explore to get the ideal trend. Also, whale viewing period is at the end of winter, during Jan to Apr, so make sure you are here if you want to see these spectacular creatures! Hawaii Climate is Greatly Influenced by the Mountains The hills around the destinations are accountable for the large variety of circumstances. As an example, Kauai's Mt. Waialele is one of the rainiest destinations on the world. Mt. Waialele gets over 420 inches large of rainfall each season, but just a few kilometers down the line, Waimea Canyn is absolutely dry and has been nicknamed the \\\"Grand Canyn of the Pacific\\\". On Big Isle The hawaiian destinations, Hilo is one of the rainiest places in the nation, with 180 inches large of rainfall a season. But Puako, only 60 kilometers away, gets less than 6 inches large of rainfall. If you choose to discover the organic charm discovered at greater levels such as Mauna Kea, use long jeans and several levels of awesome climate outfits. The heat variety in the greater destinations falls 3.5 levels for every 1,000 toes above sea level.Watching the dawn from Mt Haleakala's peak is a incredible concept, but be sure to package up with neckties and work gloves that will keep you comfortable. The circumstances at the peak can fall to 30 F!. Also know that there is less security from the sun at greater levels so be sure to utilize the sun display liberally and use eyewear and a hat. The environment can modify greatly in just a few time when you are in the hills. The exclusive The hawaiian destinations environment makes it possible to sun shower on the Kona Shore and ski on Mauna Kea in the same day.\"}],\"requestId\":\"2145d8de65373c70250400c2c9e8eb13\"}\n" + ] + } + ], + "source": [ + "await tool.invoke(\"what is the weather in wailea?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### [Invoke with ToolCall](/docs/concepts/tools)\n", + "\n", + "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMessage {\n", + " \"content\": \"{\\\"results\\\":[{\\\"score\\\":0.12955062091350555,\\\"title\\\":\\\"Urban Dictionary: Waianae\\\",\\\"id\\\":\\\"https://www.urbandictionary.com/define.php?term=Waianae\\\",\\\"url\\\":\\\"https://www.urbandictionary.com/define.php?term=Waianae\\\",\\\"publishedDate\\\":\\\"2006-04-19\\\",\\\"author\\\":\\\"\\\",\\\"text\\\":\\\"Hot but good time for go beach ,in this part of Hawaii you HAVE to have respect ,with people and their stuff, but Some people like act dumb and stupid so that’s the only thing that make Waianae look bad , but foreal kine in this part of Hawaii we have respect and if we don’t get that respect you gon expect no respect back . Get the Waianae mug. Advertise here for $5/day Located on the west end of Oahu. 
Waianae gets a bad reputation for being poor, dirty, scary, etc. Its hot and dry out west and the beaches are super nice. Makaha, Yokes, and Pray for Sex are some great beaches to name a few. Mostly locals and the majority of the homeless live out here. Even though its a little rough, the people have alot of aloha who live out here. Most important thing here is to have respect for other people and their stuff. Get the WAIANAE mug. Advertise here for $5/day When going too the island of Honolulu if you go to an amazing part for the island called Waianae, say ho sole u know where can find 1 top banggahh like get 1 Waianae special. Then say shoots boto by August 1, 2021 Get the Waianae special mug.\\\"},{\\\"score\\\":0.12563708424568176,\\\"title\\\":\\\"Mount Waialeale: One of the Wettest Spots on Earth | Hawaii.com\\\",\\\"id\\\":\\\"https://www.hawaii.com/trip-ideas/mount-waialeale-one-of-the-wettest-spots-on-earth/\\\",\\\"url\\\":\\\"https://www.hawaii.com/trip-ideas/mount-waialeale-one-of-the-wettest-spots-on-earth/\\\",\\\"publishedDate\\\":\\\"2022-01-18\\\",\\\"author\\\":\\\"Matthew Jones\\\",\\\"text\\\":\\\"Wai’ale’ale, Kauai without much cloud cover. Photo: WiseTim . \\\\nMount Wai‘ale‘ale on the gorgeous island of Kaua‘i is often referred to as the wettest spot on earth. While the more than 5,000-foot tall mountain that’s often enshrouded in clouds does receive a tremendous amount of rainfall each year, it’s more accurately “one of” the wettest spots on earth. The average annual rainfall is around 500 inches but some spots on the planet, such as “Big Bog” on Maui, typically acquire even more moisture.\\\\nLegend Has It\\\\n Road to Waialeale Basin, Kauai. Photo: Bryce Edwards . \\\\nMany legends surround this mystical peak that includes native inhabitants climbing to the top to make offerings to the Hawaiian god, Kane. Remains of a heiau (place of worship constructed from rocks) at the summit confirm that some kind of ancient activity took place here, even though getting to the water-logged location seems nearly impossible.\\\\nWai‘ale‘ale, which is actually a dormant shield volcano, means “rippling or overflowing water” in Hawaiian. Consider yourself lucky if you capture a glimpse of the top of the sky-high summit during your vacation. The best opportunity is during crisp, early mornings before clouds form. But you also need to be in the proper location – Līhu‘e, Kapa‘a, and Wailua offer some of the best vantage points for Wai‘ale‘ale.\\\\nAs Seen From Kuilau Ridge\\\\n Views of Mount Waialeale from Kuilau Ridge, Kauai. Photo: Martin Bravenboer . \\\\nTo get even closer to the second-highest peak on the island you can traverse the Kuilau Ridge Trail in Wailua, located near the end of Kuamo‘o Road. About midway through the easy 2-mile roundtrip hike is a great spot for viewing the mountain.\\\\nWeeping Wall\\\\n Mount Waialeale “Wall of Tears” from the air. Photo: FH . \\\\nFurther down the road and well beyond the paved portion is another hike that takes daring souls to the basin of Wai‘ale‘ale called the “Weeping Wall” where numerous ribbons of waterfalls cascade from the summit. But don’t even consider this adventure unless you’re accompanied by an experienced local guide, as you can easily get lost since there is no maintained trail and there is always a high risk for flash flooding that creates dangerous encounters with rushing water.\\\\nViews from the Alakai Swamp Trail\\\\n Kilohana Overlook of Hanalei Bay. Photo: Hawaii Savvy . 
\\\\nThat said, there is another safer way to get close to this magical mountain – via the Alaka‘i Swamp Trail located in Koke‘e State Park. The difficult hike is about 8 miles roundtrip and you must start out extremely early to get to the midway point in time to see the vista before fog settles in. But those who see Wai‘ale‘ale uncovered at this prime vantage point, along with Hanalei Bay below, are in for a tremendous treat.\\\"}],\\\"requestId\\\":\\\"37fb09f547148c664026aa61f19c27ed\\\"}\",\n", + " \"name\": \"exa_search_results_json\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"1\"\n", + "}\n" + ] + } + ], + "source": [ + "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n", + "const modelGeneratedToolCall = {\n", + " args: {\n", + " input: \"what is the weather in wailea\"\n", + " },\n", + " id: \"1\",\n", + " name: tool.name,\n", + " type: \"tool_call\",\n", + "}\n", + "await tool.invoke(modelGeneratedToolCall)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling) and then calling it:\n", + "\n", + "```{=mdx}\n", + "\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "import { RunnableConfig } from \"@langchain/core/runnables\"\n", + "import { AIMessage } from \"@langchain/core/messages\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"You are a helpful assistant.\"],\n", + " [\"human\", \"{user_input}\"],\n", + " [\"placeholder\", \"{messages}\"],\n", + " ]\n", + ")\n", + "\n", + "// specifying tool_choice will force the model to call this tool.\n", + "const llmWithTools = llm.bindTools([tool], {\n", + " tool_choice: tool.name\n", + "})\n", + "\n", + "const llmChain = prompt.pipe(llmWithTools);\n", + "\n", + "const toolChain = async (userInput: string, config?: RunnableConfig): Promise => {\n", + " const input_ = { user_input: userInput };\n", + " const aiMsg = await llmChain.invoke(input_, config);\n", + " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n", + " return llmChain.invoke({ ...input_, messages: [aiMsg, ...toolMsgs] }, config);\n", + "};\n", + "\n", + "const toolChainResult = await toolChain(\"What is Anthropic's estimated revenue for 2024?\");" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"exa_search_results_json\",\n", + " \"args\": {\n", + " \"input\": \"Anthropic revenue 2024 projections\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_cgC1G9vjXIjHub0TkVfxiDcr\"\n", + " }\n", + " ],\n", + " \"content\": \"\"\n", + "}\n" + ] + } + ], + "source": [ + "const { tool_calls, content } = toolChainResult;\n", + "\n", + "console.log(\"AIMessage\", 
JSON.stringify({\n", + " tool_calls,\n", + " content\n", + "}, null, 2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## With an Agent\n", + "\n", + "We can create LangChain tools which use the `ExaRetriever` and the `createRetrieverTool` Using these tools we can construct a simple search agent that can answer questions about any topic.\n", + "\n", + "We'll use LangGraph to create the agent. Make sure you have `@langchain/langgraph` installed:\n", + "\n", + "```{=mdx}\n", + "\n", + " @langchain/langgraph\n", + "\n", + "\n", + "Then, define the LLM to use with the agent\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmForAgent = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "import Exa from \"exa-js\";\n", + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", + "import { ExaRetriever } from \"@langchain/exa\";\n", + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "\n", + "// @lc-ts-ignore\n", + "const agentClient = new Exa(process.env.EXASEARCH_API_KEY);\n", + "\n", + "const exaRetrieverForAgent = new ExaRetriever({\n", + " // @lc-ts-ignore\n", + " client: agentClient,\n", + " searchArgs: {\n", + " numResults: 2,\n", + " },\n", + "});\n", + "\n", + "// Convert the ExaRetriever into a tool\n", + "const searchToolForAgent = createRetrieverTool(exaRetrieverForAgent, {\n", + " name: \"search\",\n", + " description: \"Get the contents of a webpage given a string search query.\",\n", + "});\n", + "\n", + "const toolsForAgent = [searchToolForAgent];\n", + "\n", + "const agentExecutor = createReactAgent({\n", + " llm: llmForAgent,\n", + " tools: toolsForAgent,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'search',\n", + " args: { query: 'fascinating article about cats' },\n", + " type: 'tool_call',\n", + " id: 'call_EcA0tmWsyNktO7HAsGQnqLVt'\n", + " }\n", + "]\n", + "No one seems to think brushing kitty's teeth is worth the hassle. Tyler Comrie / The Atlantic; Getty On the list of perfect pet parents, Mikel Delgado, a professional feline-behavior consultant, probably ranks high. The Ph.D. expert in animal cognition spends half an hour each evening playing with her three torbie cats, Ruby, Coriander, and Professor Scribbles. She’s trained them to take pills in gelatin capsules, just in case they eventually need meds. She even commissioned a screened-in backyard catio so that the girls can safely venture outside. Delgado would do anything for her cats—well, almost anything. “Guilty as charged,” Delgado told me. “I do not brush my cats’ teeth.” To be fair, most cat owners don’t—probably because they’re well aware that it’s weird, if not downright terrifying, to stick one’s fingers inside an ornery cat’s mouth. Reliable stats are scarce, but informal surveys suggest that less than 5 percent of owners give their cats the dental scrub-a-dub-dub—an estimate that the vets I spoke with endorse. “I’m always very shocked if someone says they brush their cat’s teeth,” says Anson Tsugawa, a veterinary dentist in California. 
When Steve Valeika, a vet in North Carolina, suggests the practice to his clients, many of them “look at me like I’ve totally lost it,” he told me. (This is where I out myself as one of the loons: My cats, Calvin and Hobbes, get their teeth brushed thrice weekly.) There certainly is an element of absurdity to all of this. Lions, after all, aren’t skulking the savannas for Oral-Bs. But our pets don’t share the diets and lifestyles of their wild counterparts, and their teeth are quite susceptible to the buildup of bacteria that can eventually invade the gums to trigger prolonged, painful disease. Studies suggest that most domestic cats older than four end up developing some sort of gum affliction; several experts told me that the rates of periodontal disease in household felines can exceed 80 percent. Left untreated, these ailments can cost a cat one or more teeth, or even spread their effects throughout the body, potentially compromising organs such as the kidneys, liver, and heart. To stave off kitty gum disease, veterinary guidelines and professionals generally recommend that owners clean their cat’s chompers daily, ideally for at least a minute, hitting every tooth. “That’s the gold standard,” says Santiago Peralta, a veterinary dentist at Cornell University. Even a gap of two or three days can leave enough time for tartar to cement, Jeanne Perrone, a veterinary-dentistry trainer in Florida, told me. But brushing feline teeth is also really, really, really hard. Most cats aren’t keen on having things shoved into their mouth, especially not bristly, sludge-covered sticks. (Dogs don’t always love cleanings either, but they’re at least used to engaging their owners with their mouths.) My old cat, Luna, was once so desperate to escape a brushing that she shrieked in my face, then peed all over the floor. Read: Why we think cats are psychopaths A niche industry has sprouted to ease the ordeal for hygiene-conscious humans: poultry-flavored toothpastes, cat-size toothbrushes, silicone scrubbers that fit on fingers. Sometimes the gear helps; when Chin-Sun Lee, a New Orleans–based writer, purchased malt-flavored toothpaste for her cat, Tuesday, he went bonkers for the stuff. Every morning, he comes trotting over just so he can lick the brush. Krissy Lyon, a neuroscientist at the Salk Institute, told me that one of her cats, Cocchi, is so crazy for his toothpaste that she and her partner have to “restrain him or lock him in a different room” while they’re brushing the teeth of their other cat, Noma. Tuesday (left) and Calvin (right) getting their teeth brushed. (Courtesy of Chin-Sun Lee and Katherine J. Wu) But tasty toothpaste isn’t a sufficient lure for all. Valeika, who extols the virtues of feline oral health, admitted that even his own cat, Boocat, doesn’t reap the benefits of his brushing expertise. He “tried hard-core for a couple weeks” when he adopted her seven years ago. But Boocat was too feisty to stand for such a thing. “She can be a real terror,” Valeika told me. “We once saw her chase a bear out of our yard.” Maybe Boocat is picking up on how odd the whole toothbrushing ritual can be. Even most American people weren’t regularly scrubbing their dentition until around the time of World War II. Vet dentistry, which borrowed principles from its human analogue, “is a relatively new discipline,” Peralta told me. 
“Thirty years ago, nobody was even thinking about dog or cat teeth.” Nor was it all that long ago that people across the country routinely let their pets sleep outside, eat only table scraps, and run hog wild through the streets. Now pets have become overly pampered, their accessories Gooped. Experts told me that they’ve seen all kinds of snake-oil hacks that purport to functionally replace feline toothbrushing—sprays, gels, toys, water additives, even calls to rub cat teeth with coconut oil. A lot of these products end up just cosmetically whitening teeth, temporarily freshening breath, or accomplishing nothing at all. If a super-simple, once-a-month magic bullet for dental hygiene existed, Tsugawa told me, “we’d be doing it for our own teeth.” There are probably a lot of un-toothbrushed cats out there who could be s-l-o-w-l-y taught to accept the process and maybe even enjoy it. Mary Berg, the president of Beyond the Crown Veterinary Education, told me that one of her colleagues trained her pet to relish the process so much that “she could just say ‘Brusha brusha brusha’ and the cat would come running.” But getting to that point can require weeks or months of conditioning. Berg recommends taking it day by day, introducing cats first to the toothpaste, then to getting one or two teeth touched, and on and on until they’re comfy with the whole set—always “with lots of praise and reward afterward,” she said. And that’s all before “you introduce that scary plastic thing.” Read: An offbeat approach to bonding with cats That’s a big ask for many owners, especially those who went the cat route because of the creatures’ rep for being low-maintenance. The consequences of skipping toothbrushing are also subtle because they don’t directly affect humans, Delgado told me. Miss a nail trimming, and the couch might pay the price. But cat teeth aren’t often glimpsed. Boocat, defender of the realm (Courtesy of Steve Valeika) The potential downsides of brushing, meanwhile, can be screamingly clear. On cat forums and Twitter, the cat-toothbrushing-phobic joke about losing their fingers. But what a lot of people are really afraid of sacrificing is their cat’s love. Broken trust can mar the relationship between owner and pet, Perrone said; people simply can’t communicate to skittish animals that this act of apparent torture is for their own good. Some cats never learn to deal. Even among veterinary experts, toothbrushing rituals are rare. Peralta and his wife just try to clear the bar of “at least once a week” with their own cat, Kit Kat; Berg and Perrone don’t brush their felines’ teeth at all. (Tsugawa does not currently own a cat, but he wasn’t a brusher when he did.) I’m no pro, but I feel a bit torn too. I never took the time to teach Calvin and Hobbes to see toothbrushing as a treat, and they can get pretty grumpy during the ritual itself. Valeika, the North Carolina vet, told me that seeing Boocat’s horrified reactions was the main thing that prompted him to quit the brush. “She would hate it if we were always doing that to her,” he said. “She really would just not be our pet anymore.” Feline-health experts know they’re losing this fight. “A lot of us are not even talking about toothbrushing anymore, because nobody’s doing it,” Berg said. Luckily, a few well-vetted alternatives to toothbrushing do exist. Berg and Delgado use specialty kibble that can cut down on plaque, and Perrone’s cat, Noriko, is into Greenies dental treats—both options that many pets may be more receptive to. 
Scientifically, nothing beats bona fide brushing. But realistically, this young art may already be obsolete. The best interventions, Delgado told me, will be the ones that people actually use. “If someone in my profession doesn’t brush their pet’s teeth,” Berg said, “I can’t blame anybody else.”\n", + "\n", + "Last night I watched the Netflix documentary, Inside the Mind of a Cat . It was a good show that demonstrated cats are thoughtful creatures and amazing predators and that they may have intellectual capacities on par with dogs.\n", + "In addition to learning about the research, I watched the show from my perspective as a mental behaviorist. A mental behavioral position is one that bridges and resolves the old divide in psychology between the behaviorists (who say that the mind is not a scientific construct and thus they just study behavior) and the mentalists (who say they study overt behaviors and then infer mental processes that are presumed to cause the behaviors).\n", + "The mental behavioral view says that animals like cats are “minded” creatures, and that they exhibit mental behaviors. To see this, imagine three cats in a tree; one is dead, one is anesthetized, and one is alive and well. Now drop the cats. The mental behaviorist says all three cats behave, but they exhibit different kinds of behaviors as they fall. The first cat falls through the air and lands on the ground. Its behavior is “physical” in that it is caused by the laws and forces of the material world as mapped by the physical sciences. The second cat also falls much like the first. However, if we were to peer inside the cat, we would see that its physiology is very active in maintaining its complex organization. The behaviors of the cat’s cells and organ systems that keep it alive are living behaviors studied by the biological sciences.\n", + "The third cat rotates, lands on its feet, and takes off. This is a different kind of behavior that cannot be well-described as either physical or biological. Rather, the proper description is mental. Mental behavior is a particular kind of functional awareness and responsivity that animals exhibit. Such behaviors are actions mediated by the brain and nervous system and the complex active body of the cat. More specifically, mental behaviors are a pattern of activity that emerges as function of a complex adaptive, sensory-motor looping system. \n", + "Just as we consider entities like cells that exhibit living behaviors to be alive, we should consider creatures like cats that exhibit mental behaviors to be “minded.” The mental behaviorist argues that mindedness is one of the most important concepts that both science and most modern people are blind to. I say “modern people” because, historically, most cultures have seen clearly that animals behave very differently when compared to plants or bacteria, and most cultures have had some kind of category for specifying this difference. For example, Aristotle divided the \"soul\" into the vegetative, animal, and human layers. In addition, the Great Chain of Being differentiated animals from the rest of the living world. However, our modern scientific system does not have a word for the way animals are in the world that makes them so different. We just call it \"animal behavior.\" And this gap is a major blind spot in our grammar for understanding the world around us.\n", + "Returning to the documentary, if we did not really look inside \"the mind\" of cats, what did the documentary actually show? 
It showed the mental behavioral investment patterns of cats. That is, it showed how cats demonstrate functional awareness and responsivity to various kinds of situations and stimuli. For example, it showed they clearly recognize and respond to their names, it showed they prefer their owners to strangers, and it showed they really do have a unique skill set in their capacity to land on their feet. In other words, it showed cats are minded creatures that exhibit complex adaptive patterns of mental behavior.\n", + "Although it becomes obvious when you know how to see the world this way (i.e., when I go for a walk in the woods, the mindedness of the squirrels, birds, and bees is as blatantly apparent to me as the living behaviors of the trees and mushrooms), it nevertheless takes practice to learn how to see mindedness in the world. However, we need to make the effort because failing to see mindedness in the world results in much blindness.\n", + "I found a fascinating article from The Atlantic that delves into the often-overlooked aspect of feline care: brushing cats' teeth. Despite being a crucial part of maintaining a cat's health, very few cat owners actually brush their pets' teeth. The article highlights the challenges and absurdities of this task, noting that less than 5% of cat owners engage in this practice. \n", + "\n", + "Veterinary experts emphasize the importance of dental hygiene for cats, as most domestic cats over the age of four develop some form of gum disease, which can lead to severe health issues if left untreated. The article discusses various tools and techniques, such as poultry-flavored toothpaste and cat-sized toothbrushes, that can make the process easier. However, it also acknowledges the difficulties and resistance many cats show towards toothbrushing.\n", + "\n", + "Interestingly, the article also touches on the broader context of pet care, noting how the expectations and practices around pet maintenance have evolved over time. It suggests that while brushing a cat's teeth is ideal, alternative methods like dental treats and specialized kibble can also be effective.\n", + "\n", + "For more details, you can read the full article [here](https://www.theatlantic.com/health/archive/2023/10/cat-dental-care-toothbrushing/675123/).\n" + ] + } + ], + "source": [ + "const exampleQuery = \"Summarize for me a fascinating article about cats.\"\n", + "\n", + "const events = await agentExecutor.stream(\n", + " { messages: [\n", + " [\n", + " \"system\",\n", + " `You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. 
Cite your sources.`,\n", + " ],\n", + " [\"human\", exampleQuery],\n", + " ] },\n", + " { streamMode: \"values\", }\n", + ")\n", + "\n", + "for await (const event of events) {\n", + " const lastMsg = event.messages[event.messages.length - 1];\n", + " if (lastMsg.tool_calls?.length) {\n", + " console.dir(lastMsg.tool_calls, { depth: null });\n", + " } else if (lastMsg.content) {\n", + " console.log(lastMsg.content);\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Related\n", + "\n", + "- Tool [conceptual guide](/docs/concepts/tools)\n", + "- Tool [how-to guides](/docs/how_to/#tools)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `ExaSearchResults` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_exa.ExaSearchResults.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const exampleQuery = \"Summarize for me a fascinating article about cats.\"\n", - "\n", - "const events = await agentExecutor.stream(\n", - " { messages: [\n", - " [\n", - " \"system\",\n", - " `You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources.`,\n", - " ],\n", - " [\"human\", exampleQuery],\n", - " ] },\n", - " { streamMode: \"values\", }\n", - ")\n", - "\n", - "for await (const event of events) {\n", - " const lastMsg = event.messages[event.messages.length - 1];\n", - " if (lastMsg.tool_calls?.length) {\n", - " console.dir(lastMsg.tool_calls, { depth: null });\n", - " } else if (lastMsg.content) {\n", - " console.log(lastMsg.content);\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Related\n", - "\n", - "- Tool [conceptual guide](/docs/concepts/#tools)\n", - "- Tool [how-to guides](/docs/how_to/#tools)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `ExaSearchResults` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_exa.ExaSearchResults.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/tools/gmail.mdx b/docs/core_docs/docs/integrations/tools/gmail.mdx index fce3c49d72a1..1d6b42ff4658 100644 --- a/docs/core_docs/docs/integrations/tools/gmail.mdx +++ b/docs/core_docs/docs/integrations/tools/gmail.mdx @@ -32,5 +32,5 @@ import ToolExample from "@examples/tools/gmail.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git 
a/docs/core_docs/docs/integrations/tools/google_calendar.mdx b/docs/core_docs/docs/integrations/tools/google_calendar.mdx index 800605123d00..14cfa036c75f 100644 --- a/docs/core_docs/docs/integrations/tools/google_calendar.mdx +++ b/docs/core_docs/docs/integrations/tools/google_calendar.mdx @@ -32,5 +32,5 @@ npm install @langchain/openai @langchain/core ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/google_places.mdx b/docs/core_docs/docs/integrations/tools/google_places.mdx index 4d2c48386efe..e01b18f810dd 100644 --- a/docs/core_docs/docs/integrations/tools/google_places.mdx +++ b/docs/core_docs/docs/integrations/tools/google_places.mdx @@ -31,5 +31,5 @@ import ToolExample from "@examples/tools/google_places.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/google_routes.mdx b/docs/core_docs/docs/integrations/tools/google_routes.mdx index 0ed4b2548166..19f0112d6fb7 100644 --- a/docs/core_docs/docs/integrations/tools/google_routes.mdx +++ b/docs/core_docs/docs/integrations/tools/google_routes.mdx @@ -31,5 +31,5 @@ import ToolExample from "@examples/tools/google_routes.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/index.mdx b/docs/core_docs/docs/integrations/tools/index.mdx index b1ea32d458b2..5dc53baf1784 100644 --- a/docs/core_docs/docs/integrations/tools/index.mdx +++ b/docs/core_docs/docs/integrations/tools/index.mdx @@ -7,9 +7,9 @@ sidebar_class_name: hidden import { CategoryTable, IndexTable } from "@theme/FeatureTables"; -[Tools](/docs/concepts/#tools) are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models. +[Tools](/docs/concepts/tools) are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models. -A [toolkit](/docs/concepts#toolkits) is a collection of tools meant to be used together. For a list of toolkit integrations, see [this page](/docs/integrations/toolkits/). +A [toolkit](/docs/concepts/tools/#toolkits) is a collection of tools meant to be used together. For a list of toolkit integrations, see [this page](/docs/integrations/toolkits/). :::info If you'd like to write your own tool, see [this how-to](/docs/how_to/custom_tools/). If you'd like to contribute an integration, see [Contributing integrations](/docs/contributing). 
diff --git a/docs/core_docs/docs/integrations/tools/jigsawstack.mdx b/docs/core_docs/docs/integrations/tools/jigsawstack.mdx index 9c96f4c4792c..54691e7d8672 100644 --- a/docs/core_docs/docs/integrations/tools/jigsawstack.mdx +++ b/docs/core_docs/docs/integrations/tools/jigsawstack.mdx @@ -159,5 +159,5 @@ console.log(res.output); ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/lambda_agent.mdx b/docs/core_docs/docs/integrations/tools/lambda_agent.mdx index d53160ccfc31..8ed61818bd1d 100644 --- a/docs/core_docs/docs/integrations/tools/lambda_agent.mdx +++ b/docs/core_docs/docs/integrations/tools/lambda_agent.mdx @@ -57,5 +57,5 @@ console.log(result); ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx b/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx index c88aafb338e3..b809fb268370 100644 --- a/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx +++ b/docs/core_docs/docs/integrations/tools/pyinterpreter.mdx @@ -29,5 +29,5 @@ npm install @langchain/openai @langchain/core ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/searchapi.mdx b/docs/core_docs/docs/integrations/tools/searchapi.mdx index e85f0b720733..f11c8c2034a4 100644 --- a/docs/core_docs/docs/integrations/tools/searchapi.mdx +++ b/docs/core_docs/docs/integrations/tools/searchapi.mdx @@ -28,5 +28,5 @@ npm install @langchain/openai @langchain/core ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/searxng.mdx b/docs/core_docs/docs/integrations/tools/searxng.mdx index 13c11f60e799..e2e543a10632 100644 --- a/docs/core_docs/docs/integrations/tools/searxng.mdx +++ b/docs/core_docs/docs/integrations/tools/searxng.mdx @@ -26,5 +26,5 @@ npm install @langchain/openai @langchain/core ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/stackexchange.mdx b/docs/core_docs/docs/integrations/tools/stackexchange.mdx index 602003fc067c..cff78e0da8b4 100644 --- a/docs/core_docs/docs/integrations/tools/stackexchange.mdx +++ b/docs/core_docs/docs/integrations/tools/stackexchange.mdx @@ -16,5 +16,5 @@ import ToolExample from "@examples/tools/stackexchange.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/tavily_search.ipynb b/docs/core_docs/docs/integrations/tools/tavily_search.ipynb index 51ed543db0bb..327c1f677c2c 100644 --- a/docs/core_docs/docs/integrations/tools/tavily_search.ipynb +++ b/docs/core_docs/docs/integrations/tools/tavily_search.ipynb @@ -1,306 +1,306 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "10238e62-3465-4973-9279-606cbb7ccf16", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Tavily Search\n", - "---" - ] - }, - { - 
"cell_type": "markdown", - "id": "a6f91f20", - "metadata": {}, - "source": [ - "# TavilySearchResults\n", - "\n", - "[Tavily](https://tavily.com/) Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.\n", - "\n", - "This guide provides a quick overview for getting started with the Tavily search results [tool](/docs/integrations/tools/). For detailed documentation of all `TavilySearchResults` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html).\n", - "\n", - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/tools/tavily_search/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [TavilySearchResults](https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "The integration lives in the `@langchain/community` package, which you can install as shown below:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Set up an API key [here](https://app.tavily.com) and set it as an environment variable named `TAVILY_API_KEY`.\n", - "\n", - "```typescript\n", - "process.env.TAVILY_API_KEY = \"YOUR_API_KEY\"\n", - "```\n", - "\n", - "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", - "\n", - "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "You can import and instantiate an instance of the `TavilySearchResults` tool like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64", - "metadata": {}, - "outputs": [], - "source": [ - "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\";\n", - "\n", - "const tool = new TavilySearchResults({\n", - " maxResults: 2,\n", - " // ...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "74147a1a", - "metadata": {}, - "source": [ - "## Invocation\n", - "\n", - "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)\n", - "\n", - "You can invoke the tool directly like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{\"title\":\"San Francisco, CA Current Weather | AccuWeather\",\"url\":\"https://www.accuweather.com/en/us/san-francisco/94103/current-weather/347629\",\"content\":\"Current weather in San Francisco, CA. 
Check current conditions in San Francisco, CA with radar, hourly, and more.\",\"score\":0.9428234,\"raw_content\":null},{\"title\":\"National Weather Service\",\"url\":\"https://forecast.weather.gov/zipcity.php?inputstring=San+Francisco,CA\",\"content\":\"NOAA National Weather Service. Current conditions at SAN FRANCISCO DOWNTOWN (SFOC1) Lat: 37.77056°NLon: 122.42694°WElev: 150.0ft.\",\"score\":0.94261247,\"raw_content\":null}]\n" - ] - } - ], - "source": [ - "await tool.invoke({\n", - " input: \"what is the current weather in SF?\"\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "d6e73897", - "metadata": {}, - "source": [ - "### [Invoke with ToolCall](/docs/concepts/#invoke-with-toolcall)\n", - "\n", - "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "f90e33a7", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "10238e62-3465-4973-9279-606cbb7ccf16", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Tavily Search\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "ToolMessage {\n", - " \"content\": \"[{\\\"title\\\":\\\"Weather in San Francisco\\\",\\\"url\\\":\\\"https://www.weatherapi.com/\\\",\\\"content\\\":\\\"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1722967498, 'localtime': '2024-08-06 11:04'}, 'current': {'last_updated_epoch': 1722967200, 'last_updated': '2024-08-06 11:00', 'temp_c': 18.4, 'temp_f': 65.2, 'is_day': 1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png', 'code': 1000}, 'wind_mph': 2.9, 'wind_kph': 4.7, 'wind_degree': 275, 'wind_dir': 'W', 'pressure_mb': 1015.0, 'pressure_in': 29.97, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 64, 'cloud': 2, 'feelslike_c': 18.5, 'feelslike_f': 65.2, 'windchill_c': 18.5, 'windchill_f': 65.2, 'heatindex_c': 18.4, 'heatindex_f': 65.2, 'dewpoint_c': 11.7, 'dewpoint_f': 53.1, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 4.3, 'gust_kph': 7.0}}\\\",\\\"score\\\":0.9983156,\\\"raw_content\\\":null},{\\\"title\\\":\\\"Weather in San Francisco in June 2024 - Detailed Forecast\\\",\\\"url\\\":\\\"https://www.easeweather.com/north-america/united-states/california/city-and-county-of-san-francisco/san-francisco/june\\\",\\\"content\\\":\\\"Until now, June 2024 in San Francisco is slightly cooler than the historical average by -0.6 ° C.. The forecast for June 2024 in San Francisco predicts the temperature to closely align with the historical average at 17.7 ° C. 
17.7 ° C.\\\",\\\"score\\\":0.9905143,\\\"raw_content\\\":null}]\",\n", - " \"name\": \"tavily_search_results_json\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"1\"\n", - "}\n" - ] - } - ], - "source": [ - "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n", - "const modelGeneratedToolCall = {\n", - " args: {\n", - " input: \"what is the current weather in SF?\"\n", - " },\n", - " id: \"1\",\n", - " name: tool.name,\n", - " type: \"tool_call\",\n", - "}\n", - "\n", - "await tool.invoke(modelGeneratedToolCall)" - ] - }, - { - "cell_type": "markdown", - "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\"\n", - "\n", - "const llm = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95", - "metadata": {}, - "outputs": [], - "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", \"You are a helpful assistant.\"],\n", - " [\"placeholder\", \"{messages}\"],\n", - " ]\n", - ")\n", - "\n", - "const llmWithTools = llm.bindTools([tool]);\n", - "\n", - "const chain = prompt.pipe(llmWithTools);\n", - "\n", - "const toolChain = RunnableLambda.from(\n", - " async (userInput: string, config) => {\n", - " const humanMessage = new HumanMessage(userInput,);\n", - " const aiMsg = await chain.invoke({\n", - " messages: [new HumanMessage(userInput)],\n", - " }, config);\n", - " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n", - " return chain.invoke({\n", - " messages: [humanMessage, aiMsg, ...toolMsgs],\n", - " }, config);\n", - " }\n", - ");\n", - "\n", - "const toolChainResult = await toolChain.invoke(\"what is the current weather in sf?\");" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "9ac188a2", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "a6f91f20", + "metadata": {}, + "source": [ + "# TavilySearchResults\n", + "\n", + "[Tavily](https://tavily.com/) Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.\n", + "\n", + "This guide provides a quick overview for getting started with the Tavily search results [tool](/docs/integrations/tools/). 
For detailed documentation of all `TavilySearchResults` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html).\n", + "\n", + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/tools/tavily_search/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [TavilySearchResults](https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "The integration lives in the `@langchain/community` package, which you can install as shown below:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Set up an API key [here](https://app.tavily.com) and set it as an environment variable named `TAVILY_API_KEY`.\n", + "\n", + "```typescript\n", + "process.env.TAVILY_API_KEY = \"YOUR_API_KEY\"\n", + "```\n", + "\n", + "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:\n", + "\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"tool_calls\": [],\n", - " \"content\": \"The current weather in San Francisco is as follows:\\n\\n- **Condition:** Sunny\\n- **Temperature:** 18.4°C (65.2°F)\\n- **Wind:** 2.9 mph (4.7 kph) from the west\\n- **Humidity:** 64%\\n- **Visibility:** 10 km (6 miles)\\n- **UV Index:** 5\\n\\n![Sunny](//cdn.weatherapi.com/weather/64x64/day/113.png)\\n\\nFor more detailed information, you can visit [WeatherAPI](https://www.weatherapi.com/).\"\n", - "}\n" - ] + "cell_type": "markdown", + "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "You can import and instantiate an instance of the `TavilySearchResults` tool like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64", + "metadata": {}, + "outputs": [], + "source": [ + "import { TavilySearchResults } from \"@langchain/community/tools/tavily_search\";\n", + "\n", + "const tool = new TavilySearchResults({\n", + " maxResults: 2,\n", + " // ...\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "74147a1a", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "### [Invoke directly with args](/docs/concepts/tools)\n", + "\n", + "You can invoke the tool directly like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "65310a8b-eb0c-4d9e-a618-4f4abe2414fc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{\"title\":\"San Francisco, CA Current Weather | AccuWeather\",\"url\":\"https://www.accuweather.com/en/us/san-francisco/94103/current-weather/347629\",\"content\":\"Current weather in San Francisco, CA. 
Check current conditions in San Francisco, CA with radar, hourly, and more.\",\"score\":0.9428234,\"raw_content\":null},{\"title\":\"National Weather Service\",\"url\":\"https://forecast.weather.gov/zipcity.php?inputstring=San+Francisco,CA\",\"content\":\"NOAA National Weather Service. Current conditions at SAN FRANCISCO DOWNTOWN (SFOC1) Lat: 37.77056°NLon: 122.42694°WElev: 150.0ft.\",\"score\":0.94261247,\"raw_content\":null}]\n" + ] + } + ], + "source": [ + "await tool.invoke({\n", + " input: \"what is the current weather in SF?\"\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "d6e73897", + "metadata": {}, + "source": [ + "### [Invoke with ToolCall](/docs/concepts/tools)\n", + "\n", + "We can also invoke the tool with a model-generated `ToolCall`, in which case a `ToolMessage` will be returned:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "f90e33a7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMessage {\n", + " \"content\": \"[{\\\"title\\\":\\\"Weather in San Francisco\\\",\\\"url\\\":\\\"https://www.weatherapi.com/\\\",\\\"content\\\":\\\"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.78, 'lon': -122.42, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1722967498, 'localtime': '2024-08-06 11:04'}, 'current': {'last_updated_epoch': 1722967200, 'last_updated': '2024-08-06 11:00', 'temp_c': 18.4, 'temp_f': 65.2, 'is_day': 1, 'condition': {'text': 'Sunny', 'icon': '//cdn.weatherapi.com/weather/64x64/day/113.png', 'code': 1000}, 'wind_mph': 2.9, 'wind_kph': 4.7, 'wind_degree': 275, 'wind_dir': 'W', 'pressure_mb': 1015.0, 'pressure_in': 29.97, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 64, 'cloud': 2, 'feelslike_c': 18.5, 'feelslike_f': 65.2, 'windchill_c': 18.5, 'windchill_f': 65.2, 'heatindex_c': 18.4, 'heatindex_f': 65.2, 'dewpoint_c': 11.7, 'dewpoint_f': 53.1, 'vis_km': 10.0, 'vis_miles': 6.0, 'uv': 5.0, 'gust_mph': 4.3, 'gust_kph': 7.0}}\\\",\\\"score\\\":0.9983156,\\\"raw_content\\\":null},{\\\"title\\\":\\\"Weather in San Francisco in June 2024 - Detailed Forecast\\\",\\\"url\\\":\\\"https://www.easeweather.com/north-america/united-states/california/city-and-county-of-san-francisco/san-francisco/june\\\",\\\"content\\\":\\\"Until now, June 2024 in San Francisco is slightly cooler than the historical average by -0.6 ° C.. The forecast for June 2024 in San Francisco predicts the temperature to closely align with the historical average at 17.7 ° C. 
17.7 ° C.\\\",\\\"score\\\":0.9905143,\\\"raw_content\\\":null}]\",\n", + " \"name\": \"tavily_search_results_json\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"1\"\n", + "}\n" + ] + } + ], + "source": [ + "// This is usually generated by a model, but we'll create a tool call directly for demo purposes.\n", + "const modelGeneratedToolCall = {\n", + " args: {\n", + " input: \"what is the current weather in SF?\"\n", + " },\n", + " id: \"1\",\n", + " name: tool.name,\n", + " type: \"tool_call\",\n", + "}\n", + "\n", + "await tool.invoke(modelGeneratedToolCall)" + ] + }, + { + "cell_type": "markdown", + "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "af3123ad-7a02-40e5-b58e-7d56e23e5830", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "fdbf35b5-3aaf-4947-9ec6-48c21533fb95", + "metadata": {}, + "outputs": [], + "source": [ + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnableLambda } from \"@langchain/core/runnables\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", \"You are a helpful assistant.\"],\n", + " [\"placeholder\", \"{messages}\"],\n", + " ]\n", + ")\n", + "\n", + "const llmWithTools = llm.bindTools([tool]);\n", + "\n", + "const chain = prompt.pipe(llmWithTools);\n", + "\n", + "const toolChain = RunnableLambda.from(\n", + " async (userInput: string, config) => {\n", + " const humanMessage = new HumanMessage(userInput,);\n", + " const aiMsg = await chain.invoke({\n", + " messages: [new HumanMessage(userInput)],\n", + " }, config);\n", + " const toolMsgs = await tool.batch(aiMsg.tool_calls, config);\n", + " return chain.invoke({\n", + " messages: [humanMessage, aiMsg, ...toolMsgs],\n", + " }, config);\n", + " }\n", + ");\n", + "\n", + "const toolChainResult = await toolChain.invoke(\"what is the current weather in sf?\");" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "9ac188a2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"tool_calls\": [],\n", + " \"content\": \"The current weather in San Francisco is as follows:\\n\\n- **Condition:** Sunny\\n- **Temperature:** 18.4°C (65.2°F)\\n- **Wind:** 2.9 mph (4.7 kph) from the west\\n- **Humidity:** 64%\\n- **Visibility:** 10 km (6 miles)\\n- **UV Index:** 5\\n\\n![Sunny](//cdn.weatherapi.com/weather/64x64/day/113.png)\\n\\nFor more detailed information, you can visit [WeatherAPI](https://www.weatherapi.com/).\"\n", + "}\n" + ] + } + ], + "source": [ + "const { tool_calls, content } = toolChainResult;\n", + "\n", + "console.log(\"AIMessage\", JSON.stringify({\n", + " tool_calls,\n", + " content,\n", + "}, null, 2));" + ] + }, + { + "cell_type": "markdown", + "id": "573fb391", + "metadata": {}, + "source": [ + "## Agents\n", 
+ "\n", + "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs." + ] + }, + { + "cell_type": "markdown", + "id": "4ac8146c", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `TavilySearchResults` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const { tool_calls, content } = toolChainResult;\n", - "\n", - "console.log(\"AIMessage\", JSON.stringify({\n", - " tool_calls,\n", - " content,\n", - "}, null, 2));" - ] - }, - { - "cell_type": "markdown", - "id": "573fb391", - "metadata": {}, - "source": [ - "## Agents\n", - "\n", - "For guides on how to use LangChain tools in agents, see the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) docs." - ] - }, - { - "cell_type": "markdown", - "id": "4ac8146c", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `TavilySearchResults` features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_tools_tavily_search.TavilySearchResults.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/tools/webbrowser.mdx b/docs/core_docs/docs/integrations/tools/webbrowser.mdx index a5418a6a9956..64948db73703 100644 --- a/docs/core_docs/docs/integrations/tools/webbrowser.mdx +++ b/docs/core_docs/docs/integrations/tools/webbrowser.mdx @@ -47,5 +47,5 @@ import AgentExample from "@examples/agents/mrkl_browser.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/wikipedia.mdx b/docs/core_docs/docs/integrations/tools/wikipedia.mdx index 67a221fd3341..b819e89a2db6 100644 --- a/docs/core_docs/docs/integrations/tools/wikipedia.mdx +++ b/docs/core_docs/docs/integrations/tools/wikipedia.mdx @@ -16,5 +16,5 @@ import ToolExample from "@examples/tools/wikipedia.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/wolframalpha.mdx b/docs/core_docs/docs/integrations/tools/wolframalpha.mdx index bb379c47b609..5f4b58f74691 100644 --- a/docs/core_docs/docs/integrations/tools/wolframalpha.mdx +++ b/docs/core_docs/docs/integrations/tools/wolframalpha.mdx @@ -20,5 +20,5 @@ import ToolExample from "@examples/tools/wolframalpha.ts"; ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual 
guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/tools/zapier_agent.mdx b/docs/core_docs/docs/integrations/tools/zapier_agent.mdx index 01ec98a0d8df..7772a3c9148f 100644 --- a/docs/core_docs/docs/integrations/tools/zapier_agent.mdx +++ b/docs/core_docs/docs/integrations/tools/zapier_agent.mdx @@ -69,5 +69,5 @@ console.log(`Got output ${result.output}`); ## Related -- Tool [conceptual guide](/docs/concepts/#tools) +- Tool [conceptual guide](/docs/concepts/tools) - Tool [how-to guides](/docs/how_to/#tools) diff --git a/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb b/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb index 5b7d0458e1d0..567f9415ca5f 100644 --- a/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/chroma.ipynb @@ -1,358 +1,358 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Chroma\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# Chroma\n", - "\n", - "[Chroma](https://docs.trychroma.com/getting-started) is a AI-native open-source vector database focused on developer productivity and happiness. Chroma is licensed under Apache 2.0.\n", - "\n", - "This guide provides a quick overview for getting started with Chroma [`vector stores`](/docs/concepts/#vectorstores). For detailed documentation of all `Chroma` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_chroma.Chroma.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/chroma/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`Chroma`](https://api.js.langchain.com/classes/langchain_community_vectorstores_chroma.Chroma.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Chroma vector stores, you'll need to install the `@langchain/community` integration package along with the [Chroma JS SDK](https://www.npmjs.com/package/chromadb) as a peer dependency.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/openai @langchain/core chromadb\n", - "\n", - "```\n", - "\n", - "Next, follow the following instructions to run Chroma with Docker on your computer:\n", - "\n", - "```\n", - "docker pull chromadb/chroma \n", - "docker run -p 8000:8000 chromadb/chroma\n", - "```\n", - "\n", - "You can see alternative setup instructions [in this guide](https://docs.trychroma.com/getting-started).\n", - "\n", - "### Credentials\n", - "\n", - "If you are running Chroma through Docker, you do not need to provide any credentials.\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { Chroma } from \"@langchain/community/vectorstores/chroma\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const vectorStore = new Chroma(embeddings, {\n", - " collectionName: \"a-test-collection\",\n", - " url: \"http://localhost:8000\", // Optional, will default to this value\n", - " collectionMetadata: {\n", - " \"hnsw:space\": \"cosine\",\n", - " }, // Optional, can be used to specify the distance method of the embedding space https://docs.trychroma.com/usage-guide#changing-the-distance-function\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ '1', '2', '3', '4' ]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, 
document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "### Delete items from vector store\n", - "\n", - "You can delete documents from Chroma by id as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Chroma\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = { source: \"https://example.com\" };\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "See [this page](https://docs.trychroma.com/guides#filtering-by-metadata) for more on Chroma filter syntax.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# Chroma\n", + "\n", + "[Chroma](https://docs.trychroma.com/getting-started) is a AI-native open-source vector database focused on developer productivity and happiness. Chroma is licensed under Apache 2.0.\n", + "\n", + "This guide provides a quick overview for getting started with Chroma [`vector stores`](/docs/concepts/#vectorstores). For detailed documentation of all `Chroma` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_chroma.Chroma.html)." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/chroma/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`Chroma`](https://api.js.langchain.com/classes/langchain_community_vectorstores_chroma.Chroma.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Chroma vector stores, you'll need to install the `@langchain/community` integration package along with the [Chroma JS SDK](https://www.npmjs.com/package/chromadb) as a peer dependency.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/openai @langchain/core chromadb\n", + "\n", + "```\n", + "\n", + "Next, follow the following instructions to run Chroma with Docker on your computer:\n", + "\n", + "```\n", + "docker pull chromadb/chroma \n", + "docker run -p 8000:8000 chromadb/chroma\n", + "```\n", + "\n", + "You can see alternative setup instructions [in this guide](https://docs.trychroma.com/getting-started).\n", + "\n", + "### Credentials\n", + "\n", + "If you are running Chroma through Docker, you do not need to provide any credentials.\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { Chroma } from \"@langchain/community/vectorstores/chroma\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const vectorStore = new Chroma(embeddings, {\n", + " collectionName: \"a-test-collection\",\n", + " url: \"http://localhost:8000\", // Optional, will default to this value\n", + " collectionMetadata: {\n", + " \"hnsw:space\": \"cosine\",\n", + " }, // Optional, can be used to specify the distance method of the embedding space https://docs.trychroma.com/usage-guide#changing-the-distance-function\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "17f5efc0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ '1', '2', '3', '4' ]\n" + ] + } + ], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, 
document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "### Delete items from vector store\n", + "\n", + "You can delete documents from Chroma by id as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "ef61e188", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 11, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = { source: \"https://example.com\" };\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "See [this page](https://docs.trychroma.com/guides#filtering-by-metadata) for more on Chroma filter syntax.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `Chroma` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_chroma.Chroma.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `Chroma` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_chroma.Chroma.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb 
b/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb index b8c6fd1339ab..2b3789eb9a7e 100644 --- a/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/elasticsearch.ipynb @@ -1,398 +1,398 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Elasticsearch\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# Elasticsearch\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "Only available on Node.js.\n", - ":::\n", - "\n", - "```\n", - "\n", - "[Elasticsearch](https://github.com/elastic/elasticsearch) is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. It supports also vector search using the [k-nearest neighbor](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) (kNN) algorithm and also [custom models for Natural Language Processing](https://www.elastic.co/blog/how-to-deploy-nlp-text-embeddings-and-vector-search) (NLP).\n", - "You can read more about the support of vector search in Elasticsearch [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search.html).\n", - "\n", - "This guide provides a quick overview for getting started with Elasticsearch [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `ElasticVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_elasticsearch.ElasticVectorSearch.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`ElasticVectorSearch`](https://api.js.langchain.com/classes/langchain_community_vectorstores_elasticsearch.ElasticVectorSearch.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Elasticsearch vector stores, you'll need to install the `@langchain/community` integration package.\n", - "\n", - "LangChain.js accepts [`@elastic/elasticsearch`](https://github.com/elastic/elasticsearch-js) as the client for Elasticsearch vectorstore. You'll need to install it as a peer dependency.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @elastic/elasticsearch @langchain/openai @langchain/core\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "To use Elasticsearch vector stores, you'll need to have an Elasticsearch instance running.\n", - "\n", - "You can use the [official Docker image](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html) to get started, or you can use [Elastic Cloud](https://www.elastic.co/cloud/), Elastic's official cloud service.\n", - "\n", - "For connecting to Elastic Cloud you can read the documentation reported [here](https://www.elastic.co/guide/en/kibana/current/api-keys.html) for obtaining an API key.\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Instatiating Elasticsearch will vary depending on where your instance is hosted." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import {\n", - " ElasticVectorSearch,\n", - " type ElasticClientArgs,\n", - "} from \"@langchain/community/vectorstores/elasticsearch\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "import { Client, type ClientOptions } from \"@elastic/elasticsearch\";\n", - "\n", - "import * as fs from \"node:fs\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const config: ClientOptions = {\n", - " node: process.env.ELASTIC_URL ?? \"https://127.0.0.1:9200\",\n", - "};\n", - "\n", - "if (process.env.ELASTIC_API_KEY) {\n", - " config.auth = {\n", - " apiKey: process.env.ELASTIC_API_KEY,\n", - " };\n", - "} else if (process.env.ELASTIC_USERNAME && process.env.ELASTIC_PASSWORD) {\n", - " config.auth = {\n", - " username: process.env.ELASTIC_USERNAME,\n", - " password: process.env.ELASTIC_PASSWORD,\n", - " };\n", - "}\n", - "// Local Docker deploys require a TLS certificate\n", - "if (process.env.ELASTIC_CERT_PATH) {\n", - " config.tls = {\n", - " ca: fs.readFileSync(process.env.ELASTIC_CERT_PATH),\n", - " rejectUnauthorized: false,\n", - " }\n", - "}\n", - "const clientArgs: ElasticClientArgs = {\n", - " client: new Client(config),\n", - " indexName: process.env.ELASTIC_INDEX ?? 
\"test_vectorstore\",\n", - "};\n", - "\n", - "const vectorStore = new ElasticVectorSearch(embeddings, clientArgs);" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ '1', '2', '3', '4' ]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "### Delete items from vector store\n", - "\n", - "You can delete values from the store by passing the same id you passed in:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent.\n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Elasticsearch\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = [{\n", - " operator: \"match\",\n", - " field: \"source\",\n", - " value: \"https://example.com\",\n", - "}];\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "The vector store supports [Elasticsearch filter syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-filter-context.html) operators.\n", - "\n", - "If you want to execute a similarity search 
and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# Elasticsearch\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "Only available on Node.js.\n", + ":::\n", + "\n", + "```\n", + "\n", + "[Elasticsearch](https://github.com/elastic/elasticsearch) is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. It supports also vector search using the [k-nearest neighbor](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) (kNN) algorithm and also [custom models for Natural Language Processing](https://www.elastic.co/blog/how-to-deploy-nlp-text-embeddings-and-vector-search) (NLP).\n", + "You can read more about the support of vector search in Elasticsearch [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search.html).\n", + "\n", + "This guide provides a quick overview for getting started with Elasticsearch [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `ElasticVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_elasticsearch.ElasticVectorSearch.html)." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.374] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.370] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`ElasticVectorSearch`](https://api.js.langchain.com/classes/langchain_community_vectorstores_elasticsearch.ElasticVectorSearch.html) | [`@langchain/community`](https://www.npmjs.com/package/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Elasticsearch vector stores, you'll need to install the `@langchain/community` integration package.\n", + "\n", + "LangChain.js accepts [`@elastic/elasticsearch`](https://github.com/elastic/elasticsearch-js) as the client for Elasticsearch vectorstore. 
You'll need to install it as a peer dependency.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @elastic/elasticsearch @langchain/openai @langchain/core\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "To use Elasticsearch vector stores, you'll need to have an Elasticsearch instance running.\n", + "\n", + "You can use the [official Docker image](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html) to get started, or you can use [Elastic Cloud](https://www.elastic.co/cloud/), Elastic's official cloud service.\n", + "\n", + "For connecting to Elastic Cloud you can read the documentation reported [here](https://www.elastic.co/guide/en/kibana/current/api-keys.html) for obtaining an API key.\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Instatiating Elasticsearch will vary depending on where your instance is hosted." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import {\n", + " ElasticVectorSearch,\n", + " type ElasticClientArgs,\n", + "} from \"@langchain/community/vectorstores/elasticsearch\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "import { Client, type ClientOptions } from \"@elastic/elasticsearch\";\n", + "\n", + "import * as fs from \"node:fs\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const config: ClientOptions = {\n", + " node: process.env.ELASTIC_URL ?? \"https://127.0.0.1:9200\",\n", + "};\n", + "\n", + "if (process.env.ELASTIC_API_KEY) {\n", + " config.auth = {\n", + " apiKey: process.env.ELASTIC_API_KEY,\n", + " };\n", + "} else if (process.env.ELASTIC_USERNAME && process.env.ELASTIC_PASSWORD) {\n", + " config.auth = {\n", + " username: process.env.ELASTIC_USERNAME,\n", + " password: process.env.ELASTIC_PASSWORD,\n", + " };\n", + "}\n", + "// Local Docker deploys require a TLS certificate\n", + "if (process.env.ELASTIC_CERT_PATH) {\n", + " config.tls = {\n", + " ca: fs.readFileSync(process.env.ELASTIC_CERT_PATH),\n", + " rejectUnauthorized: false,\n", + " }\n", + "}\n", + "const clientArgs: ElasticClientArgs = {\n", + " client: new Client(config),\n", + " indexName: process.env.ELASTIC_INDEX ?? 
\"test_vectorstore\",\n", + "};\n", + "\n", + "const vectorStore = new ElasticVectorSearch(embeddings, clientArgs);" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ '1', '2', '3', '4' ]\n" + ] + } + ], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "### Delete items from vector store\n", + "\n", + "You can delete values from the store by passing the same id you passed in:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ef61e188", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent.\n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 13, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = [{\n", + " operator: \"match\",\n", + " field: \"source\",\n", + " value: \"https://example.com\",\n", + "}];\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "The vector store 
supports [Elasticsearch filter syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-filter-context.html) operators.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.374] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.370] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. " + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `ElasticVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_elasticsearch.ElasticVectorSearch.html)." 
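The `match` condition shown earlier is only one of the operators the `ElasticVectorSearch` filter accepts; conditions such as `exclude` and `not_exists` can be combined in the same filter array. The sketch below assumes the `vectorStore` created above and uses a hypothetical `archived` metadata field purely for illustration; adapt the field names to your own documents.

```typescript
// Sketch only: combine a `match` condition with a `not_exists` condition.
// `archived` is a hypothetical metadata field used for illustration.
const combinedFilter = [
  {
    field: "source",
    operator: "match",
    value: "https://example.com",
  },
  {
    field: "archived",
    operator: "not_exists",
  },
];

const combinedResults = await vectorStore.similaritySearch(
  "biology",
  2,
  combinedFilter
);

for (const doc of combinedResults) {
  console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);
}
```

Because the conditions in the array are applied to the same query, this should return only documents from `https://example.com` whose metadata has no `archived` field.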
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `ElasticVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_elasticsearch.ElasticVectorSearch.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb b/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb index 5a696b45e0d4..1d3422ce34f5 100644 --- a/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/faiss.ipynb @@ -1,467 +1,467 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Faiss\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# FaissStore\n", - "\n", - "```{=mdx}\n", - "\n", - ":::tip Compatibility\n", - "Only available on Node.js.\n", - ":::\n", - "\n", - "```\n", - "\n", - "[Faiss](https://github.com/facebookresearch/faiss) is a library for efficient similarity search and clustering of dense vectors.\n", - "\n", - "LangChain.js supports using Faiss as a locally-running vectorstore that can be saved to a file. It also provides the ability to read the saved file from the [LangChain Python implementation](https://python.langchain.com/docs/integrations/vectorstores/faiss#saving-and-loading).\n", - "\n", - "This guide provides a quick overview for getting started with Faiss [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `FaissStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_faiss.FaissStore.html)." 
- ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/faiss) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`FaissStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_faiss.FaissStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Faiss vector stores, you'll need to install the `@langchain/community` integration package and the [`faiss-node`](https://github.com/ewfian/faiss-node) package as a peer dependency.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community faiss-node @langchain/openai @langchain/core\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Because Faiss runs locally, you do not need any credentials to use it.\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { FaissStore } from \"@langchain/community/vectorstores/faiss\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const vectorStore = new FaissStore(embeddings, {});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ '1', '2', '3', '4' ]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - 
"};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "### Delete items from vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Faiss\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "Filtering by metadata is currently not supported.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# FaissStore\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "Only available on Node.js.\n", + ":::\n", + "\n", + "```\n", + "\n", + "[Faiss](https://github.com/facebookresearch/faiss) is a library for efficient similarity search and clustering of dense vectors.\n", + "\n", + "LangChain.js supports using Faiss as a locally-running vectorstore that can be saved to a file. It also provides the ability to read the saved file from the [LangChain Python implementation](https://python.langchain.com/docs/integrations/vectorstores/faiss#saving-and-loading).\n", + "\n", + "This guide provides a quick overview for getting started with Faiss [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `FaissStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_faiss.FaissStore.html)." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=1.671] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=1.705] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2);\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/faiss) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`FaissStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_faiss.FaissStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Faiss vector stores, you'll need to install the `@langchain/community` integration package and the [`faiss-node`](https://github.com/ewfian/faiss-node) package as a peer dependency.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community faiss-node @langchain/openai @langchain/core\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Because Faiss runs locally, you do not need any credentials to use it.\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { FaissStore } from \"@langchain/community/vectorstores/faiss\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const vectorStore = new FaissStore(embeddings, {});" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "17f5efc0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ '1', '2', '3', '4' ]\n" + ] + } + ], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' }\n", - " },\n", - " {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' }\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "### Delete items from vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ef61e188", + "metadata": {}, + 
"outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "Filtering by metadata is currently not supported.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=1.671] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=1.705] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2);\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' }\n", + " },\n", + " {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "58a88011", + "metadata": {}, + "source": [ + "## Merging indexes\n", + "\n", + "Faiss also supports merging existing indexes:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79a65a68", + "metadata": {}, + "outputs": [], + "source": [ + "// Create an initial vector store\n", + "const initialStore = await FaissStore.fromTexts(\n", + " [\"Hello world\", \"Bye bye\", \"hello nice world\"],\n", + " [{ id: 2 }, { id: 1 }, { id: 3 }],\n", + " new OpenAIEmbeddings()\n", + ");\n", + "\n", + "// Create another vector store from texts\n", + "const newStore = await FaissStore.fromTexts(\n", + " [\"Some text\"],\n", + " [{ id: 1 }],\n", + " new OpenAIEmbeddings()\n", + ");\n", + "\n", + "// merge the first vector store into vectorStore2\n", + "await newStore.mergeFrom(initialStore);\n", + "\n", + "// You can also create a new vector store from another FaissStore index\n", + "const newStore2 = await FaissStore.fromIndex(\n", + " newStore,\n", + " new OpenAIEmbeddings()\n", + ");\n", + "\n", + "await newStore2.similaritySearch(\"Bye bye\", 1);" + ] + }, + { + "cell_type": "markdown", + "id": "b92a2301", + "metadata": {}, + "source": [ + "## Save an index to file and load it again\n", + "\n", + "To persist an index on disk, use the `.save` and static `.load` methods:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e4aecb9", + "metadata": {}, + "outputs": [], + "source": [ + "// Create a vector store through any method, here from texts as an example\n", + "const persistentStore = await FaissStore.fromTexts(\n", + " [\"Hello world\", \"Bye bye\", \"hello nice world\"],\n", + " [{ id: 2 }, { id: 1 }, { id: 3 }],\n", + " new OpenAIEmbeddings()\n", + ");\n", + "\n", + "// Save the vector store to a directory\n", + "const directory = \"your/directory/here\";\n", + "\n", + "await persistentStore.save(directory);\n", + "\n", + "// Load the vector store from the same directory\n", + "const loadedVectorStore = await FaissStore.load(\n", + " directory,\n", + " new OpenAIEmbeddings()\n", + ");\n", + "\n", + "// vectorStore and loadedVectorStore are identical\n", + "const result = await loadedVectorStore.similaritySearch(\"hello world\", 1);\n", + "console.log(result);" + ] + }, + { + "cell_type": "markdown", + "id": "069f1b5f", + "metadata": {}, + "source": [ + "## Reading saved files from Python\n", + 
"\n", + "To enable the ability to read the saved file from [LangChain Python's implementation](https://python.langchain.com/docs/integrations/vectorstores/faiss#saving-and-loading), you'll need to install the [`pickleparser`](https://github.com/ewfian/pickleparser) package.\n", + "\n", + "```{=mdx}\n", + "\n", + " pickleparser\n", + "\n", + "```\n", + "\n", + "Then you can use the `.loadFromPython` static method:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d959f997", + "metadata": {}, + "outputs": [], + "source": [ + "// The directory of data saved from Python\n", + "const directoryWithSavedPythonStore = \"your/directory/here\";\n", + "\n", + "// Load the vector store from the directory\n", + "const pythonLoadedStore = await FaissStore.loadFromPython(\n", + " directoryWithSavedPythonStore,\n", + " new OpenAIEmbeddings()\n", + ");\n", + "\n", + "// Search for the most similar document\n", + "await pythonLoadedStore.similaritySearch(\"test\", 2);" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `FaissStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_faiss.FaissStore.html)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "58a88011", - "metadata": {}, - "source": [ - "## Merging indexes\n", - "\n", - "Faiss also supports merging existing indexes:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "79a65a68", - "metadata": {}, - "outputs": [], - "source": [ - "// Create an initial vector store\n", - "const initialStore = await FaissStore.fromTexts(\n", - " [\"Hello world\", \"Bye bye\", \"hello nice world\"],\n", - " [{ id: 2 }, { id: 1 }, { id: 3 }],\n", - " new OpenAIEmbeddings()\n", - ");\n", - "\n", - "// Create another vector store from texts\n", - "const newStore = await FaissStore.fromTexts(\n", - " [\"Some text\"],\n", - " [{ id: 1 }],\n", - " new OpenAIEmbeddings()\n", - ");\n", - "\n", - "// merge the first vector store into vectorStore2\n", - "await newStore.mergeFrom(initialStore);\n", - "\n", - "// You can also create a new vector store from another FaissStore index\n", - "const newStore2 = await FaissStore.fromIndex(\n", - " newStore,\n", - " new OpenAIEmbeddings()\n", - ");\n", - "\n", - "await newStore2.similaritySearch(\"Bye bye\", 1);" - ] - }, - { - "cell_type": "markdown", - "id": "b92a2301", - "metadata": {}, - "source": [ - "## Save an 
index to file and load it again\n", - "\n", - "To persist an index on disk, use the `.save` and static `.load` methods:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9e4aecb9", - "metadata": {}, - "outputs": [], - "source": [ - "// Create a vector store through any method, here from texts as an example\n", - "const persistentStore = await FaissStore.fromTexts(\n", - " [\"Hello world\", \"Bye bye\", \"hello nice world\"],\n", - " [{ id: 2 }, { id: 1 }, { id: 3 }],\n", - " new OpenAIEmbeddings()\n", - ");\n", - "\n", - "// Save the vector store to a directory\n", - "const directory = \"your/directory/here\";\n", - "\n", - "await persistentStore.save(directory);\n", - "\n", - "// Load the vector store from the same directory\n", - "const loadedVectorStore = await FaissStore.load(\n", - " directory,\n", - " new OpenAIEmbeddings()\n", - ");\n", - "\n", - "// vectorStore and loadedVectorStore are identical\n", - "const result = await loadedVectorStore.similaritySearch(\"hello world\", 1);\n", - "console.log(result);" - ] - }, - { - "cell_type": "markdown", - "id": "069f1b5f", - "metadata": {}, - "source": [ - "## Reading saved files from Python\n", - "\n", - "To enable the ability to read the saved file from [LangChain Python's implementation](https://python.langchain.com/docs/integrations/vectorstores/faiss#saving-and-loading), you'll need to install the [`pickleparser`](https://github.com/ewfian/pickleparser) package.\n", - "\n", - "```{=mdx}\n", - "\n", - " pickleparser\n", - "\n", - "```\n", - "\n", - "Then you can use the `.loadFromPython` static method:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d959f997", - "metadata": {}, - "outputs": [], - "source": [ - "// The directory of data saved from Python\n", - "const directoryWithSavedPythonStore = \"your/directory/here\";\n", - "\n", - "// Load the vector store from the directory\n", - "const pythonLoadedStore = await FaissStore.loadFromPython(\n", - " directoryWithSavedPythonStore,\n", - " new OpenAIEmbeddings()\n", - ");\n", - "\n", - "// Search for the most similar document\n", - "await pythonLoadedStore.similaritySearch(\"test\", 2);" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `FaissStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_faiss.FaissStore.html)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb b/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb index 44ba0e864590..8ec5ece7ed0b 100644 --- a/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/hnswlib.ipynb @@ -1,381 +1,381 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: HNSWLib\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": 
"markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# HNSWLib\n", - "\n", - "```{=mdx}\n", - ":::tip Compatibility\n", - "Only available on Node.js.\n", - ":::\n", - "```\n", - "\n", - "HNSWLib is an in-memory vector store that can be saved to a file. It uses the [HNSWLib library](https://github.com/nmslib/hnswlib).\n", - "\n", - "This guide provides a quick overview for getting started with HNSWLib [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `HNSWLib` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_hnswlib.HNSWLib.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | PY support | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`HNSWLib`](https://api.js.langchain.com/classes/langchain_community_vectorstores_hnswlib.HNSWLib.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ❌ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use HNSWLib vector stores, you'll need to install the `@langchain/community` integration package with the [`hnswlib-node`](https://www.npmjs.com/package/hnswlib-node) package as a peer dependency.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community hnswlib-node @langchain/openai @langchain/core\n", - "\n", - "```\n", - "\n", - "```{=mdx}\n", - ":::caution\n", - "\n", - "**On Windows**, you might need to install [Visual Studio](https://visualstudio.microsoft.com/downloads/) first in order to properly build the `hnswlib-node` package.\n", - "\n", - ":::\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Because HNSWLib runs locally, you do not need any credentials to use it.\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { HNSWLib } from \"@langchain/community/vectorstores/hnswlib\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const 
vectorStore = await HNSWLib.fromDocuments([], embeddings);" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents);" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "Deletion and ids for individual documents are not currently supported.\n", - "\n", - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = (doc) => doc.metadata.source === \"https://example.com\";\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "The filter is optional, and must be a predicate function that takes a document as input, and returns `true` or `false` depending on whether the document should be returned.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: HNSWLib\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await 
vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# HNSWLib\n", + "\n", + "```{=mdx}\n", + ":::tip Compatibility\n", + "Only available on Node.js.\n", + ":::\n", + "```\n", + "\n", + "HNSWLib is an in-memory vector store that can be saved to a file. It uses the [HNSWLib library](https://github.com/nmslib/hnswlib).\n", + "\n", + "This guide provides a quick overview for getting started with HNSWLib [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `HNSWLib` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_hnswlib.HNSWLib.html)." + ] + }, + { + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | PY support | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`HNSWLib`](https://api.js.langchain.com/classes/langchain_community_vectorstores_hnswlib.HNSWLib.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ❌ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use HNSWLib vector stores, you'll need to install the `@langchain/community` integration package with the [`hnswlib-node`](https://www.npmjs.com/package/hnswlib-node) package as a peer dependency.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community hnswlib-node @langchain/openai @langchain/core\n", + "\n", + "```\n", + "\n", + "```{=mdx}\n", + ":::caution\n", + "\n", + "**On Windows**, you might need to install [Visual Studio](https://visualstudio.microsoft.com/downloads/) first in order to properly build the `hnswlib-node` package.\n", + "\n", + ":::\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Because HNSWLib runs locally, you do not need any credentials to use it.\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { HNSWLib } from \"@langchain/community/vectorstores/hnswlib\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const vectorStore = await HNSWLib.fromDocuments([], embeddings);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' }\n", - " },\n", - " {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' }\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents);" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "Deletion 
and ids for individual documents are not currently supported.\n", + "\n", + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = (doc) => doc.metadata.source === \"https://example.com\";\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "The filter is optional, and must be a predicate function that takes a document as input, and returns `true` or `false` depending on whether the document should be returned.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' }\n", + " },\n", + " {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' }\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "069f1b5f", + "metadata": {}, + "source": [ + "## Save to/load from file\n", + "\n", + "HNSWLib supports saving your index to a file, then reloading it at a later date:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f71ce986", + "metadata": {}, + "outputs": [], + "source": [ + "// Save the vector store to a directory\n", + "const directory = \"your/directory/here\";\n", + "await vectorStore.save(directory);\n", + "\n", + "// Load the vector store from the same directory\n", + "const loadedVectorStore = await HNSWLib.load(directory, new OpenAIEmbeddings());\n", + "\n", + "// vectorStore and loadedVectorStore are identical\n", + "await loadedVectorStore.similaritySearch(\"hello world\", 1);" + ] + }, + { + "cell_type": "markdown", + "id": "22f0d74f", + "metadata": {}, + "source": [ + "### Delete a saved index\n", + "\n", + "You can use the `.delete` method to clear an index saved to a given directory:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "daabbffd", + "metadata": {}, + "outputs": [], + "source": [ + "// Load the vector store from the same directory\n", + "const savedVectorStore = await HNSWLib.load(directory, new OpenAIEmbeddings());\n", + "\n", + "await savedVectorStore.delete({ directory });" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `HNSWLib` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_hnswlib.HNSWLib.html)." 
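
As a rough illustration of the retrieval-augmented generation usage referenced above, here is a minimal sketch that wires the `HNSWLib`-backed retriever into a simple prompt-and-model chain. The chat model name and prompt wording below are illustrative assumptions and not part of the HNSWLib integration itself:

```typescript
// Minimal RAG sketch (assumes the `vectorStore` created above and an OpenAI API key).
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";

const llm = new ChatOpenAI({ model: "gpt-4o-mini" }); // hypothetical model choice

const prompt = ChatPromptTemplate.fromTemplate(
  `Answer the question using only the context below.\n\nContext:\n{context}\n\nQuestion: {question}`
);

const ragChain = prompt.pipe(llm).pipe(new StringOutputParser());

const question = "What are mitochondria made of?";

// Retrieve the two most relevant documents and join them into a context string.
const retrievedDocs = await vectorStore.asRetriever({ k: 2 }).invoke(question);
const context = retrievedDocs.map((doc) => doc.pageContent).join("\n");

const answer = await ragChain.invoke({ context, question });
console.log(answer);
```

This is only a sketch under the stated assumptions; the tutorials and how-to guides linked above cover more robust chain construction.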
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "069f1b5f", - "metadata": {}, - "source": [ - "## Save to/load from file\n", - "\n", - "HNSWLib supports saving your index to a file, then reloading it at a later date:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f71ce986", - "metadata": {}, - "outputs": [], - "source": [ - "// Save the vector store to a directory\n", - "const directory = \"your/directory/here\";\n", - "await vectorStore.save(directory);\n", - "\n", - "// Load the vector store from the same directory\n", - "const loadedVectorStore = await HNSWLib.load(directory, new OpenAIEmbeddings());\n", - "\n", - "// vectorStore and loadedVectorStore are identical\n", - "await loadedVectorStore.similaritySearch(\"hello world\", 1);" - ] - }, - { - "cell_type": "markdown", - "id": "22f0d74f", - "metadata": {}, - "source": [ - "### Delete a saved index\n", - "\n", - "You can use the `.delete` method to clear an index saved to a given directory:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "daabbffd", - "metadata": {}, - "outputs": [], - "source": [ - "// Load the vector store from the same directory\n", - "const savedVectorStore = await HNSWLib.load(directory, new OpenAIEmbeddings());\n", - "\n", - "await savedVectorStore.delete({ directory });" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `HNSWLib` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_hnswlib.HNSWLib.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/index.mdx b/docs/core_docs/docs/integrations/vectorstores/index.mdx index eb93d1f64953..f6c9a6f5a194 100644 --- a/docs/core_docs/docs/integrations/vectorstores/index.mdx +++ b/docs/core_docs/docs/integrations/vectorstores/index.mdx @@ -5,7 +5,7 @@ sidebar_class_name: hidden # Vector stores -A [vector store](/docs/concepts/#vectorstores) stores [embedded](/docs/concepts/#embedding-models) data and performs similarity search. +A [vector store](/docs/concepts/#vectorstores) stores [embedded](/docs/concepts/embedding_models) data and performs similarity search. LangChain.js integrates with a variety of vector stores. You can check out a full list below: diff --git a/docs/core_docs/docs/integrations/vectorstores/memory.ipynb b/docs/core_docs/docs/integrations/vectorstores/memory.ipynb index df44dbbfeb20..c38d1a03e118 100644 --- a/docs/core_docs/docs/integrations/vectorstores/memory.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/memory.ipynb @@ -1,362 +1,362 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: In-memory\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# MemoryVectorStore\n", - "\n", - "LangChain offers is an in-memory, ephemeral vectorstore that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity, but can be changed to any of the similarity metrics supported by [ml-distance](https://mljs.github.io/distance/modules/similarity.html).\n", - "\n", - "As it is intended for demos, it does not yet support ids or deletion.\n", - "\n", - "This guide provides a quick overview for getting started with in-memory [`vector stores`](/docs/concepts/#vectorstores). For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html)." - ] - }, - { - "cell_type": "markdown", - "id": "70d54e56", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | PY support | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html) | [`langchain`](https://www.npmjs.com/package/langchain) | ❌ | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use in-memory vector stores, you'll need to install the `langchain` package:\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " langchain @langchain/openai @langchain/core\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "There are no required credentials to use in-memory vector stores.\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const vectorStore = new MemoryVectorStore(embeddings);" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const documents = [document1, document2, document3];\n", - "\n", - "await vectorStore.addDocuments(documents);" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = (doc) => doc.metadata.source === \"https://example.com\";\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter)\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "The filter is optional, and must be a predicate function that takes a document as input, and returns `true` or `false` depending on whether the document should be returned.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: In-memory\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# MemoryVectorStore\n", + "\n", + "LangChain offers is an in-memory, ephemeral vectorstore that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity, but can be changed to any of the similarity metrics supported by [ml-distance](https://mljs.github.io/distance/modules/similarity.html).\n", + "\n", + "As it is intended for demos, it does not yet support ids or deletion.\n", + "\n", + "This guide provides a quick overview for getting started with in-memory [`vector stores`](/docs/concepts/#vectorstores). For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html)." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "423d779a", - "metadata": {}, - "source": [ - "### Maximal marginal relevance\n", - "\n", - "This vector store also supports maximal marginal relevance (MMR), a technique that first fetches a larger number of results (given by `searchKwargs.fetchK`), with classic similarity search, then reranks for diversity and returns the top `k` results. This helps guard against redundant information:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "56817a1c", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "70d54e56", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | PY support | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`MemoryVectorStore`](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html) | [`langchain`](https://www.npmjs.com/package/langchain) | ❌ | ![NPM - Version](https://img.shields.io/npm/v/langchain?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use in-memory vector stores, you'll need to install the `langchain` package:\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " langchain @langchain/openai @langchain/core\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "There are no required credentials to use in-memory vector stores.\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const vectorStore = new MemoryVectorStore(embeddings);" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const documents = [document1, document2, document3];\n", + "\n", + "await vectorStore.addDocuments(documents);" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = (doc) => doc.metadata.source === \"https://example.com\";\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter)\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "The filter is optional, and must be a predicate function that takes a document as input, and returns `true` or `false` depending on whether the document should be returned.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Buildings are made out of brick',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 4, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "423d779a", + "metadata": {}, + "source": [ + "### Maximal marginal relevance\n", + "\n", + "This vector store also supports maximal marginal relevance (MMR), a technique 
that first fetches a larger number of results (given by `searchKwargs.fetchK`), with classic similarity search, then reranks for diversity and returns the top `k` results. This helps guard against redundant information:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "56817a1c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Buildings are made out of brick',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const mmrRetriever = vectorStore.asRetriever({\n", + " searchType: \"mmr\",\n", + " searchKwargs: {\n", + " fetchK: 10,\n", + " },\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "\n", + "await mmrRetriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const mmrRetriever = vectorStore.asRetriever({\n", - " searchType: \"mmr\",\n", - " searchKwargs: {\n", - " fetchK: 10,\n", - " },\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "\n", - "await mmrRetriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `MemoryVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain.vectorstores_memory.MemoryVectorStore.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb b/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb index df112e73991b..e84e4a8bf31d 100644 --- a/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/mongodb_atlas.ipynb @@ -1,497 +1,497 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: MongoDB Atlas\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# MongoDB Atlas\n", - "\n", - "```{=mdx}\n", - ":::tip Compatibility\n", - "Only available on Node.js.\n", - "\n", - "You can still create API routes that use MongoDB with Next.js by setting the `runtime` variable to `nodejs` like so:\n", - "\n", - "`export const runtime = \"nodejs\";`\n", - "\n", - "You can read more about Edge runtimes in the Next.js documentation [here](https://nextjs.org/docs/app/building-your-application/rendering/edge-and-nodejs-runtimes).\n", - ":::\n", - "```\n", - "\n", - "This guide provides a quick overview for getting started with MongoDB Atlas [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `MongoDBAtlasVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mongodb.MongoDBAtlasVectorSearch.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`MongoDBAtlasVectorSearch`](https://api.js.langchain.com/classes/langchain_mongodb.MongoDBAtlasVectorSearch.html) | [`@langchain/mongodb`](https://www.npmjs.com/package/@langchain/mongodb) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mongodb?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use MongoDB Atlas vector stores, you'll need to configure a MongoDB Atlas cluster and install the `@langchain/mongodb` integration package.\n", - "\n", - "### Initial Cluster Configuration\n", - "\n", - "To create a MongoDB Atlas cluster, navigate to the [MongoDB Atlas website](https://www.mongodb.com/products/platform/atlas-database) and create an account if you don't already have one.\n", - "\n", - "Create and name a cluster when prompted, then find it under `Database`. 
Select `Browse Collections` and create either a blank collection or one from the provided sample data.\n", - "\n", - "**Note:** The cluster created must be MongoDB 7.0 or higher.\n", - "\n", - "### Creating an Index\n", - "\n", - "After configuring your cluster, you'll need to create an index on the collection field you want to search over.\n", - "\n", - "Switch to the `Atlas Search` tab and click `Create Search Index`. From there, make sure you select `Atlas Vector Search - JSON Editor`, then select the appropriate database and collection and paste the following into the textbox:\n", - "\n", - "```json\n", - "{\n", - " \"fields\": [\n", - " {\n", - " \"numDimensions\": 1536,\n", - " \"path\": \"embedding\",\n", - " \"similarity\": \"euclidean\",\n", - " \"type\": \"vector\"\n", - " }\n", - " ]\n", - "}\n", - "```\n", - "\n", - "Note that the dimensions property should match the dimensionality of the embeddings you are using. For example, Cohere embeddings have 1024 dimensions, and by default OpenAI embeddings have 1536:\n", - "\n", - "Note: By default the vector store expects an index name of default, an indexed collection field name of embedding, and a raw text field name of text. You should initialize the vector store with field names matching your index name collection schema as shown below.\n", - "\n", - "Finally, proceed to build the index.\n", - "\n", - "### Embeddings\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "### Installation\n", - "\n", - "Install the following packages:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/mongodb mongodb @langchain/openai @langchain/core\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Once you've done the above, set the `MONGODB_ATLAS_URI` environment variable from the `Connect` button in Mongo's dashboard. 
You'll also need your DB name and collection name:\n", - "\n", - "```typescript\n", - "process.env.MONGODB_ATLAS_URI = \"your-atlas-url\";\n", - "process.env.MONGODB_ATLAS_COLLECTION_NAME = \"your-atlas-db-name\";\n", - "process.env.MONGODB_ATLAS_DB_NAME = \"your-atlas-db-name\";\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Once you've set up your cluster as shown above, you can initialize your vector store as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { MongoDBAtlasVectorSearch } from \"@langchain/mongodb\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { MongoClient } from \"mongodb\";\n", - "\n", - "const client = new MongoClient(process.env.MONGODB_ATLAS_URI || \"\");\n", - "const collection = client.db(process.env.MONGODB_ATLAS_DB_NAME)\n", - " .collection(process.env.MONGODB_ATLAS_COLLECTION_NAME);\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const vectorStore = new MongoDBAtlasVectorSearch(embeddings, {\n", - " collection: collection,\n", - " indexName: \"vector_index\", // The name of the Atlas search index. Defaults to \"default\"\n", - " textKey: \"text\", // The name of the collection field containing the raw content. Defaults to \"text\"\n", - " embeddingKey: \"embedding\", // The name of the collection field containing the embedded text. 
Defaults to \"embedding\"\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store\n", - "\n", - "You can now add documents to your vector store:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ '1', '2', '3', '4' ]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "**Note:** After adding documents, there is a slight delay before they become queryable.\n", - "\n", - "Adding a document with the same `id` as an existing document will update the existing one.\n", - "\n", - "### Delete items from vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: MongoDB Atlas\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"_id\":\"1\",\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"_id\":\"3\",\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "### Filtering\n", - "\n", - "MongoDB Atlas supports pre-filtering of results on other fields. They require you to define which metadata fields you plan to filter on by updating the index you created initially. 
Here's an example:\n", - "\n", - "```json\n", - "{\n", - " \"fields\": [\n", - " {\n", - " \"numDimensions\": 1024,\n", - " \"path\": \"embedding\",\n", - " \"similarity\": \"euclidean\",\n", - " \"type\": \"vector\"\n", - " },\n", - " {\n", - " \"path\": \"source\",\n", - " \"type\": \"filter\"\n", - " }\n", - " ]\n", - "}\n", - "```\n", - "\n", - "Above, the first item in `fields` is the vector index, and the second item is the metadata property you want to filter on. The name of the property is the value of the `path` key. So the above index would allow us to search on a metadata field named `source`.\n", - "\n", - "Then, in your code you can use [MQL Query Operators](https://www.mongodb.com/docs/manual/reference/operator/query/) for filtering.\n", - "\n", - "The below example illustrates this:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "bc8f242e", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# MongoDB Atlas\n", + "\n", + "```{=mdx}\n", + ":::tip Compatibility\n", + "Only available on Node.js.\n", + "\n", + "You can still create API routes that use MongoDB with Next.js by setting the `runtime` variable to `nodejs` like so:\n", + "\n", + "`export const runtime = \"nodejs\";`\n", + "\n", + "You can read more about Edge runtimes in the Next.js documentation [here](https://nextjs.org/docs/app/building-your-application/rendering/edge-and-nodejs-runtimes).\n", + ":::\n", + "```\n", + "\n", + "This guide provides a quick overview for getting started with MongoDB Atlas [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `MongoDBAtlasVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mongodb.MongoDBAtlasVectorSearch.html)." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"_id\":\"1\",\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"_id\":\"3\",\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = {\n", - " preFilter: {\n", - " source: {\n", - " $eq: \"https://example.com\",\n", - " },\n", - " },\n", - "}\n", - "\n", - "const filteredResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of filteredResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "69326bba", - "metadata": {}, - "source": [ - "### Returning scores\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`MongoDBAtlasVectorSearch`](https://api.js.langchain.com/classes/langchain_mongodb.MongoDBAtlasVectorSearch.html) | [`@langchain/mongodb`](https://www.npmjs.com/package/@langchain/mongodb) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mongodb?style=flat-square&label=%20&) |" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.374] The powerhouse of the cell is the mitochondria [{\"_id\":\"1\",\"source\":\"https://example.com\"}]\n", - "* [SIM=0.370] Mitochondria are made out of lipids [{\"_id\":\"3\",\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use MongoDB Atlas vector stores, you'll need to configure a MongoDB Atlas cluster and install the `@langchain/mongodb` integration package.\n", + "\n", + "### Initial Cluster Configuration\n", + "\n", + "To create a MongoDB Atlas cluster, navigate to the [MongoDB Atlas website](https://www.mongodb.com/products/platform/atlas-database) and create an account if you don't already have one.\n", + "\n", + "Create and name a cluster when prompted, then find it under `Database`. 
Select `Browse Collections` and create either a blank collection or one from the provided sample data.\n", + "\n", + "**Note:** The cluster created must be MongoDB 7.0 or higher.\n", + "\n", + "### Creating an Index\n", + "\n", + "After configuring your cluster, you'll need to create an index on the collection field you want to search over.\n", + "\n", + "Switch to the `Atlas Search` tab and click `Create Search Index`. From there, make sure you select `Atlas Vector Search - JSON Editor`, then select the appropriate database and collection and paste the following into the textbox:\n", + "\n", + "```json\n", + "{\n", + " \"fields\": [\n", + " {\n", + " \"numDimensions\": 1536,\n", + " \"path\": \"embedding\",\n", + " \"similarity\": \"euclidean\",\n", + " \"type\": \"vector\"\n", + " }\n", + " ]\n", + "}\n", + "```\n", + "\n", + "Note that the dimensions property should match the dimensionality of the embeddings you are using. For example, Cohere embeddings have 1024 dimensions, and by default OpenAI embeddings have 1536:\n", + "\n", + "Note: By default the vector store expects an index name of default, an indexed collection field name of embedding, and a raw text field name of text. You should initialize the vector store with field names matching your index name collection schema as shown below.\n", + "\n", + "Finally, proceed to build the index.\n", + "\n", + "### Embeddings\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "### Installation\n", + "\n", + "Install the following packages:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/mongodb mongodb @langchain/openai @langchain/core\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Once you've done the above, set the `MONGODB_ATLAS_URI` environment variable from the `Connect` button in Mongo's dashboard. 
You'll also need your DB name and collection name:\n", + "\n", + "```typescript\n", + "process.env.MONGODB_ATLAS_URI = \"your-atlas-url\";\n", + "process.env.MONGODB_ATLAS_COLLECTION_NAME = \"your-atlas-db-name\";\n", + "process.env.MONGODB_ATLAS_DB_NAME = \"your-atlas-db-name\";\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { _id: '1', source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { _id: '3', source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Once you've set up your cluster as shown above, you can initialize your vector store as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { MongoDBAtlasVectorSearch } from \"@langchain/mongodb\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { MongoClient } from \"mongodb\";\n", + "\n", + "const client = new MongoClient(process.env.MONGODB_ATLAS_URI || \"\");\n", + "const collection = client.db(process.env.MONGODB_ATLAS_DB_NAME)\n", + " .collection(process.env.MONGODB_ATLAS_COLLECTION_NAME);\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const vectorStore = new MongoDBAtlasVectorSearch(embeddings, {\n", + " collection: collection,\n", + " indexName: \"vector_index\", // The name of the Atlas search index. Defaults to \"default\"\n", + " textKey: \"text\", // The name of the collection field containing the raw content. Defaults to \"text\"\n", + " embeddingKey: \"embedding\", // The name of the collection field containing the embedded text. 
Defaults to \"embedding\"\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store\n", + "\n", + "You can now add documents to your vector store:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ '1', '2', '3', '4' ]\n" + ] + } + ], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "**Note:** After adding documents, there is a slight delay before they become queryable.\n", + "\n", + "Adding a document with the same `id` as an existing document will update the existing one.\n", + "\n", + "### Delete items from vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ef61e188", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"_id\":\"1\",\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"_id\":\"3\",\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "### Filtering\n", + "\n", + "MongoDB Atlas supports pre-filtering of results on other fields. They require you to define which metadata fields you plan to filter on by updating the index you created initially. 
Here's an example:\n", + "\n", + "```json\n", + "{\n", + " \"fields\": [\n", + " {\n", + " \"numDimensions\": 1024,\n", + " \"path\": \"embedding\",\n", + " \"similarity\": \"euclidean\",\n", + " \"type\": \"vector\"\n", + " },\n", + " {\n", + " \"path\": \"source\",\n", + " \"type\": \"filter\"\n", + " }\n", + " ]\n", + "}\n", + "```\n", + "\n", + "Above, the first item in `fields` is the vector index, and the second item is the metadata property you want to filter on. The name of the property is the value of the `path` key. So the above index would allow us to search on a metadata field named `source`.\n", + "\n", + "Then, in your code you can use [MQL Query Operators](https://www.mongodb.com/docs/manual/reference/operator/query/) for filtering.\n", + "\n", + "The below example illustrates this:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "bc8f242e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"_id\":\"1\",\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"_id\":\"3\",\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = {\n", + " preFilter: {\n", + " source: {\n", + " $eq: \"https://example.com\",\n", + " },\n", + " },\n", + "}\n", + "\n", + "const filteredResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of filteredResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "69326bba", + "metadata": {}, + "source": [ + "### Returning scores\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.374] The powerhouse of the cell is the mitochondria [{\"_id\":\"1\",\"source\":\"https://example.com\"}]\n", + "* [SIM=0.370] Mitochondria are made out of lipids [{\"_id\":\"3\",\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { _id: '1', source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { _id: '3', source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "069f1b5f", + "metadata": {}, + "source": [ + "## Closing connections\n", + "\n", + "Make sure you close the client instance when you are finished to avoid excessive resource consumption:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "f71ce986", + "metadata": {}, + "outputs": [], + "source": [ + "await client.close();" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `MongoDBAtlasVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mongodb.MongoDBAtlasVectorSearch.html)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "069f1b5f", - "metadata": {}, - "source": [ - "## Closing connections\n", - "\n", - "Make sure you close the client instance when you are finished to avoid excessive resource consumption:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "f71ce986", - "metadata": {}, - "outputs": [], - "source": [ - "await client.close();" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `MongoDBAtlasVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mongodb.MongoDBAtlasVectorSearch.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb b/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb index 702f45a3eda8..9d86461dac56 100644 --- a/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/pgvector.ipynb @@ -1,629 +1,629 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: PGVector\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# PGVectorStore\n", - "\n", - "```{=mdx}\n", - ":::tip Compatibility\n", - "Only available on Node.js.\n", - ":::\n", - "```\n", - "\n", - "To enable vector search in generic PostgreSQL databases, LangChain.js supports using the [`pgvector`](https://github.com/pgvector/pgvector) Postgres extension.\n", - "\n", - "This guide provides a quick overview for getting started with PGVector [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `PGVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_pgvector.PGVectorStore.html)." 
- ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/pgvector/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`PGVectorStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_pgvector.PGVectorStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use PGVector vector stores, you'll need to set up a Postgres instance with the [`pgvector`](https://github.com/pgvector/pgvector) extension enabled. You'll also need to install the `@langchain/community` integration package with the [`pg`](https://www.npmjs.com/package/pg) package as a peer dependency.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "We'll also use the [`uuid`](https://www.npmjs.com/package/uuid) package to generate ids in the required format.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/openai @langchain/core pg uuid\n", - "\n", - "```\n", - "\n", - "### Setting up an instance\n", - "\n", - "There are many ways to connect to Postgres depending on how you've set up your instance. Here's one example of a local setup using a prebuilt Docker image provided by the `pgvector` team.\n", - "\n", - "Create a file with the below content named docker-compose.yml:\n", - "\n", - "```yaml\n", - "# Run this command to start the database:\n", - "# docker-compose up --build\n", - "version: \"3\"\n", - "services:\n", - " db:\n", - " hostname: 127.0.0.1\n", - " image: pgvector/pgvector:pg16\n", - " ports:\n", - " - 5432:5432\n", - " restart: always\n", - " environment:\n", - " - POSTGRES_DB=api\n", - " - POSTGRES_USER=myuser\n", - " - POSTGRES_PASSWORD=ChangeMe\n", - " volumes:\n", - " - ./init.sql:/docker-entrypoint-initdb.d/init.sql\n", - "```\n", - "\n", - "And then in the same directory, run docker compose up to start the container.\n", - "\n", - "You can find more information on how to setup pgvector in the [official repository](https://github.com/pgvector/pgvector/).\n", - "\n", - "### Credentials\n", - "\n", - "To connect to you Postgres instance, you'll need corresponding credentials. 
For a full list of supported options, see the [`node-postgres` docs](https://node-postgres.com/apis/client).\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "To instantiate the vector store, call the `.initialize()` static method. This will automatically check for the presence of a table, given by `tableName` in the passed `config`. If it is not there, it will create it with the required columns.\n", - "\n", - "```{=mdx}\n", - "\n", - "::::danger Security\n", - "User-generated data such as usernames should not be used as input for table and column names. \n", - "**This may lead to SQL Injection!**\n", - "::::\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import {\n", - " PGVectorStore,\n", - " DistanceStrategy,\n", - "} from \"@langchain/community/vectorstores/pgvector\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { PoolConfig } from \"pg\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "// Sample config\n", - "const config = {\n", - " postgresConnectionOptions: {\n", - " type: \"postgres\",\n", - " host: \"127.0.0.1\",\n", - " port: 5433,\n", - " user: \"myuser\",\n", - " password: \"ChangeMe\",\n", - " database: \"api\",\n", - " } as PoolConfig,\n", - " tableName: \"testlangchainjs\",\n", - " columns: {\n", - " idColumnName: \"id\",\n", - " vectorColumnName: \"vector\",\n", - " contentColumnName: \"content\",\n", - " metadataColumnName: \"metadata\",\n", - " },\n", - " // supported distance strategies: cosine (default), innerProduct, or euclidean\n", - " distanceStrategy: \"cosine\" as DistanceStrategy,\n", - "};\n", - "\n", - "const vectorStore = await PGVectorStore.initialize(\n", - " embeddings,\n", - " config\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [], - "source": [ - "import { v4 as uuidv4 } from \"uuid\";\n", - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " 
metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "const ids = [uuidv4(), uuidv4(), uuidv4(), uuidv4()]\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: ids });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "### Delete items from vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "const id4 = ids[ids.length - 1];\n", - "\n", - "await vectorStore.delete({ ids: [id4] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = { source: \"https://example.com\" };\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "The above filter syntax supports exact match, but the following are also supported:\n", - "\n", - "#### Using the `in` operator\n", - "\n", - "```json\n", - "{\n", - " \"field\": {\n", - " \"in\": [\"value1\", \"value2\"],\n", - " }\n", - "}\n", - "```\n", - "\n", - "#### Using the `arrayContains` operator\n", - "\n", - "```json\n", - "{\n", - " \"field\": {\n", - " \"arrayContains\": [\"value1\", \"value2\"],\n", - " }\n", - "}\n", - "```\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: PGVector\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for 
easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# PGVectorStore\n", + "\n", + "```{=mdx}\n", + ":::tip Compatibility\n", + "Only available on Node.js.\n", + ":::\n", + "```\n", + "\n", + "To enable vector search in generic PostgreSQL databases, LangChain.js supports using the [`pgvector`](https://github.com/pgvector/pgvector) Postgres extension.\n", + "\n", + "This guide provides a quick overview for getting started with PGVector [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `PGVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_pgvector.PGVectorStore.html)." + ] + }, + { + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/pgvector/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`PGVectorStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_pgvector.PGVectorStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use PGVector vector stores, you'll need to set up a Postgres instance with the [`pgvector`](https://github.com/pgvector/pgvector) extension enabled. You'll also need to install the `@langchain/community` integration package with the [`pg`](https://www.npmjs.com/package/pg) package as a peer dependency.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "We'll also use the [`uuid`](https://www.npmjs.com/package/uuid) package to generate ids in the required format.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/openai @langchain/core pg uuid\n", + "\n", + "```\n", + "\n", + "### Setting up an instance\n", + "\n", + "There are many ways to connect to Postgres depending on how you've set up your instance. 
Here's one example of a local setup using a prebuilt Docker image provided by the `pgvector` team.\n", + "\n", + "Create a file with the below content named docker-compose.yml:\n", + "\n", + "```yaml\n", + "# Run this command to start the database:\n", + "# docker-compose up --build\n", + "version: \"3\"\n", + "services:\n", + " db:\n", + " hostname: 127.0.0.1\n", + " image: pgvector/pgvector:pg16\n", + " ports:\n", + " - 5432:5432\n", + " restart: always\n", + " environment:\n", + " - POSTGRES_DB=api\n", + " - POSTGRES_USER=myuser\n", + " - POSTGRES_PASSWORD=ChangeMe\n", + " volumes:\n", + " - ./init.sql:/docker-entrypoint-initdb.d/init.sql\n", + "```\n", + "\n", + "And then in the same directory, run docker compose up to start the container.\n", + "\n", + "You can find more information on how to set up pgvector in the [official repository](https://github.com/pgvector/pgvector/).\n", + "\n", + "### Credentials\n", + "\n", + "To connect to your Postgres instance, you'll need corresponding credentials. For a full list of supported options, see the [`node-postgres` docs](https://node-postgres.com/apis/client).\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "To instantiate the vector store, call the `.initialize()` static method. This will automatically check for the presence of a table, given by `tableName` in the passed `config`. If it is not there, it will create it with the required columns.\n", + "\n", + "```{=mdx}\n", + "\n", + "::::danger Security\n", + "User-generated data such as usernames should not be used as input for table and column names. 
\n", + "**This may lead to SQL Injection!**\n", + "::::\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import {\n", + " PGVectorStore,\n", + " DistanceStrategy,\n", + "} from \"@langchain/community/vectorstores/pgvector\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { PoolConfig } from \"pg\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "// Sample config\n", + "const config = {\n", + " postgresConnectionOptions: {\n", + " type: \"postgres\",\n", + " host: \"127.0.0.1\",\n", + " port: 5433,\n", + " user: \"myuser\",\n", + " password: \"ChangeMe\",\n", + " database: \"api\",\n", + " } as PoolConfig,\n", + " tableName: \"testlangchainjs\",\n", + " columns: {\n", + " idColumnName: \"id\",\n", + " vectorColumnName: \"vector\",\n", + " contentColumnName: \"content\",\n", + " metadataColumnName: \"metadata\",\n", + " },\n", + " // supported distance strategies: cosine (default), innerProduct, or euclidean\n", + " distanceStrategy: \"cosine\" as DistanceStrategy,\n", + "};\n", + "\n", + "const vectorStore = await PGVectorStore.initialize(\n", + " embeddings,\n", + " config\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [], + "source": [ + "import { v4 as uuidv4 } from \"uuid\";\n", + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "const ids = [uuidv4(), uuidv4(), uuidv4(), uuidv4()]\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: ids });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "### Delete items from vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ef61e188", + "metadata": {}, + "outputs": [], + "source": [ + "const id4 = ids[ids.length - 1];\n", + "\n", + "await vectorStore.delete({ ids: [id4] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector 
store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = { source: \"https://example.com\" };\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "The above filter syntax supports exact match, but the following are also supported:\n", + "\n", + "#### Using the `in` operator\n", + "\n", + "```json\n", + "{\n", + " \"field\": {\n", + " \"in\": [\"value1\", \"value2\"],\n", + " }\n", + "}\n", + "```\n", + "\n", + "#### Using the `arrayContains` operator\n", + "\n", + "```json\n", + "{\n", + " \"field\": {\n", + " \"arrayContains\": [\"value1\", \"value2\"],\n", + " }\n", + "}\n", + "```\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "371727a8", + "metadata": {}, + "source": [ + "## Advanced: reusing connections\n", + "\n", + "You can reuse connections by creating a pool, then creating new `PGVectorStore` instances directly via the constructor.\n", + "\n", + "Note that you should call `.initialize()` to set up your database at least once to set up your tables properly before using the constructor." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "09efeac4", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { PGVectorStore } from \"@langchain/community/vectorstores/pgvector\";\n", + "import pg from \"pg\";\n", + "\n", + "// First, follow set-up instructions at\n", + "// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector\n", + "\n", + "const reusablePool = new pg.Pool({\n", + " host: \"127.0.0.1\",\n", + " port: 5433,\n", + " user: \"myuser\",\n", + " password: \"ChangeMe\",\n", + " database: \"api\",\n", + "});\n", + "\n", + "const originalConfig = {\n", + " pool: reusablePool,\n", + " tableName: \"testlangchainjs\",\n", + " collectionName: \"sample\",\n", + " collectionTableName: \"collections\",\n", + " columns: {\n", + " idColumnName: \"id\",\n", + " vectorColumnName: \"vector\",\n", + " contentColumnName: \"content\",\n", + " metadataColumnName: \"metadata\",\n", + " },\n", + "};\n", + "\n", + "// Set up the DB.\n", + "// Can skip this step if you've already initialized the DB.\n", + "// await PGVectorStore.initialize(new OpenAIEmbeddings(), originalConfig);\n", + "const pgvectorStore = new PGVectorStore(new OpenAIEmbeddings(), originalConfig);\n", + "\n", + "await pgvectorStore.addDocuments([\n", + " { pageContent: \"what's this\", metadata: { a: 2 } },\n", + " { pageContent: \"Cat drinks milk\", metadata: { a: 1 } },\n", + "]);\n", + "\n", + "const results = await pgvectorStore.similaritySearch(\"water\", 1);\n", + "\n", + "console.log(results);\n", + "\n", + "/*\n", + " [ Document { pageContent: 'Cat drinks milk', metadata: { a: 1 } } ]\n", + "*/\n", + "\n", + "const pgvectorStore2 = new PGVectorStore(new OpenAIEmbeddings(), {\n", + " pool: reusablePool,\n", + " tableName: \"testlangchainjs\",\n", + " 
collectionTableName: \"collections\",\n", + " collectionName: \"some_other_collection\",\n", + " columns: {\n", + " idColumnName: \"id\",\n", + " vectorColumnName: \"vector\",\n", + " contentColumnName: \"content\",\n", + " metadataColumnName: \"metadata\",\n", + " },\n", + "});\n", + "\n", + "const results2 = await pgvectorStore2.similaritySearch(\"water\", 1);\n", + "\n", + "console.log(results2);\n", + "\n", + "/*\n", + " []\n", + "*/\n", + "\n", + "await reusablePool.end();" + ] + }, + { + "cell_type": "markdown", + "id": "23bd7096", + "metadata": {}, + "source": [ + "## Create HNSW Index\n", + "\n", + "By default, the extension performs a sequential scan search, with 100% recall. You might consider creating an HNSW index for approximate nearest neighbor (ANN) search to speed up `similaritySearchVectorWithScore` execution time. To create the HNSW index on your vector column, use the `createHnswIndex()` method.\n", + "\n", + "The method parameters include:\n", + "\n", + "- `dimensions`: Defines the number of dimensions in your vector data type, up to 2000. For example, use 1536 for OpenAI's text-embedding-ada-002 and Amazon's amazon.titan-embed-text-v1 models.\n", + "\n", + "- `m?`: The max number of connections per layer (16 by default). Index build time improves with smaller values, while higher values can speed up search queries.\n", + "\n", + "- `efConstruction?`: The size of the dynamic candidate list for constructing the graph (64 by default). A higher value can potentially improve the index quality at the cost of index build time.\n", + "\n", + "- `distanceFunction?`: The distance function name you want to use, is automatically selected based on the distanceStrategy.\n", + "\n", + "For more info, see the [Pgvector GitHub repo](https://github.com/pgvector/pgvector?tab=readme-ov-file#hnsw) and the [HNSW paper from Malkov Yu A. and Yashunin D. A.. 2020. 
Efficient and robust approximate nearest neighbor search using hierarchical navigable small world graphs](https://arxiv.org/pdf/1603.09320)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5e5b9595", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import {\n", + " DistanceStrategy,\n", + " PGVectorStore,\n", + "} from \"@langchain/community/vectorstores/pgvector\";\n", + "import { PoolConfig } from \"pg\";\n", + "\n", + "// First, follow set-up instructions at\n", + "// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector\n", + "\n", + "const hnswConfig = {\n", + " postgresConnectionOptions: {\n", + " type: \"postgres\",\n", + " host: \"127.0.0.1\",\n", + " port: 5433,\n", + " user: \"myuser\",\n", + " password: \"ChangeMe\",\n", + " database: \"api\",\n", + " } as PoolConfig,\n", + " tableName: \"testlangchainjs\",\n", + " columns: {\n", + " idColumnName: \"id\",\n", + " vectorColumnName: \"vector\",\n", + " contentColumnName: \"content\",\n", + " metadataColumnName: \"metadata\",\n", + " },\n", + " // supported distance strategies: cosine (default), innerProduct, or euclidean\n", + " distanceStrategy: \"cosine\" as DistanceStrategy,\n", + "};\n", + "\n", + "const hnswPgVectorStore = await PGVectorStore.initialize(\n", + " new OpenAIEmbeddings(),\n", + " hnswConfig\n", + ");\n", + "\n", + "// create the index\n", + "await hnswPgVectorStore.createHnswIndex({\n", + " dimensions: 1536,\n", + " efConstruction: 64,\n", + " m: 16,\n", + "});\n", + "\n", + "await hnswPgVectorStore.addDocuments([\n", + " { pageContent: \"what's this\", metadata: { a: 2, b: [\"tag1\", \"tag2\"] } },\n", + " { pageContent: \"Cat drinks milk\", metadata: { a: 1, b: [\"tag2\"] } },\n", + "]);\n", + "\n", + "const model = new OpenAIEmbeddings();\n", + "const query = await model.embedQuery(\"water\");\n", + "const hnswResults = await hnswPgVectorStore.similaritySearchVectorWithScore(query, 1);\n", + "\n", + "console.log(hnswResults);\n", + "\n", + "// close the store created in this example (its pool is separate from reusablePool above)\n", + "await hnswPgVectorStore.end();" + ] + }, + { + "cell_type": "markdown", + "id": "069f1b5f", + "metadata": {}, + "source": [ + "## Closing connections\n", + "\n", + "Make sure you close the connection when you are finished to avoid excessive resource consumption:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f71ce986", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.end();" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `PGVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_pgvector.PGVectorStore.html)."
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "371727a8", - "metadata": {}, - "source": [ - "## Advanced: reusing connections\n", - "\n", - "You can reuse connections by creating a pool, then creating new `PGVectorStore` instances directly via the constructor.\n", - "\n", - "Note that you should call `.initialize()` to set up your database at least once to set up your tables properly before using the constructor." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "09efeac4", - "metadata": {}, - "outputs": [], - "source": [ - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { PGVectorStore } from \"@langchain/community/vectorstores/pgvector\";\n", - "import pg from \"pg\";\n", - "\n", - "// First, follow set-up instructions at\n", - "// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector\n", - "\n", - "const reusablePool = new pg.Pool({\n", - " host: \"127.0.0.1\",\n", - " port: 5433,\n", - " user: \"myuser\",\n", - " password: \"ChangeMe\",\n", - " database: \"api\",\n", - "});\n", - "\n", - "const originalConfig = {\n", - " pool: reusablePool,\n", - " tableName: \"testlangchainjs\",\n", - " collectionName: \"sample\",\n", - " collectionTableName: \"collections\",\n", - " columns: {\n", - " idColumnName: \"id\",\n", - " vectorColumnName: \"vector\",\n", - " contentColumnName: \"content\",\n", - " metadataColumnName: \"metadata\",\n", - " },\n", - "};\n", - "\n", - "// Set up the DB.\n", - "// Can skip this step if you've already initialized the DB.\n", - "// await PGVectorStore.initialize(new OpenAIEmbeddings(), originalConfig);\n", - "const pgvectorStore = new PGVectorStore(new OpenAIEmbeddings(), originalConfig);\n", - "\n", - "await pgvectorStore.addDocuments([\n", - " { pageContent: \"what's this\", metadata: { a: 2 } },\n", - " { pageContent: \"Cat drinks milk\", metadata: { a: 1 } },\n", - "]);\n", - "\n", - "const results = await pgvectorStore.similaritySearch(\"water\", 1);\n", - "\n", - "console.log(results);\n", - "\n", - "/*\n", - " [ Document { pageContent: 'Cat drinks milk', metadata: { a: 1 } } ]\n", - "*/\n", - "\n", - "const pgvectorStore2 = new PGVectorStore(new OpenAIEmbeddings(), {\n", - " pool: reusablePool,\n", - " tableName: \"testlangchainjs\",\n", - " collectionTableName: \"collections\",\n", - " collectionName: \"some_other_collection\",\n", - " columns: {\n", - " idColumnName: \"id\",\n", - " vectorColumnName: \"vector\",\n", - " contentColumnName: 
\"content\",\n", - " metadataColumnName: \"metadata\",\n", - " },\n", - "});\n", - "\n", - "const results2 = await pgvectorStore2.similaritySearch(\"water\", 1);\n", - "\n", - "console.log(results2);\n", - "\n", - "/*\n", - " []\n", - "*/\n", - "\n", - "await reusablePool.end();" - ] - }, - { - "cell_type": "markdown", - "id": "23bd7096", - "metadata": {}, - "source": [ - "## Create HNSW Index\n", - "\n", - "By default, the extension performs a sequential scan search, with 100% recall. You might consider creating an HNSW index for approximate nearest neighbor (ANN) search to speed up `similaritySearchVectorWithScore` execution time. To create the HNSW index on your vector column, use the `createHnswIndex()` method.\n", - "\n", - "The method parameters include:\n", - "\n", - "- `dimensions`: Defines the number of dimensions in your vector data type, up to 2000. For example, use 1536 for OpenAI's text-embedding-ada-002 and Amazon's amazon.titan-embed-text-v1 models.\n", - "\n", - "- `m?`: The max number of connections per layer (16 by default). Index build time improves with smaller values, while higher values can speed up search queries.\n", - "\n", - "- `efConstruction?`: The size of the dynamic candidate list for constructing the graph (64 by default). A higher value can potentially improve the index quality at the cost of index build time.\n", - "\n", - "- `distanceFunction?`: The distance function name you want to use, is automatically selected based on the distanceStrategy.\n", - "\n", - "For more info, see the [Pgvector GitHub repo](https://github.com/pgvector/pgvector?tab=readme-ov-file#hnsw) and the [HNSW paper from Malkov Yu A. and Yashunin D. A.. 2020. Efficient and robust approximate nearest neighbor search using hierarchical navigable small world graphs](https://arxiv.org/pdf/1603.09320)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5e5b9595", - "metadata": {}, - "outputs": [], - "source": [ - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import {\n", - " DistanceStrategy,\n", - " PGVectorStore,\n", - "} from \"@langchain/community/vectorstores/pgvector\";\n", - "import { PoolConfig } from \"pg\";\n", - "\n", - "// First, follow set-up instructions at\n", - "// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/pgvector\n", - "\n", - "const hnswConfig = {\n", - " postgresConnectionOptions: {\n", - " type: \"postgres\",\n", - " host: \"127.0.0.1\",\n", - " port: 5433,\n", - " user: \"myuser\",\n", - " password: \"ChangeMe\",\n", - " database: \"api\",\n", - " } as PoolConfig,\n", - " tableName: \"testlangchainjs\",\n", - " columns: {\n", - " idColumnName: \"id\",\n", - " vectorColumnName: \"vector\",\n", - " contentColumnName: \"content\",\n", - " metadataColumnName: \"metadata\",\n", - " },\n", - " // supported distance strategies: cosine (default), innerProduct, or euclidean\n", - " distanceStrategy: \"cosine\" as DistanceStrategy,\n", - "};\n", - "\n", - "const hnswPgVectorStore = await PGVectorStore.initialize(\n", - " new OpenAIEmbeddings(),\n", - " hnswConfig\n", - ");\n", - "\n", - "// create the index\n", - "await hnswPgVectorStore.createHnswIndex({\n", - " dimensions: 1536,\n", - " efConstruction: 64,\n", - " m: 16,\n", - "});\n", - "\n", - "await hnswPgVectorStore.addDocuments([\n", - " { pageContent: \"what's this\", metadata: { a: 2, b: [\"tag1\", \"tag2\"] } },\n", - " { pageContent: \"Cat drinks milk\", metadata: { a: 1, b: [\"tag2\"] } },\n", - "]);\n", - "\n", - "const model = new 
OpenAIEmbeddings();\n", - "const query = await model.embedQuery(\"water\");\n", - "const hnswResults = await hnswPgVectorStore.similaritySearchVectorWithScore(query, 1);\n", - "\n", - "console.log(hnswResults);\n", - "\n", - "await pgvectorStore.end();" - ] - }, - { - "cell_type": "markdown", - "id": "069f1b5f", - "metadata": {}, - "source": [ - "## Closing connections\n", - "\n", - "Make sure you close the connection when you are finished to avoid excessive resource consumption:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f71ce986", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.end();" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `PGVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_pgvector.PGVectorStore.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb b/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb index 16cae8ead6ca..d47880caebb0 100644 --- a/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/pinecone.ipynb @@ -1,366 +1,366 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Pinecone\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# PineconeStore\n", - "\n", - "[Pinecone](https://www.pinecone.io/) is a vector database that helps power AI for some of the world’s best companies.\n", - "\n", - "This guide provides a quick overview for getting started with Pinecone [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `PineconeStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_pinecone.PineconeStore.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/pinecone/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`PineconeStore`](https://api.js.langchain.com/classes/langchain_pinecone.PineconeStore.html) | [`@langchain/pinecone`](https://npmjs.com/@langchain/pinecone) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/pinecone?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Pinecone vector stores, you'll need to create a Pinecone account, initialize an index, and install the `@langchain/pinecone` integration package. 
You'll also want to install the [official Pinecone SDK](https://www.npmjs.com/package/@pinecone-database/pinecone) to initialize a client to pass into the `PineconeStore` instance.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/pinecone @langchain/openai @langchain/core @pinecone-database/pinecone \n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Sign up for a [Pinecone](https://www.pinecone.io/) account and create an index. Make sure the dimensions match those of the embeddings you want to use (the default is 1536 for OpenAI's `text-embedding-3-small`). Once you've done this set the `PINECONE_INDEX`, `PINECONE_API_KEY`, and (optionally) `PINECONE_ENVIRONMENT` environment variables:\n", - "\n", - "```typescript\n", - "process.env.PINECONE_API_KEY = \"your-pinecone-api-key\";\n", - "process.env.PINECONE_INDEX = \"your-pinecone-index\";\n", - "\n", - "// Optional\n", - "process.env.PINECONE_ENVIRONMENT = \"your-pinecone-environment\";\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { PineconeStore } from \"@langchain/pinecone\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "import { Pinecone as PineconeClient } from \"@pinecone-database/pinecone\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const pinecone = new PineconeClient();\n", - "// Will automatically read the PINECONE_API_KEY and PINECONE_ENVIRONMENT env vars\n", - "const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);\n", - "\n", - "const vectorStore = await PineconeStore.fromExistingIndex(\n", - " embeddings,\n", - " {\n", - " pineconeIndex,\n", - " // Maximum number of batch requests to allow at once. 
Each batch is 1000 vectors.\n", - " maxConcurrency: 5,\n", - " // You can pass a namespace here too\n", - " // namespace: \"foo\",\n", - " }\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ '1', '2', '3', '4' ]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "**Note:** After adding documents, there is a slight delay before they become queryable.\n", - "\n", - "### Delete items from vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Pinecone\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "// Optional filter\n", - "const filter = { source: \"https://example.com\" };\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# PineconeStore\n", + "\n", + "[Pinecone](https://www.pinecone.io/) is a vector database that helps power AI for some of the world’s best companies.\n", + "\n", + "This guide provides a quick overview for getting started with Pinecone [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `PineconeStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_pinecone.PineconeStore.html)." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/pinecone/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`PineconeStore`](https://api.js.langchain.com/classes/langchain_pinecone.PineconeStore.html) | [`@langchain/pinecone`](https://npmjs.com/@langchain/pinecone) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/pinecone?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Pinecone vector stores, you'll need to create a Pinecone account, initialize an index, and install the `@langchain/pinecone` integration package. You'll also want to install the [official Pinecone SDK](https://www.npmjs.com/package/@pinecone-database/pinecone) to initialize a client to pass into the `PineconeStore` instance.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/pinecone @langchain/openai @langchain/core @pinecone-database/pinecone \n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Sign up for a [Pinecone](https://www.pinecone.io/) account and create an index. Make sure the dimensions match those of the embeddings you want to use (the default is 1536 for OpenAI's `text-embedding-3-small`). 
Once you've done this set the `PINECONE_INDEX`, `PINECONE_API_KEY`, and (optionally) `PINECONE_ENVIRONMENT` environment variables:\n", + "\n", + "```typescript\n", + "process.env.PINECONE_API_KEY = \"your-pinecone-api-key\";\n", + "process.env.PINECONE_INDEX = \"your-pinecone-index\";\n", + "\n", + "// Optional\n", + "process.env.PINECONE_ENVIRONMENT = \"your-pinecone-environment\";\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { PineconeStore } from \"@langchain/pinecone\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "import { Pinecone as PineconeClient } from \"@pinecone-database/pinecone\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const pinecone = new PineconeClient();\n", + "// Will automatically read the PINECONE_API_KEY and PINECONE_ENVIRONMENT env vars\n", + "const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);\n", + "\n", + "const vectorStore = await PineconeStore.fromExistingIndex(\n", + " embeddings,\n", + " {\n", + " pineconeIndex,\n", + " // Maximum number of batch requests to allow at once. 
Each batch is 1000 vectors.\n", + " maxConcurrency: 5,\n", + " // You can pass a namespace here too\n", + " // namespace: \"foo\",\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ '1', '2', '3', '4' ]\n" + ] + } + ], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "**Note:** After adding documents, there is a slight delay before they become queryable.\n", + "\n", + "### Delete items from vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ef61e188", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 8, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "// Optional filter\n", + "const filter = { source: \"https://example.com\" };\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `PineconeStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_pinecone.PineconeStore.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `PineconeStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_pinecone.PineconeStore.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb index 9ea4f1d4a646..ce91818a4ca0 100644 --- a/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/qdrant.ipynb @@ -1,344 +1,344 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Qdrant\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# QdrantVectorStore\n", - "\n", - "```{=mdx}\n", - ":::tip Compatibility\n", - "Only available on Node.js.\n", - ":::\n", - "```\n", - "\n", - "[Qdrant](https://qdrant.tech/) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload.\n", - "\n", - "This guide provides a quick overview for getting started with Qdrant [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `QdrantVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_qdrant.QdrantVectorStore.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/qdrant/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`QdrantVectorStore`](https://api.js.langchain.com/classes/langchain_qdrant.QdrantVectorStore.html) | [`@langchain/qdrant`](https://npmjs.com/@langchain/qdrant) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/qdrant?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Qdrant vector stores, you'll need to set up a Qdrant instance and install the `@langchain/qdrant` integration package.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/qdrant @langchain/core @langchain/openai\n", - "\n", - "```\n", - "\n", - "After installing the required dependencies, run a Qdrant instance with Docker on your computer by following the [Qdrant setup instructions](https://qdrant.tech/documentation/quickstart/). 
Note the URL your container runs on.\n", - "\n", - "### Credentials\n", - "\n", - "Once you've done this set a `QDRANT_URL` environment variable:\n", - "\n", - "```typescript\n", - "// e.g. http://localhost:6333\n", - "process.env.QDRANT_URL = \"your-qdrant-url\"\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { QdrantVectorStore } from \"@langchain/qdrant\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const vectorStore = await QdrantVectorStore.fromExistingCollection(embeddings, {\n", - " url: process.env.QDRANT_URL,\n", - " collectionName: \"langchainjs-testing\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "17f5efc0", - "metadata": {}, - "outputs": [], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents);" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "Top-level document ids and deletion are currently not supported." - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = {\n", - " \"must\": [\n", - " { \"key\": \"metadata.source\", \"match\": { \"value\": \"https://example.com\" } },\n", - " ]\n", - "};\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "See [this page](https://qdrant.tech/documentation/concepts/filtering/) for more on Qdrant filter syntax. Note that all values must be prefixed with `metadata.`\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Qdrant\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# QdrantVectorStore\n", + "\n", + "```{=mdx}\n", + ":::tip Compatibility\n", + "Only available on Node.js.\n", + ":::\n", + "```\n", + "\n", + "[Qdrant](https://qdrant.tech/) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload.\n", + "\n", + "This guide provides a quick overview for getting started with Qdrant [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `QdrantVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_qdrant.QdrantVectorStore.html)." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/qdrant/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`QdrantVectorStore`](https://api.js.langchain.com/classes/langchain_qdrant.QdrantVectorStore.html) | [`@langchain/qdrant`](https://npmjs.com/@langchain/qdrant) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/qdrant?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Qdrant vector stores, you'll need to set up a Qdrant instance and install the `@langchain/qdrant` integration package.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/qdrant @langchain/core @langchain/openai\n", + "\n", + "```\n", + "\n", + "After installing the required dependencies, run a Qdrant instance with Docker on your computer by following the [Qdrant setup instructions](https://qdrant.tech/documentation/quickstart/). Note the URL your container runs on.\n", + "\n", + "### Credentials\n", + "\n", + "Once you've done this set a `QDRANT_URL` environment variable:\n", + "\n", + "```typescript\n", + "// e.g. 
http://localhost:6333\n", + "process.env.QDRANT_URL = \"your-qdrant-url\"\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { QdrantVectorStore } from \"@langchain/qdrant\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const vectorStore = await QdrantVectorStore.fromExistingCollection(embeddings, {\n", + " url: process.env.QDRANT_URL,\n", + " collectionName: \"langchainjs-testing\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "17f5efc0", + "metadata": {}, + "outputs": [], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents);" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "Top-level document ids and deletion are currently not supported." + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = {\n", + " \"must\": [\n", + " { \"key\": \"metadata.source\", \"match\": { \"value\": \"https://example.com\" } },\n", + " ]\n", + "};\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "See [this page](https://qdrant.tech/documentation/concepts/filtering/) for more on Qdrant filter syntax. Note that all values must be prefixed with `metadata.`\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `QdrantVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_qdrant.QdrantVectorStore.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `QdrantVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_qdrant.QdrantVectorStore.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/redis.ipynb b/docs/core_docs/docs/integrations/vectorstores/redis.ipynb index e31f74a9aa2e..3d6a038d8940 100644 --- a/docs/core_docs/docs/integrations/vectorstores/redis.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/redis.ipynb @@ -1,373 +1,373 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Redis\n", - "sidebar_class_name: node-only\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# RedisVectorStore\n", - "\n", - "```{=mdx}\n", - ":::tip Compatibility\n", - "Only available on Node.js.\n", - ":::\n", - "```\n", - "\n", - "[Redis](https://redis.io/) is a fast open source, in-memory data store. As part of the [Redis Stack](https://redis.io/docs/latest/operate/oss_and_stack/install/install-stack/), [RediSearch](https://redis.io/docs/latest/develop/interact/search-and-query/) is the module that enables vector similarity semantic search, as well as many other types of searching.\n", - "\n", - "This guide provides a quick overview for getting started with Redis [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `RedisVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_redis.RedisVectorStore.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/redis/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`RedisVectorStore`](https://api.js.langchain.com/classes/langchain_redis.RedisVectorStore.html) | [`@langchain/redis`](https://npmjs.com/@langchain/redis/) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/redis?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Redis vector stores, you'll need to set up a Redis instance and install the `@langchain/redis` integration package. You can also install the [`node-redis`](https://github.com/redis/node-redis) package to initialize the vector store with a specific client instance.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/redis @langchain/core redis @langchain/openai\n", - "\n", - "```\n", - "\n", - "You can set up a Redis instance locally with Docker by following [these instructions](https://redis.io/docs/latest/operate/oss_and_stack/install/install-stack/docker/#redisredis-stack).\n", - "\n", - "### Credentials\n", - "\n", - "Once you've set up an instance, set the `REDIS_URL` environment variable:\n", - "\n", - "```typescript\n", - "process.env.REDIS_URL = \"your-redis-url\"\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { RedisVectorStore } from \"@langchain/redis\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "import { createClient } from \"redis\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const client = createClient({\n", - " url: process.env.REDIS_URL ?? \"redis://localhost:6379\",\n", - "});\n", - "await client.connect();\n", - "\n", - "const vectorStore = new RedisVectorStore(embeddings, {\n", - " redisClient: client,\n", - " indexName: \"langchainjs-testing\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { type: \"example\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { type: \"example\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { type: \"example\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { type: \"example\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents);" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "Top-level document ids and deletion are currently not supported." 
- ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [], - "source": [ - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "Filtering will currently look for any metadata key containing the provided string.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"type\":\"example\"}]\n", - "* [SIM=0.852] Mitochondria are made out of lipids [{\"type\":\"example\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Redis\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# RedisVectorStore\n", + "\n", + "```{=mdx}\n", + ":::tip Compatibility\n", + "Only available on Node.js.\n", + ":::\n", + "```\n", + "\n", + "[Redis](https://redis.io/) is a fast open source, in-memory data store. As part of the [Redis Stack](https://redis.io/docs/latest/operate/oss_and_stack/install/install-stack/), [RediSearch](https://redis.io/docs/latest/develop/interact/search-and-query/) is the module that enables vector similarity semantic search, as well as many other types of searching.\n", + "\n", + "This guide provides a quick overview for getting started with Redis [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `RedisVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_redis.RedisVectorStore.html)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/redis/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`RedisVectorStore`](https://api.js.langchain.com/classes/langchain_redis.RedisVectorStore.html) | [`@langchain/redis`](https://npmjs.com/@langchain/redis/) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/redis?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Redis vector stores, you'll need to set up a Redis instance and install the `@langchain/redis` integration package. You can also install the [`node-redis`](https://github.com/redis/node-redis) package to initialize the vector store with a specific client instance.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/redis @langchain/core redis @langchain/openai\n", + "\n", + "```\n", + "\n", + "You can set up a Redis instance locally with Docker by following [these instructions](https://redis.io/docs/latest/operate/oss_and_stack/install/install-stack/docker/#redisredis-stack).\n", + "\n", + "### Credentials\n", + "\n", + "Once you've set up an instance, set the `REDIS_URL` environment variable:\n", + "\n", + "```typescript\n", + "process.env.REDIS_URL = \"your-redis-url\"\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { RedisVectorStore } from \"@langchain/redis\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "import { createClient } from \"redis\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const client = createClient({\n", + " url: process.env.REDIS_URL ?? 
\"redis://localhost:6379\",\n", + "});\n", + "await client.connect();\n", + "\n", + "const vectorStore = new RedisVectorStore(embeddings, {\n", + " redisClient: client,\n", + " indexName: \"langchainjs-testing\",\n", + "});" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { type: 'example' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { type: 'example' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { type: \"example\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { type: \"example\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { type: \"example\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { type: \"example\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents);" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "Top-level document ids and deletion are currently not supported." + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [], + "source": [ + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "Filtering will currently look for any metadata key containing the provided string.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"type\":\"example\"}]\n", + "* [SIM=0.852] Mitochondria are made out of lipids [{\"type\":\"example\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. " + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { type: 'example' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { type: 'example' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "069f1b5f", + "metadata": {}, + "source": [ + "## Deleting an index\n", + "\n", + "You can delete an entire index with the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f71ce986", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.delete({ deleteAll: true });" + ] + }, + { + "cell_type": "markdown", + "id": "bf2357b3", + "metadata": {}, + "source": [ + "## Closing connections\n", + "\n", + "Make sure you close the client connection when you are finished to avoid excessive resource consumption:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + 
"id": "48a98cba", + "metadata": {}, + "outputs": [], + "source": [ + "await client.disconnect();" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `RedisVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_redis.RedisVectorStore.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "069f1b5f", - "metadata": {}, - "source": [ - "## Deleting an index\n", - "\n", - "You can delete an entire index with the following command:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "f71ce986", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ deleteAll: true });" - ] - }, - { - "cell_type": "markdown", - "id": "bf2357b3", - "metadata": {}, - "source": [ - "## Closing connections\n", - "\n", - "Make sure you close the client connection when you are finished to avoid excessive resource consumption:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "48a98cba", - "metadata": {}, - "outputs": [], - "source": [ - "await client.disconnect();" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `RedisVectorSearch` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_redis.RedisVectorStore.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb b/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb index 70a98dad4e46..6b6f49a09d02 100644 --- a/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/supabase.ipynb @@ -1,440 +1,440 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Supabase\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# SupabaseVectorStore\n", - "\n", - "[Supabase](https://supabase.com/docs) is an open-source Firebase alternative. Supabase is built on top of PostgreSQL, which offers strong SQL querying capabilities and enables a simple interface with already-existing tools and frameworks.\n", - "\n", - "LangChain.js supports using a Supabase Postgres database as a vector store, using the [`pgvector`](https://github.com/pgvector/pgvector) extension. Refer to the [Supabase blog post](https://supabase.com/blog/openai-embeddings-postgres-vector) for more information.\n", - "\n", - "This guide provides a quick overview for getting started with Supabase [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `SupabaseVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_supabase.SupabaseVectorStore.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/supabase/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`SupabaseVectorStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_supabase.SupabaseVectorStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Supabase vector stores, you'll need to set up a Supabase database and install the `@langchain/community` integration package. You'll also need to install the official [`@supabase/supabase-js`](https://www.npmjs.com/package/@supabase/supabase-js) SDK as a peer dependency.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core @supabase/supabase-js @langchain/openai\n", - "\n", - "```\n", - "\n", - "Once you've created a database, run the following SQL to set up [`pgvector`](https://github.com/pgvector/pgvector) and create the necessary table and functions:\n", - "\n", - "```sql\n", - "-- Enable the pgvector extension to work with embedding vectors\n", - "create extension vector;\n", - "\n", - "-- Create a table to store your documents\n", - "create table documents (\n", - " id bigserial primary key,\n", - " content text, -- corresponds to Document.pageContent\n", - " metadata jsonb, -- corresponds to Document.metadata\n", - " embedding vector(1536) -- 1536 works for OpenAI embeddings, change if needed\n", - ");\n", - "\n", - "-- Create a function to search for documents\n", - "create function match_documents (\n", - " query_embedding vector(1536),\n", - " match_count int DEFAULT null,\n", - " filter jsonb DEFAULT '{}'\n", - ") returns table (\n", - " id bigint,\n", - " content text,\n", - " metadata jsonb,\n", - " embedding jsonb,\n", - " similarity float\n", - ")\n", - "language plpgsql\n", - "as $$\n", - "#variable_conflict use_column\n", - "begin\n", - " return query\n", - " select\n", - " id,\n", - " content,\n", - " metadata,\n", - " (embedding::text)::jsonb as embedding,\n", - " 1 - (documents.embedding <=> query_embedding) as similarity\n", - " from documents\n", - " where metadata @> filter\n", - " order by documents.embedding <=> query_embedding\n", - " limit match_count;\n", - "end;\n", - "$$;\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "Once you've done this set the `SUPABASE_PRIVATE_KEY` and `SUPABASE_URL` environment variables:\n", - "\n", - "```typescript\n", - "process.env.SUPABASE_PRIVATE_KEY = \"your-api-key\";\n", - "process.env.SUPABASE_URL = \"your-supabase-db-url\";\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { SupabaseVectorStore } from \"@langchain/community/vectorstores/supabase\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "import { createClient } from \"@supabase/supabase-js\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const supabaseClient = createClient(\n", - " process.env.SUPABASE_URL,\n", - " process.env.SUPABASE_PRIVATE_KEY\n", - ");\n", - "\n", - "const vectorStore = new SupabaseVectorStore(embeddings, {\n", - " client: 
supabaseClient,\n", - " tableName: \"documents\",\n", - " queryName: \"match_documents\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 1, 2, 3, 4 ]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "### Delete items from vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Supabase\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = { source: \"https://example.com\" };\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# SupabaseVectorStore\n", + "\n", + "[Supabase](https://supabase.com/docs) is an open-source Firebase alternative. 
Supabase is built on top of PostgreSQL, which offers strong SQL querying capabilities and enables a simple interface with already-existing tools and frameworks.\n", + "\n", + "LangChain.js supports using a Supabase Postgres database as a vector store, using the [`pgvector`](https://github.com/pgvector/pgvector) extension. Refer to the [Supabase blog post](https://supabase.com/blog/openai-embeddings-postgres-vector) for more information.\n", + "\n", + "This guide provides a quick overview for getting started with Supabase [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `SupabaseVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_supabase.SupabaseVectorStore.html)." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "180b0e66", - "metadata": {}, - "source": [ - "### Metadata Query Builder Filtering\n", - "\n", - "You can also use query builder-style filtering similar to how the [Supabase JavaScript library](https://supabase.com/docs/reference/javascript/using-filters) works instead of passing an object. Note that since most of the filter properties are in the metadata column, you need to use arrow operators (-> for integer or ->> for text) as defined in [Postgrest API documentation](https://postgrest.org/en/stable/references/api/tables_views.html#json-columns) and specify the data type of the property (e.g. the column should look something like `metadata->some_int_prop_name::int`)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "e3287768", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/supabase/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`SupabaseVectorStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_supabase.SupabaseVectorStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "import { SupabaseFilterRPCCall } from \"@langchain/community/vectorstores/supabase\";\n", - "\n", - "const funcFilter: SupabaseFilterRPCCall = (rpc) =>\n", - " rpc.filter(\"metadata->>source\", \"eq\", \"https://example.com\");\n", - "\n", - "const funcFilterSearchResults = await vectorStore.similaritySearch(\"biology\", 2, funcFilter);\n", - "\n", - "for (const doc of funcFilterSearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. " - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Supabase vector stores, you'll need to set up a Supabase database and install the `@langchain/community` integration package. You'll also need to install the official [`@supabase/supabase-js`](https://www.npmjs.com/package/@supabase/supabase-js) SDK as a peer dependency.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core @supabase/supabase-js @langchain/openai\n", + "\n", + "```\n", + "\n", + "Once you've created a database, run the following SQL to set up [`pgvector`](https://github.com/pgvector/pgvector) and create the necessary table and functions:\n", + "\n", + "```sql\n", + "-- Enable the pgvector extension to work with embedding vectors\n", + "create extension vector;\n", + "\n", + "-- Create a table to store your documents\n", + "create table documents (\n", + " id bigserial primary key,\n", + " content text, -- corresponds to Document.pageContent\n", + " metadata jsonb, -- corresponds to Document.metadata\n", + " embedding vector(1536) -- 1536 works for OpenAI embeddings, change if needed\n", + ");\n", + "\n", + "-- Create a function to search for documents\n", + "create function match_documents (\n", + " query_embedding vector(1536),\n", + " match_count int DEFAULT null,\n", + " filter jsonb DEFAULT '{}'\n", + ") returns table (\n", + " id bigint,\n", + " content text,\n", + " metadata jsonb,\n", + " embedding jsonb,\n", + " similarity float\n", + ")\n", + "language plpgsql\n", + "as $$\n", + "#variable_conflict use_column\n", + "begin\n", + " return query\n", + " select\n", + " id,\n", + " content,\n", + " metadata,\n", + " (embedding::text)::jsonb as embedding,\n", + " 1 - (documents.embedding <=> query_embedding) as similarity\n", + " from documents\n", + " where metadata @> filter\n", + " order by documents.embedding <=> query_embedding\n", + " limit match_count;\n", + "end;\n", + "$$;\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Once you've done this set the `SUPABASE_PRIVATE_KEY` and `SUPABASE_URL` environment variables:\n", + "\n", + "```typescript\n", + "process.env.SUPABASE_PRIVATE_KEY = \"your-api-key\";\n", + "process.env.SUPABASE_URL = \"your-supabase-db-url\";\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { SupabaseVectorStore } from \"@langchain/community/vectorstores/supabase\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "import { createClient } from \"@supabase/supabase-js\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const supabaseClient = createClient(\n", + " process.env.SUPABASE_URL,\n", + " process.env.SUPABASE_PRIVATE_KEY\n", + ");\n", + "\n", + "const vectorStore = new SupabaseVectorStore(embeddings, {\n", + " client: 
supabaseClient,\n", + " tableName: \"documents\",\n", + " queryName: \"match_documents\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ 1, 2, 3, 4 ]\n" + ] + } + ], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "### Delete items from vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ef61e188", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = { source: \"https://example.com\" };\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.165] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.148] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "180b0e66", + "metadata": {}, + "source": [ + "### Metadata Query Builder Filtering\n", + "\n", + "You can also use query builder-style filtering similar to how the [Supabase JavaScript library](https://supabase.com/docs/reference/javascript/using-filters) works instead of passing an object. Note that since most of the filter properties are in the metadata column, you need to use arrow operators (-> for integer or ->> for text) as defined in [Postgrest API documentation](https://postgrest.org/en/stable/references/api/tables_views.html#json-columns) and specify the data type of the property (e.g. the column should look something like `metadata->some_int_prop_name::int`)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "e3287768", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "import { SupabaseFilterRPCCall } from \"@langchain/community/vectorstores/supabase\";\n", + "\n", + "const funcFilter: SupabaseFilterRPCCall = (rpc) =>\n", + " rpc.filter(\"metadata->>source\", \"eq\", \"https://example.com\");\n", + "\n", + "const funcFilterSearchResults = await vectorStore.similaritySearch(\"biology\", 2, funcFilter);\n", + "\n", + "for (const doc of funcFilterSearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. " + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `SupabaseVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_supabase.SupabaseVectorStore.html)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `SupabaseVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_supabase.SupabaseVectorStore.html)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb b/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb index 03205093d5e4..66b0c7d4c264 100644 --- a/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/upstash.ipynb @@ -1,363 +1,363 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Upstash Vector\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# UpstashVectorStore\n", - "\n", - "[Upstash Vector](https://upstash.com/) is a REST based serverless vector database, designed for working with vector embeddings.\n", - "\n", - "This guide provides a quick overview for getting started with Upstash [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `UpstashVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_upstash.UpstashVectorStore.html)." 
- ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/upstash/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`UpstashVectorStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_upstash.UpstashVectorStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Upstash vector stores, you'll need to create an Upstash account, create an index, and install the `@langchain/community` integration package. You'll also need to install the [`@upstash/vector`](https://www.npmjs.com/package/@upstash/vector) package as a peer dependency.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/community @langchain/core @upstash/vector @langchain/openai\n", - "\n", - "```\n", - "\n", - "You can create an index from the [Upstash Console](https://console.upstash.com/login). For further reference, see [the official docs](https://upstash.com/docs/vector/overall/getstarted).\n", - "\n", - "### Credentials\n", - "\n", - "Once you've set up an index, set the following environment variables:\n", - "\n", - "```typescript\n", - "process.env.UPSTASH_VECTOR_REST_URL = \"your-rest-url\";\n", - "process.env.UPSTASH_VECTOR_REST_TOKEN = \"your-rest-token\";\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Make sure your index has the same dimension count as your embeddings. The default for OpenAI `text-embedding-3-small` is 1536." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { UpstashVectorStore } from \"@langchain/community/vectorstores/upstash\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "import { Index } from \"@upstash/vector\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const indexWithCredentials = new Index({\n", - " url: process.env.UPSTASH_VECTOR_REST_URL,\n", - " token: process.env.UPSTASH_VECTOR_REST_TOKEN,\n", - "});\n", - "\n", - "const vectorStore = new UpstashVectorStore(embeddings, {\n", - " index: indexWithCredentials,\n", - " // You can use namespaces to partition your data in an index\n", - " // namespace: \"test-namespace\",\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ '1', '2', '3', '4' ]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "**Note:** After adding documents, there may be a slight delay before they become queryable.\n", - "\n", - "### Delete items from vector store" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Upstash Vector\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = \"source = 'https://example.com'\";\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "See [this page](https://upstash.com/docs/vector/features/filtering) for more on Upstash Vector filter syntax.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# UpstashVectorStore\n", + "\n", + "[Upstash Vector](https://upstash.com/) is a REST based serverless vector database, designed for working with vector embeddings.\n", + "\n", + "This guide provides a quick overview for getting started with Upstash [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `UpstashVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_upstash.UpstashVectorStore.html)." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.576] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.557] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/upstash/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`UpstashVectorStore`](https://api.js.langchain.com/classes/langchain_community_vectorstores_upstash.UpstashVectorStore.html) | [`@langchain/community`](https://npmjs.com/@langchain/community) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Upstash vector stores, you'll need to create an Upstash account, create an index, and install the `@langchain/community` integration package. You'll also need to install the [`@upstash/vector`](https://www.npmjs.com/package/@upstash/vector) package as a peer dependency.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @langchain/core @upstash/vector @langchain/openai\n", + "\n", + "```\n", + "\n", + "You can create an index from the [Upstash Console](https://console.upstash.com/login). For further reference, see [the official docs](https://upstash.com/docs/vector/overall/getstarted).\n", + "\n", + "### Credentials\n", + "\n", + "Once you've set up an index, set the following environment variables:\n", + "\n", + "```typescript\n", + "process.env.UPSTASH_VECTOR_REST_URL = \"your-rest-url\";\n", + "process.env.UPSTASH_VECTOR_REST_TOKEN = \"your-rest-token\";\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Make sure your index has the same dimension count as your embeddings. The default for OpenAI `text-embedding-3-small` is 1536." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { UpstashVectorStore } from \"@langchain/community/vectorstores/upstash\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "import { Index } from \"@upstash/vector\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const indexWithCredentials = new Index({\n", + " url: process.env.UPSTASH_VECTOR_REST_URL,\n", + " token: process.env.UPSTASH_VECTOR_REST_TOKEN,\n", + "});\n", + "\n", + "const vectorStore = new UpstashVectorStore(embeddings, {\n", + " index: indexWithCredentials,\n", + " // You can use namespaces to partition your data in an index\n", + " // namespace: \"test-namespace\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "17f5efc0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ '1', '2', '3', '4' ]\n" + ] + } + ], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "**Note:** After adding documents, there may be a slight delay before they become queryable.\n", + "\n", + "### Delete items from vector store" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ef61e188", + "metadata": {}, + "outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " Document {\n", - " pageContent: 'The powerhouse of the cell is the mitochondria',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Mitochondria are made out of lipids',\n", - " metadata: { source: 'https://example.com' },\n", - " id: undefined\n", - " }\n", - "]\n" - ] + "cell_type": "code", + "execution_count": 7, + "id": "aa0a16fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const filter = \"source = 'https://example.com'\";\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "See [this page](https://upstash.com/docs/vector/features/filtering) for more on Upstash Vector filter syntax.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5efd2eaa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* [SIM=0.576] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", + "* [SIM=0.557] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" + ] + } + ], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `UpstashVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_upstash.UpstashVectorStore.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `UpstashVectorStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_vectorstores_upstash.UpstashVectorStore.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb b/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb index 7d12439565db..bcee4bf2b3dd 100644 --- a/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb +++ b/docs/core_docs/docs/integrations/vectorstores/weaviate.ipynb @@ -1,388 +1,388 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: Weaviate\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# WeaviateStore\n", - "\n", - "[Weaviate](https://weaviate.io/) is an open source vector database that stores both objects and vectors, allowing for combining vector search with structured filtering. LangChain connects to Weaviate via the weaviate-ts-client package, the official Typescript client for Weaviate.\n", - "\n", - "This guide provides a quick overview for getting started with Weaviate [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `WeaviateStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_weaviate.WeaviateStore.html)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/weaviate/) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`WeaviateStore`](https://api.js.langchain.com/classes/langchain_weaviate.WeaviateStore.html) | [`@langchain/weaviate`](https://npmjs.com/@langchain/weaviate) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/weaviate?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "To use Weaviate vector stores, you'll need to set up a Weaviate instance and install the `@langchain/weaviate` integration package. You should also install the `weaviate-ts-client` package to initialize a client to connect to your instance with, and the `uuid` package if you want to assign indexed documents ids.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " @langchain/weaviate @langchain/core weaviate-ts-client uuid @langchain/openai\n", - "\n", - "```\n", - "\n", - "You'll need to run Weaviate either locally or on a server. 
See [the Weaviate documentation](https://weaviate.io/developers/weaviate/installation) for more information.\n", - "\n", - "### Credentials\n", - "\n", - "Once you've set up your instance, set the following environment variables:\n", - "\n", - "```typescript\n", - "// http or https\n", - "process.env.WEAVIATE_SCHEME = \"\";\n", - "// If running locally, include port e.g. \"localhost:8080\"\n", - "process.env.WEAVIATE_HOST = \"YOUR_HOSTNAME\";\n", - "// Optional, for cloud deployments\n", - "process.env.WEAVIATE_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import { WeaviateStore } from \"@langchain/weaviate\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "import weaviate from \"weaviate-ts-client\";\n", - "// import { ApiKey } from \"weaviate-ts-client\"\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "// The Weaviate SDK has an issue with types\n", - "const weaviateClient = (weaviate as any).client({\n", - " scheme: process.env.WEAVIATE_SCHEME ?? \"http\",\n", - " host: process.env.WEAVIATE_HOST ?? \"localhost\",\n", - " // If necessary\n", - " // apiKey: new ApiKey(process.env.WEAVIATE_API_KEY ?? \"default\"),\n", - "});\n", - "\n", - "const vectorStore = new WeaviateStore(embeddings, {\n", - " client: weaviateClient,\n", - " // Must start with a capital letter\n", - " indexName: \"Langchainjs_test\",\n", - " // Default value\n", - " textKey: \"text\",\n", - " // Any keys you intend to set as metadata\n", - " metadataKeys: [\"source\"],\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store\n", - "\n", - "**Note:** If you want to associate ids with your indexed documents, they must be UUIDs." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "17f5efc0", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " '610f9b92-9bee-473f-a4db-8f2ca6e3442d',\n", - " '995160fa-441e-41a0-b476-cf3785518a0d',\n", - " '0cdbe6d4-0df8-4f99-9b67-184009fee9a2',\n", - " '18a8211c-0649-467b-a7c5-50ebb4b9ca9d'\n", - "]\n" - ] - } - ], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "import { v4 as uuidv4 } from \"uuid\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "const uuids = [uuidv4(), uuidv4(), uuidv4(), uuidv4()];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: uuids });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "### Delete items from vector store\n", - "\n", - "You can delete by id as by passing a `filter` param:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ef61e188", - "metadata": {}, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [uuids[3]] });" - ] - }, - { - "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "aa0a16fa", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Weaviate\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const filter = {\n", - " where: {\n", - " operator: \"Equal\" as const,\n", - " path: [\"source\"],\n", - " valueText: \"https://example.com\",\n", - " }\n", - "};\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "See [this page](https://weaviate.io/developers/weaviate/api/graphql/filters) for more on Weaviat filter syntax.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "5efd2eaa", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# WeaviateStore\n", + "\n", + "[Weaviate](https://weaviate.io/) is an open source vector database that stores both objects and vectors, allowing for combining vector search with structured filtering. LangChain connects to Weaviate via the weaviate-ts-client package, the official Typescript client for Weaviate.\n", + "\n", + "This guide provides a quick overview for getting started with Weaviate [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `WeaviateStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_weaviate.WeaviateStore.html)." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n", - "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n" - ] - } - ], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "f3460093", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "| Class | Package | [PY support](https://python.langchain.com/docs/integrations/vectorstores/weaviate/) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`WeaviateStore`](https://api.js.langchain.com/classes/langchain_weaviate.WeaviateStore.html) | [`@langchain/weaviate`](https://npmjs.com/@langchain/weaviate) | ✅ | ![NPM - Version](https://img.shields.io/npm/v/@langchain/weaviate?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "To use Weaviate vector stores, you'll need to set up a Weaviate instance and install the `@langchain/weaviate` integration package. You should also install the `weaviate-ts-client` package to initialize a client to connect to your instance with, and the `uuid` package if you want to assign indexed documents ids.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/weaviate @langchain/core weaviate-ts-client uuid @langchain/openai\n", + "\n", + "```\n", + "\n", + "You'll need to run Weaviate either locally or on a server. See [the Weaviate documentation](https://weaviate.io/developers/weaviate/installation) for more information.\n", + "\n", + "### Credentials\n", + "\n", + "Once you've set up your instance, set the following environment variables:\n", + "\n", + "```typescript\n", + "// http or https\n", + "process.env.WEAVIATE_SCHEME = \"\";\n", + "// If running locally, include port e.g. 
\"localhost:8080\"\n", + "process.env.WEAVIATE_HOST = \"YOUR_HOSTNAME\";\n", + "// Optional, for cloud deployments\n", + "process.env.WEAVIATE_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import { WeaviateStore } from \"@langchain/weaviate\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "import weaviate from \"weaviate-ts-client\";\n", + "// import { ApiKey } from \"weaviate-ts-client\"\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "// The Weaviate SDK has an issue with types\n", + "const weaviateClient = (weaviate as any).client({\n", + " scheme: process.env.WEAVIATE_SCHEME ?? \"http\",\n", + " host: process.env.WEAVIATE_HOST ?? \"localhost\",\n", + " // If necessary\n", + " // apiKey: new ApiKey(process.env.WEAVIATE_API_KEY ?? \"default\"),\n", + "});\n", + "\n", + "const vectorStore = new WeaviateStore(embeddings, {\n", + " client: weaviateClient,\n", + " // Must start with a capital letter\n", + " indexName: \"Langchainjs_test\",\n", + " // Default value\n", + " textKey: \"text\",\n", + " // Any keys you intend to set as metadata\n", + " metadataKeys: [\"source\"],\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store\n", + "\n", + "**Note:** If you want to associate ids with your indexed documents, they must be UUIDs." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "17f5efc0",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[\n",
+      "  '610f9b92-9bee-473f-a4db-8f2ca6e3442d',\n",
+      "  '995160fa-441e-41a0-b476-cf3785518a0d',\n",
+      "  '0cdbe6d4-0df8-4f99-9b67-184009fee9a2',\n",
+      "  '18a8211c-0649-467b-a7c5-50ebb4b9ca9d'\n",
+      "]\n"
+     ]
+    }
+   ],
+   "source": [
+    "import type { Document } from \"@langchain/core/documents\";\n",
+    "import { v4 as uuidv4 } from \"uuid\";\n",
+    "\n",
+    "const document1: Document = {\n",
+    "  pageContent: \"The powerhouse of the cell is the mitochondria\",\n",
+    "  metadata: { source: \"https://example.com\" }\n",
+    "};\n",
+    "\n",
+    "const document2: Document = {\n",
+    "  pageContent: \"Buildings are made out of brick\",\n",
+    "  metadata: { source: \"https://example.com\" }\n",
+    "};\n",
+    "\n",
+    "const document3: Document = {\n",
+    "  pageContent: \"Mitochondria are made out of lipids\",\n",
+    "  metadata: { source: \"https://example.com\" }\n",
+    "};\n",
+    "\n",
+    "const document4: Document = {\n",
+    "  pageContent: \"The 2024 Olympics are in Paris\",\n",
+    "  metadata: { source: \"https://example.com\" }\n",
+    "}\n",
+    "\n",
+    "const documents = [document1, document2, document3, document4];\n",
+    "const uuids = [uuidv4(), uuidv4(), uuidv4(), uuidv4()];\n",
+    "\n",
+    "await vectorStore.addDocuments(documents, { ids: uuids });"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "dcf1b905",
+   "metadata": {},
+   "source": [
+    "### Delete items from vector store\n",
+    "\n",
+    "You can delete by id or by passing a `filter` param:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "ef61e188",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "await vectorStore.delete({ ids: [uuids[3]] });"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c3620501",
+   "metadata": {},
+   "source": [
+    "## Query vector store\n",
+    "\n",
+    "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. 
\n",
+    "\n",
+    "### Query directly\n",
+    "\n",
+    "Performing a simple similarity search can be done as follows:"
+   ]
+  },
   {
+   "cell_type": "code",
+   "execution_count": 14,
+   "id": "aa0a16fa",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "* The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n",
+      "* Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "const filter = {\n",
+    "  where: {\n",
+    "    operator: \"Equal\" as const,\n",
+    "    path: [\"source\"],\n",
+    "    valueText: \"https://example.com\",\n",
+    "  }\n",
+    "};\n",
+    "\n",
+    "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n",
+    "\n",
+    "for (const doc of similaritySearchResults) {\n",
+    "  console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3ed9d733",
+   "metadata": {},
+   "source": [
+    "See [this page](https://weaviate.io/developers/weaviate/api/graphql/filters) for more on Weaviate filter syntax.\n",
+    "\n",
+    "If you want to execute a similarity search and receive the corresponding scores you can run:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "id": "5efd2eaa",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "* [SIM=0.835] The powerhouse of the cell is the mitochondria [{\"source\":\"https://example.com\"}]\n",
+      "* [SIM=0.852] Mitochondria are made out of lipids [{\"source\":\"https://example.com\"}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n",
+    "\n",
+    "for (const [doc, score] of similaritySearchWithScoreResults) {\n",
+    "  console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0c235cdc",
+   "metadata": {},
+   "source": [
+    "### Query by turning into retriever\n",
+    "\n",
+    "You can also transform the vector store into a [retriever](/docs/concepts/retrievers) for easier usage in your chains. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "f3460093", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " Document {\n", + " pageContent: 'The powerhouse of the cell is the mitochondria',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Mitochondria are made out of lipids',\n", + " metadata: { source: 'https://example.com' },\n", + " id: undefined\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `WeaviateStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_weaviate.WeaviateStore.html)." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `WeaviateStore` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_weaviate.WeaviateStore.html)." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/introduction.mdx b/docs/core_docs/docs/introduction.mdx index 868895f490ae..d8a3df68ae83 100644 --- a/docs/core_docs/docs/introduction.mdx +++ b/docs/core_docs/docs/introduction.mdx @@ -8,8 +8,8 @@ sidebar_position: 0 LangChain simplifies every stage of the LLM application lifecycle: -- **Development**: Build your applications using LangChain's open-source [building blocks](/docs/concepts#langchain-expression-language), [components](/docs/concepts), and [third-party integrations](/docs/integrations/platforms/). - Use [LangGraph.js](/docs/concepts/#langgraphjs) to build stateful agents with first-class streaming and human-in-the-loop support. +- **Development**: Build your applications using LangChain's open-source [building blocks](/docs/concepts/lcel), [components](/docs/concepts), and [third-party integrations](/docs/integrations/platforms/). + Use [LangGraph.js](/docs/concepts/architecture#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support. - **Productionization**: Use [LangSmith](https://docs.smith.langchain.com/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence. - **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/). diff --git a/docs/core_docs/docs/troubleshooting/errors/INVALID_PROMPT_INPUT.mdx b/docs/core_docs/docs/troubleshooting/errors/INVALID_PROMPT_INPUT.mdx index 95d5c37b1517..b1884e0d9e63 100644 --- a/docs/core_docs/docs/troubleshooting/errors/INVALID_PROMPT_INPUT.mdx +++ b/docs/core_docs/docs/troubleshooting/errors/INVALID_PROMPT_INPUT.mdx @@ -1,6 +1,6 @@ # INVALID_PROMPT_INPUT -A [prompt template](/docs/concepts#prompt-templates) received missing or invalid input variables. +A [prompt template](/docs/concepts/prompt_templates) received missing or invalid input variables. One unexpected way this can occur is if you add a JSON object directly into a prompt template: @@ -51,7 +51,7 @@ The following may help resolve this error: - Double-check your prompt template to ensure that it is correct. - If you are using default formatting and you are using curly braces `{` anywhere in your template, they should be double escaped like this: `{{`, as shown above. -- If you are using a [`MessagesPlaceholder`](/docs/concepts/#messagesplaceholder), make sure that you are passing in an array of messages or message-like objects. +- If you are using a [`MessagesPlaceholder`](/docs/concepts/prompt_templates/#messagesplaceholder), make sure that you are passing in an array of messages or message-like objects. - If you are using shorthand tuples to declare your prompt template, make sure that the variable name is wrapped in curly braces (`["placeholder", "{messages}"]`). - Try viewing the inputs into your prompt template using [LangSmith](https://docs.smith.langchain.com/) or log statements to confirm they appear as expected. 
- If you are pulling a prompt from the [LangChain Prompt Hub](https://smith.langchain.com/prompts), try pulling and logging it or running it in isolation with a sample input to confirm that it is what you expect. diff --git a/docs/core_docs/docs/troubleshooting/errors/INVALID_TOOL_RESULTS.ipynb b/docs/core_docs/docs/troubleshooting/errors/INVALID_TOOL_RESULTS.ipynb index 4047de8f4de9..0047a07cfce1 100644 --- a/docs/core_docs/docs/troubleshooting/errors/INVALID_TOOL_RESULTS.ipynb +++ b/docs/core_docs/docs/troubleshooting/errors/INVALID_TOOL_RESULTS.ipynb @@ -1,448 +1,448 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# INVALID_TOOL_RESULTS\n", - "\n", - "You are passing too many, too few, or mismatched [`ToolMessages`](https://api.js.langchain.com/classes/_langchain_core.messages_tool.ToolMessage.html) to a model.\n", - "\n", - "When [using a model to call tools](/docs/concepts#functiontool-calling), the [`AIMessage`](https://api.js.langchain.com/classes/_langchain_core.messages.AIMessage.html)\n", - "the model responds with will contain a `tool_calls` array. To continue the flow, the next messages you pass back to the model must\n", - "be exactly one `ToolMessage` for each item in that array containing the result of that tool call. Each `ToolMessage` must have a `tool_call_id` field\n", - "that matches one of the `tool_calls` on the `AIMessage`.\n", - "\n", - "For example, given the following response from a model:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-AIgT1xUd6lkWAutThiiBsqjq7Ykj1\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_BknYpnY7xiARM17TPYqL7luj\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " },\n", - " {\n", - " \"id\": \"call_EHf8MIcTdsLCZcFVlcH4hxJw\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"promptTokens\": 42,\n", - " \"completionTokens\": 37,\n", - " \"totalTokens\": 79\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"usage\": {\n", - " \"prompt_tokens\": 42,\n", - " \"completion_tokens\": 37,\n", - " \"total_tokens\": 79,\n", - " \"prompt_tokens_details\": {\n", - " \"cached_tokens\": 0\n", - " },\n", - " \"completion_tokens_details\": {\n", - " \"reasoning_tokens\": 0\n", - " }\n", - " },\n", - " \"system_fingerprint\": \"fp_e2bde53e6e\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"foo\",\n", - " \"args\": {},\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_BknYpnY7xiARM17TPYqL7luj\"\n", - " },\n", - " {\n", - " \"name\": \"foo\",\n", - " \"args\": {},\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_EHf8MIcTdsLCZcFVlcH4hxJw\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"output_tokens\": 37,\n", - " \"input_tokens\": 42,\n", - " \"total_tokens\": 79,\n", - " \"input_token_details\": {\n", - " \"cache_read\": 0\n", - " },\n", - " \"output_token_details\": {\n", - " \"reasoning\": 0\n", - " }\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "import { z } from \"zod\";\n", - "import { tool } from \"@langchain/core/tools\";\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "import { BaseMessageLike } from 
\"@langchain/core/messages\";\n", - "\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o-mini\",\n", - "});\n", - "\n", - "const dummyTool = tool(\n", - " async () => {\n", - " return \"action complete!\";\n", - " },\n", - " {\n", - " name: \"foo\",\n", - " schema: z.object({}),\n", - " }\n", - ");\n", - "\n", - "const modelWithTools = model.bindTools([dummyTool]);\n", - "\n", - "const chatHistory: BaseMessageLike[] = [\n", - " {\n", - " role: \"user\",\n", - " content: `Call tool \"foo\" twice with no arguments`,\n", - " },\n", - "];\n", - "\n", - "const responseMessage = await modelWithTools.invoke(chatHistory);\n", - "\n", - "console.log(responseMessage);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Calling the model with only one tool response would result in an error:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# INVALID_TOOL_RESULTS\n", + "\n", + "You are passing too many, too few, or mismatched [`ToolMessages`](https://api.js.langchain.com/classes/_langchain_core.messages_tool.ToolMessage.html) to a model.\n", + "\n", + "When [using a model to call tools](/docs/concepts/tool_calling), the [`AIMessage`](https://api.js.langchain.com/classes/_langchain_core.messages.AIMessage.html)\n", + "the model responds with will contain a `tool_calls` array. To continue the flow, the next messages you pass back to the model must\n", + "be exactly one `ToolMessage` for each item in that array containing the result of that tool call. Each `ToolMessage` must have a `tool_call_id` field\n", + "that matches one of the `tool_calls` on the `AIMessage`.\n", + "\n", + "For example, given the following response from a model:" + ] + }, { - "name": "stderr", - "output_type": "stream", - "text": [ - "BadRequestError: 400 An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. 
The following tool_call_ids did not have response messages: call_EHf8MIcTdsLCZcFVlcH4hxJw\n", - " at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)\n", - " at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)\n", - " at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)\n", - " at process.processTicksAndRejections (node:internal/process/task_queues:95:5)\n", - " at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29\n", - " at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {\n", - " status: 400,\n", - " headers: {\n", - " 'access-control-expose-headers': 'X-Request-ID',\n", - " 'alt-svc': 'h3=\":443\"; ma=86400',\n", - " 'cf-cache-status': 'DYNAMIC',\n", - " 'cf-ray': '8d31d4d95e2a0c96-EWR',\n", - " connection: 'keep-alive',\n", - " 'content-length': '315',\n", - " 'content-type': 'application/json',\n", - " date: 'Tue, 15 Oct 2024 18:21:53 GMT',\n", - " 'openai-organization': 'langchain',\n", - " 'openai-processing-ms': '16',\n", - " 'openai-version': '2020-10-01',\n", - " server: 'cloudflare',\n", - " 'set-cookie': '__cf_bm=e5.GX1bHiMVgr76YSvAKuECCGG7X_RXF0jDGSMXFGfU-1729016513-1.0.1.1-ZBYeVqX.M6jSNJB.wS696fEhX7V.es._M0WcWtQ9Qx8doEA5qMVKNE5iX6i7UKyPCg2GvDfM.MoDwRCXKMSkEA; path=/; expires=Tue, 15-Oct-24 18:51:53 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=J8gS08GodUA9hRTYuElen0YOCzMO3d4LW0ZT0k_kyj4-1729016513560-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',\n", - " 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',\n", - " 'x-content-type-options': 'nosniff',\n", - " 'x-ratelimit-limit-requests': '30000',\n", - " 'x-ratelimit-limit-tokens': '150000000',\n", - " 'x-ratelimit-remaining-requests': '29999',\n", - " 'x-ratelimit-remaining-tokens': '149999967',\n", - " 'x-ratelimit-reset-requests': '2ms',\n", - " 'x-ratelimit-reset-tokens': '0s',\n", - " 'x-request-id': 'req_f810058e7f047fafcb713575c4419161'\n", - " },\n", - " request_id: 'req_f810058e7f047fafcb713575c4419161',\n", - " error: {\n", - " message: \"An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. 
The following tool_call_ids did not have response messages: call_EHf8MIcTdsLCZcFVlcH4hxJw\",\n", - " type: 'invalid_request_error',\n", - " param: 'messages',\n", - " code: null\n", - " },\n", - " code: null,\n", - " param: 'messages',\n", - " type: 'invalid_request_error',\n", - " attemptNumber: 1,\n", - " retriesLeft: 6\n", - "}\n" - ] - } - ], - "source": [ - "const toolResponse1 = await dummyTool.invoke(responseMessage.tool_calls![0]);\n", - "\n", - "chatHistory.push(responseMessage);\n", - "chatHistory.push(toolResponse1);\n", - "\n", - "await modelWithTools.invoke(chatHistory);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we add a second response, the call will succeed as expected because we now have one tool response per tool call:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-AIgT1xUd6lkWAutThiiBsqjq7Ykj1\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_BknYpnY7xiARM17TPYqL7luj\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " },\n", + " {\n", + " \"id\": \"call_EHf8MIcTdsLCZcFVlcH4hxJw\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"promptTokens\": 42,\n", + " \"completionTokens\": 37,\n", + " \"totalTokens\": 79\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"usage\": {\n", + " \"prompt_tokens\": 42,\n", + " \"completion_tokens\": 37,\n", + " \"total_tokens\": 79,\n", + " \"prompt_tokens_details\": {\n", + " \"cached_tokens\": 0\n", + " },\n", + " \"completion_tokens_details\": {\n", + " \"reasoning_tokens\": 0\n", + " }\n", + " },\n", + " \"system_fingerprint\": \"fp_e2bde53e6e\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"foo\",\n", + " \"args\": {},\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_BknYpnY7xiARM17TPYqL7luj\"\n", + " },\n", + " {\n", + " \"name\": \"foo\",\n", + " \"args\": {},\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_EHf8MIcTdsLCZcFVlcH4hxJw\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"output_tokens\": 37,\n", + " \"input_tokens\": 42,\n", + " \"total_tokens\": 79,\n", + " \"input_token_details\": {\n", + " \"cache_read\": 0\n", + " },\n", + " \"output_token_details\": {\n", + " \"reasoning\": 0\n", + " }\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { BaseMessageLike } from \"@langchain/core/messages\";\n", + "\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + "});\n", + "\n", + "const dummyTool = tool(\n", + " async () => {\n", + " return \"action complete!\";\n", + " },\n", + " {\n", + " name: \"foo\",\n", + " schema: z.object({}),\n", + " }\n", + ");\n", + "\n", + "const modelWithTools = model.bindTools([dummyTool]);\n", + "\n", + "const chatHistory: BaseMessageLike[] = [\n", + " {\n", + " role: \"user\",\n", + " content: `Call tool \"foo\" twice with no arguments`,\n", + " },\n", + "];\n", + "\n", + "const responseMessage = await modelWithTools.invoke(chatHistory);\n", + "\n", + 
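One way to stay consistent with the rule above — exactly one `ToolMessage` per entry in `tool_calls`, failures included — is to loop over the tool calls and push a result for each. This is a minimal sketch that assumes the `dummyTool`, `modelWithTools`, and `chatHistory` defined in this example; the error wording is illustrative.

```typescript
import { ToolMessage } from "@langchain/core/messages";

// Answer every tool call exactly once, reporting failures as ToolMessages too.
const aiMessage = await modelWithTools.invoke(chatHistory);
chatHistory.push(aiMessage);

for (const toolCall of aiMessage.tool_calls ?? []) {
  try {
    // Invoking a tool with a full tool call returns a ToolMessage with the matching tool_call_id.
    chatHistory.push(await dummyTool.invoke(toolCall));
  } catch (e) {
    // Failed calls still need a response message, one per tool_call_id.
    chatHistory.push(
      new ToolMessage({
        content: `Tool call failed: ${(e as Error).message}`,
        tool_call_id: toolCall.id!,
      })
    );
  }
}

// Every tool_call_id now has exactly one ToolMessage, so the follow-up call is valid.
const followup = await modelWithTools.invoke(chatHistory);
console.log(followup.content);
```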
"console.log(responseMessage);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-AIgTPDBm1epnnLHx0tPFTgpsf8Ay6\",\n", - " \"content\": \"The tool \\\"foo\\\" was called twice, and both times returned the result: \\\"action complete!\\\".\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"promptTokens\": 98,\n", - " \"completionTokens\": 21,\n", - " \"totalTokens\": 119\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"usage\": {\n", - " \"prompt_tokens\": 98,\n", - " \"completion_tokens\": 21,\n", - " \"total_tokens\": 119,\n", - " \"prompt_tokens_details\": {\n", - " \"cached_tokens\": 0\n", - " },\n", - " \"completion_tokens_details\": {\n", - " \"reasoning_tokens\": 0\n", - " }\n", - " },\n", - " \"system_fingerprint\": \"fp_e2bde53e6e\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"output_tokens\": 21,\n", - " \"input_tokens\": 98,\n", - " \"total_tokens\": 119,\n", - " \"input_token_details\": {\n", - " \"cache_read\": 0\n", - " },\n", - " \"output_token_details\": {\n", - " \"reasoning\": 0\n", - " }\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const toolResponse2 = await dummyTool.invoke(responseMessage.tool_calls![1]);\n", - "\n", - "chatHistory.push(toolResponse2);\n", - "\n", - "await modelWithTools.invoke(chatHistory);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "But if we add a duplicate, extra tool response, the call will fail again:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Calling the model with only one tool response would result in an error:" + ] + }, { - "name": "stderr", - "output_type": "stream", - "text": [ - "BadRequestError: 400 Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\n", - " at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)\n", - " at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)\n", - " at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)\n", - " at process.processTicksAndRejections (node:internal/process/task_queues:95:5)\n", - " at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29\n", - " at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {\n", - " status: 400,\n", - " headers: {\n", - " 'access-control-expose-headers': 'X-Request-ID',\n", - " 'alt-svc': 'h3=\":443\"; ma=86400',\n", - " 'cf-cache-status': 'DYNAMIC',\n", - " 'cf-ray': '8d31d57dff5e0f3b-EWR',\n", - " connection: 'keep-alive',\n", - " 'content-length': '233',\n", - " 'content-type': 'application/json',\n", - " date: 'Tue, 15 Oct 2024 18:22:19 GMT',\n", - " 'openai-organization': 'langchain',\n", - " 'openai-processing-ms': '36',\n", - " 'openai-version': '2020-10-01',\n", - " server: 'cloudflare',\n", - " 'set-cookie': '__cf_bm=QUsNlSGxVeIbscI0rm2YR3U9aUFLNxxqh1i_3aYBGN4-1729016539-1.0.1.1-sKRUvxHkQXvlb5LaqASkGtIwPMWUF5x9kF0ut8NLP6e0FVKEhdIEkEe6lYA1toW45JGTwp98xahaX7wt9CO4AA; path=/; expires=Tue, 15-Oct-24 18:52:19 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, 
_cfuvid=J6fN8u8HUieCeyLDI59mi_0r_W0DgiO207wEtvrmT9Y-1729016539919-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',\n", - " 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',\n", - " 'x-content-type-options': 'nosniff',\n", - " 'x-ratelimit-limit-requests': '30000',\n", - " 'x-ratelimit-limit-tokens': '150000000',\n", - " 'x-ratelimit-remaining-requests': '29999',\n", - " 'x-ratelimit-remaining-tokens': '149999956',\n", - " 'x-ratelimit-reset-requests': '2ms',\n", - " 'x-ratelimit-reset-tokens': '0s',\n", - " 'x-request-id': 'req_aebfebbb9af2feaf2e9683948e431676'\n", - " },\n", - " request_id: 'req_aebfebbb9af2feaf2e9683948e431676',\n", - " error: {\n", - " message: \"Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\",\n", - " type: 'invalid_request_error',\n", - " param: 'messages.[4].role',\n", - " code: null\n", - " },\n", - " code: null,\n", - " param: 'messages.[4].role',\n", - " type: 'invalid_request_error',\n", - " attemptNumber: 1,\n", - " retriesLeft: 6\n", - "}\n" - ] - } - ], - "source": [ - "const duplicateToolResponse2 = await dummyTool.invoke(responseMessage.tool_calls![1]);\n", - "\n", - "chatHistory.push(duplicateToolResponse2);\n", - "\n", - "await modelWithTools.invoke(chatHistory);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You should additionally not pass `ToolMessages` back to to a model if they are not preceded by an `AIMessage` with tool calls. For example, this will fail:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "BadRequestError: 400 An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. 
The following tool_call_ids did not have response messages: call_EHf8MIcTdsLCZcFVlcH4hxJw\n", + " at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)\n", + " at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)\n", + " at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)\n", + " at process.processTicksAndRejections (node:internal/process/task_queues:95:5)\n", + " at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29\n", + " at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {\n", + " status: 400,\n", + " headers: {\n", + " 'access-control-expose-headers': 'X-Request-ID',\n", + " 'alt-svc': 'h3=\":443\"; ma=86400',\n", + " 'cf-cache-status': 'DYNAMIC',\n", + " 'cf-ray': '8d31d4d95e2a0c96-EWR',\n", + " connection: 'keep-alive',\n", + " 'content-length': '315',\n", + " 'content-type': 'application/json',\n", + " date: 'Tue, 15 Oct 2024 18:21:53 GMT',\n", + " 'openai-organization': 'langchain',\n", + " 'openai-processing-ms': '16',\n", + " 'openai-version': '2020-10-01',\n", + " server: 'cloudflare',\n", + " 'set-cookie': '__cf_bm=e5.GX1bHiMVgr76YSvAKuECCGG7X_RXF0jDGSMXFGfU-1729016513-1.0.1.1-ZBYeVqX.M6jSNJB.wS696fEhX7V.es._M0WcWtQ9Qx8doEA5qMVKNE5iX6i7UKyPCg2GvDfM.MoDwRCXKMSkEA; path=/; expires=Tue, 15-Oct-24 18:51:53 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=J8gS08GodUA9hRTYuElen0YOCzMO3d4LW0ZT0k_kyj4-1729016513560-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',\n", + " 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',\n", + " 'x-content-type-options': 'nosniff',\n", + " 'x-ratelimit-limit-requests': '30000',\n", + " 'x-ratelimit-limit-tokens': '150000000',\n", + " 'x-ratelimit-remaining-requests': '29999',\n", + " 'x-ratelimit-remaining-tokens': '149999967',\n", + " 'x-ratelimit-reset-requests': '2ms',\n", + " 'x-ratelimit-reset-tokens': '0s',\n", + " 'x-request-id': 'req_f810058e7f047fafcb713575c4419161'\n", + " },\n", + " request_id: 'req_f810058e7f047fafcb713575c4419161',\n", + " error: {\n", + " message: \"An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. 
The following tool_call_ids did not have response messages: call_EHf8MIcTdsLCZcFVlcH4hxJw\",\n", + " type: 'invalid_request_error',\n", + " param: 'messages',\n", + " code: null\n", + " },\n", + " code: null,\n", + " param: 'messages',\n", + " type: 'invalid_request_error',\n", + " attemptNumber: 1,\n", + " retriesLeft: 6\n", + "}\n" + ] + } + ], + "source": [ + "const toolResponse1 = await dummyTool.invoke(responseMessage.tool_calls![0]);\n", + "\n", + "chatHistory.push(responseMessage);\n", + "chatHistory.push(toolResponse1);\n", + "\n", + "await modelWithTools.invoke(chatHistory);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we add a second response, the call will succeed as expected because we now have one tool response per tool call:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-AIgTPDBm1epnnLHx0tPFTgpsf8Ay6\",\n", + " \"content\": \"The tool \\\"foo\\\" was called twice, and both times returned the result: \\\"action complete!\\\".\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"promptTokens\": 98,\n", + " \"completionTokens\": 21,\n", + " \"totalTokens\": 119\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"usage\": {\n", + " \"prompt_tokens\": 98,\n", + " \"completion_tokens\": 21,\n", + " \"total_tokens\": 119,\n", + " \"prompt_tokens_details\": {\n", + " \"cached_tokens\": 0\n", + " },\n", + " \"completion_tokens_details\": {\n", + " \"reasoning_tokens\": 0\n", + " }\n", + " },\n", + " \"system_fingerprint\": \"fp_e2bde53e6e\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"output_tokens\": 21,\n", + " \"input_tokens\": 98,\n", + " \"total_tokens\": 119,\n", + " \"input_token_details\": {\n", + " \"cache_read\": 0\n", + " },\n", + " \"output_token_details\": {\n", + " \"reasoning\": 0\n", + " }\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const toolResponse2 = await dummyTool.invoke(responseMessage.tool_calls![1]);\n", + "\n", + "chatHistory.push(toolResponse2);\n", + "\n", + "await modelWithTools.invoke(chatHistory);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But if we add a duplicate, extra tool response, the call will fail again:" + ] + }, { - "name": "stderr", - "output_type": "stream", - "text": [ - "BadRequestError: 400 Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\n", - " at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)\n", - " at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)\n", - " at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)\n", - " at process.processTicksAndRejections (node:internal/process/task_queues:95:5)\n", - " at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29\n", - " at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {\n", - " status: 400,\n", - " headers: {\n", - " 'access-control-expose-headers': 'X-Request-ID',\n", - " 'alt-svc': 'h3=\":443\"; ma=86400',\n", - " 'cf-cache-status': 'DYNAMIC',\n", - " 'cf-ray': 
'8d31d5da7fba19aa-EWR',\n", - " connection: 'keep-alive',\n", - " 'content-length': '233',\n", - " 'content-type': 'application/json',\n", - " date: 'Tue, 15 Oct 2024 18:22:34 GMT',\n", - " 'openai-organization': 'langchain',\n", - " 'openai-processing-ms': '25',\n", - " 'openai-version': '2020-10-01',\n", - " server: 'cloudflare',\n", - " 'set-cookie': '__cf_bm=qK6.PWACr7IYuMafLpxumD4CrFnwHQiJn4TiGkrNTBk-1729016554-1.0.1.1-ECIk0cvh1wOfsK41a1Ce7npngsUDRRG93_yinP4.kVIWu1eX0CFG19iZ8yfGXedyPo6Wh1CKTGLk_3Qwrg.blA; path=/; expires=Tue, 15-Oct-24 18:52:34 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=IVTqysqHo4VUVJ.tVTcGg0rnXGWTbSSzX5mcUVrw8BU-1729016554732-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',\n", - " 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',\n", - " 'x-content-type-options': 'nosniff',\n", - " 'x-ratelimit-limit-requests': '30000',\n", - " 'x-ratelimit-limit-tokens': '150000000',\n", - " 'x-ratelimit-remaining-requests': '29999',\n", - " 'x-ratelimit-remaining-tokens': '149999978',\n", - " 'x-ratelimit-reset-requests': '2ms',\n", - " 'x-ratelimit-reset-tokens': '0s',\n", - " 'x-request-id': 'req_59339f8163ef5bd3f0308a212611dfea'\n", - " },\n", - " request_id: 'req_59339f8163ef5bd3f0308a212611dfea',\n", - " error: {\n", - " message: \"Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\",\n", - " type: 'invalid_request_error',\n", - " param: 'messages.[0].role',\n", - " code: null\n", - " },\n", - " code: null,\n", - " param: 'messages.[0].role',\n", - " type: 'invalid_request_error',\n", - " attemptNumber: 1,\n", - " retriesLeft: 6\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "BadRequestError: 400 Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\n", + " at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)\n", + " at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)\n", + " at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)\n", + " at process.processTicksAndRejections (node:internal/process/task_queues:95:5)\n", + " at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29\n", + " at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {\n", + " status: 400,\n", + " headers: {\n", + " 'access-control-expose-headers': 'X-Request-ID',\n", + " 'alt-svc': 'h3=\":443\"; ma=86400',\n", + " 'cf-cache-status': 'DYNAMIC',\n", + " 'cf-ray': '8d31d57dff5e0f3b-EWR',\n", + " connection: 'keep-alive',\n", + " 'content-length': '233',\n", + " 'content-type': 'application/json',\n", + " date: 'Tue, 15 Oct 2024 18:22:19 GMT',\n", + " 'openai-organization': 'langchain',\n", + " 'openai-processing-ms': '36',\n", + " 'openai-version': '2020-10-01',\n", + " server: 'cloudflare',\n", + " 'set-cookie': '__cf_bm=QUsNlSGxVeIbscI0rm2YR3U9aUFLNxxqh1i_3aYBGN4-1729016539-1.0.1.1-sKRUvxHkQXvlb5LaqASkGtIwPMWUF5x9kF0ut8NLP6e0FVKEhdIEkEe6lYA1toW45JGTwp98xahaX7wt9CO4AA; path=/; expires=Tue, 15-Oct-24 18:52:19 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, 
_cfuvid=J6fN8u8HUieCeyLDI59mi_0r_W0DgiO207wEtvrmT9Y-1729016539919-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',\n", + " 'strict-transport-security': 'max-age=31536000; includeSubDomains; preload',\n", + " 'x-content-type-options': 'nosniff',\n", + " 'x-ratelimit-limit-requests': '30000',\n", + " 'x-ratelimit-limit-tokens': '150000000',\n", + " 'x-ratelimit-remaining-requests': '29999',\n", + " 'x-ratelimit-remaining-tokens': '149999956',\n", + " 'x-ratelimit-reset-requests': '2ms',\n", + " 'x-ratelimit-reset-tokens': '0s',\n", + " 'x-request-id': 'req_aebfebbb9af2feaf2e9683948e431676'\n", + " },\n", + " request_id: 'req_aebfebbb9af2feaf2e9683948e431676',\n", + " error: {\n", + " message: \"Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\",\n", + " type: 'invalid_request_error',\n", + " param: 'messages.[4].role',\n", + " code: null\n", + " },\n", + " code: null,\n", + " param: 'messages.[4].role',\n", + " type: 'invalid_request_error',\n", + " attemptNumber: 1,\n", + " retriesLeft: 6\n", + "}\n" + ] + } + ], + "source": [ + "const duplicateToolResponse2 = await dummyTool.invoke(responseMessage.tool_calls![1]);\n", + "\n", + "chatHistory.push(duplicateToolResponse2);\n", + "\n", + "await modelWithTools.invoke(chatHistory);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should additionally not pass `ToolMessages` back to to a model if they are not preceded by an `AIMessage` with tool calls. For example, this will fail:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "BadRequestError: 400 Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\n", + " at APIError.generate (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/error.js:45:20)\n", + " at OpenAI.makeStatusError (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:291:33)\n", + " at OpenAI.makeRequest (/Users/jacoblee/langchain/langchainjs/libs/langchain-openai/node_modules/openai/core.js:335:30)\n", + " at process.processTicksAndRejections (node:internal/process/task_queues:95:5)\n", + " at async /Users/jacoblee/langchain/langchainjs/libs/langchain-openai/dist/chat_models.cjs:1441:29\n", + " at async RetryOperation._fn (/Users/jacoblee/langchain/langchainjs/node_modules/p-retry/index.js:50:12) {\n", + " status: 400,\n", + " headers: {\n", + " 'access-control-expose-headers': 'X-Request-ID',\n", + " 'alt-svc': 'h3=\":443\"; ma=86400',\n", + " 'cf-cache-status': 'DYNAMIC',\n", + " 'cf-ray': '8d31d5da7fba19aa-EWR',\n", + " connection: 'keep-alive',\n", + " 'content-length': '233',\n", + " 'content-type': 'application/json',\n", + " date: 'Tue, 15 Oct 2024 18:22:34 GMT',\n", + " 'openai-organization': 'langchain',\n", + " 'openai-processing-ms': '25',\n", + " 'openai-version': '2020-10-01',\n", + " server: 'cloudflare',\n", + " 'set-cookie': '__cf_bm=qK6.PWACr7IYuMafLpxumD4CrFnwHQiJn4TiGkrNTBk-1729016554-1.0.1.1-ECIk0cvh1wOfsK41a1Ce7npngsUDRRG93_yinP4.kVIWu1eX0CFG19iZ8yfGXedyPo6Wh1CKTGLk_3Qwrg.blA; path=/; expires=Tue, 15-Oct-24 18:52:34 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=IVTqysqHo4VUVJ.tVTcGg0rnXGWTbSSzX5mcUVrw8BU-1729016554732-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None',\n", + " 'strict-transport-security': 
'max-age=31536000; includeSubDomains; preload',\n", + " 'x-content-type-options': 'nosniff',\n", + " 'x-ratelimit-limit-requests': '30000',\n", + " 'x-ratelimit-limit-tokens': '150000000',\n", + " 'x-ratelimit-remaining-requests': '29999',\n", + " 'x-ratelimit-remaining-tokens': '149999978',\n", + " 'x-ratelimit-reset-requests': '2ms',\n", + " 'x-ratelimit-reset-tokens': '0s',\n", + " 'x-request-id': 'req_59339f8163ef5bd3f0308a212611dfea'\n", + " },\n", + " request_id: 'req_59339f8163ef5bd3f0308a212611dfea',\n", + " error: {\n", + " message: \"Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.\",\n", + " type: 'invalid_request_error',\n", + " param: 'messages.[0].role',\n", + " code: null\n", + " },\n", + " code: null,\n", + " param: 'messages.[0].role',\n", + " type: 'invalid_request_error',\n", + " attemptNumber: 1,\n", + " retriesLeft: 6\n", + "}\n" + ] + } + ], + "source": [ + "await modelWithTools.invoke([{\n", + " role: \"tool\",\n", + " content: \"action completed!\",\n", + " tool_call_id: \"dummy\",\n", + "}])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See [this guide](/docs/how_to/tool_results_pass_to_model/) for more details on tool calling.\n", + "\n", + "## Troubleshooting\n", + "\n", + "The following may help resolve this error:\n", + "\n", + "- If you are using a custom executor rather than a prebuilt one like LangGraph's [`ToolNode`](https://langchain-ai.github.io/langgraphjs/reference/classes/langgraph_prebuilt.ToolNode.html)\n", + " or the legacy LangChain [AgentExecutor](/docs/how_to/agent_executor), verify that you are invoking and returning the result for one tool per tool call.\n", + "- If you are using [few-shot tool call examples](/docs/how_to/tools_few_shot) with messages that you manually create, and you want to simulate a failure,\n", + " you still need to pass back a `ToolMessage` whose content indicates that failure.\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "await modelWithTools.invoke([{\n", - " role: \"tool\",\n", - " content: \"action completed!\",\n", - " tool_call_id: \"dummy\",\n", - "}])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "See [this guide](/docs/how_to/tool_results_pass_to_model/) for more details on tool calling.\n", - "\n", - "## Troubleshooting\n", - "\n", - "The following may help resolve this error:\n", - "\n", - "- If you are using a custom executor rather than a prebuilt one like LangGraph's [`ToolNode`](https://langchain-ai.github.io/langgraphjs/reference/classes/langgraph_prebuilt.ToolNode.html)\n", - " or the legacy LangChain [AgentExecutor](/docs/how_to/agent_executor), verify that you are invoking and returning the result for one tool per tool call.\n", - "- If you are using [few-shot tool call examples](/docs/how_to/tools_few_shot) with messages that you manually create, and you want to simulate a failure,\n", - " you still need to pass back a `ToolMessage` whose content indicates that failure.\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": 
"typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/troubleshooting/errors/MESSAGE_COERCION_FAILURE.mdx b/docs/core_docs/docs/troubleshooting/errors/MESSAGE_COERCION_FAILURE.mdx index 8ee5544bc201..ba8f75d65b2f 100644 --- a/docs/core_docs/docs/troubleshooting/errors/MESSAGE_COERCION_FAILURE.mdx +++ b/docs/core_docs/docs/troubleshooting/errors/MESSAGE_COERCION_FAILURE.mdx @@ -1,8 +1,8 @@ # MESSAGE_COERCION_FAILURE Several modules in LangChain take [`MessageLike`](https://api.js.langchain.com/types/_langchain_core.messages.BaseMessageLike.html) -objects in place of formal [`BaseMessage`](/docs/concepts#message-types) classes. These include OpenAI style message objects (`{ role: "user", content: "Hello world!" }`), -tuples, and plain strings (which are converted to [`HumanMessages`](/docs/concepts#humanmessage)). +objects in place of formal [`BaseMessage`](/docs/concepts/messages) classes. These include OpenAI style message objects (`{ role: "user", content: "Hello world!" }`), +tuples, and plain strings (which are converted to [`HumanMessages`](/docs/concepts/messages/#humanmessage)). If one of these modules receives a value outside of one of these formats, you will receive an error like the following: diff --git a/docs/core_docs/docs/troubleshooting/errors/OUTPUT_PARSING_FAILURE.mdx b/docs/core_docs/docs/troubleshooting/errors/OUTPUT_PARSING_FAILURE.mdx index f27147d080ee..42adc52a193b 100644 --- a/docs/core_docs/docs/troubleshooting/errors/OUTPUT_PARSING_FAILURE.mdx +++ b/docs/core_docs/docs/troubleshooting/errors/OUTPUT_PARSING_FAILURE.mdx @@ -1,6 +1,6 @@ # OUTPUT_PARSING_FAILURE -An [output parser](/docs/concepts#output-parsers) was unable to handle model output as expected. +An [output parser](/docs/concepts/output_parsers) was unable to handle model output as expected. To illustrate this, let's say you have an output parser that expects a chat model to output JSON surrounded by a markdown code tag (triple backticks). Here would be an example of good input: diff --git a/docs/core_docs/docs/tutorials/chatbot.ipynb b/docs/core_docs/docs/tutorials/chatbot.ipynb index 91490c7fa1d0..336bcf5ced5c 100644 --- a/docs/core_docs/docs/tutorials/chatbot.ipynb +++ b/docs/core_docs/docs/tutorials/chatbot.ipynb @@ -1,1149 +1,1149 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_position: 1\n", - "keywords: [conversationchain]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Build a Chatbot\n", - "\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat Models](/docs/concepts/#chat-models)\n", - "- [Prompt Templates](/docs/concepts/#prompt-templates)\n", - "- [Chat History](/docs/concepts/#chat-history)\n", - "\n", - "This guide requires `langgraph >= 0.2.28`.\n", - "\n", - ":::\n", - "\n", - "\n", - "```{=mdx}\n", - "\n", - ":::note\n", - "\n", - "This tutorial previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). 
You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/tutorials/chatbot/).\n", - "\n", - "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", - "\n", - ":::\n", - "\n", - "```\n", - "\n", - "## Overview\n", - "\n", - "We'll go over an example of how to design and implement an LLM-powered chatbot. \n", - "This chatbot will be able to have a conversation and remember previous interactions.\n", - "\n", - "\n", - "Note that this chatbot that we build will only use the language model to have a conversation.\n", - "There are several other related concepts that you may be looking for:\n", - "\n", - "- [Conversational RAG](/docs/tutorials/qa_chat_history): Enable a chatbot experience over an external source of data\n", - "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/): Build a chatbot that can take actions\n", - "\n", - "This tutorial will cover the basics which will be helpful for those two more advanced topics, but feel free to skip directly to there should you choose.\n", - "\n", - "## Setup\n", - "\n", - "### Jupyter Notebook\n", - "\n", - "This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n", - "\n", - "This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n", - "\n", - "### Installation\n", - "\n", - "For this tutorial we will need `@langchain/core` and `langgraph`:\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", - "\n", - "\n", - " @langchain/core @langchain/langgraph uuid\n", - "\n", - "```\n", - "\n", - "For more details, see our [Installation guide](/docs/how_to/installation).\n", - "\n", - "### LangSmith\n", - "\n", - "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", - "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", - "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", - "\n", - "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", - "\n", - "```typescript\n", - "process.env.LANGCHAIN_TRACING_V2 = \"true\"\n", - "process.env.LANGCHAIN_API_KEY = \"...\"\n", - "```\n", - "\n", - "## Quickstart\n", - "\n", - "First up, let's learn how to use a language model by itself. 
LangChain supports many different language models that you can use interchangeably - select the one you want to use below!\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```\n" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({ model: \"gpt-4o-mini\" })" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method." - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXeSO4JQpxO96lj7iudUptJ6nfW\",\n", - " \"content\": \"Hi Bob! How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 10,\n", - " \"promptTokens\": 10,\n", - " \"totalTokens\": 20\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 10,\n", - " \"output_tokens\": 10,\n", - " \"total_tokens\": 20\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "await llm.invoke([{ role: \"user\", content: \"Hi im bob\" }])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The model on its own does not have any concept of state. For example, if you ask a followup question:" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_position: 1\n", + "keywords: [conversationchain]\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXe1Zih4gMe3XgotWL83xeWub2h\",\n", - " \"content\": \"I'm sorry, but I don't have access to personal information about individuals unless it has been shared with me during our conversation. 
If you'd like to tell me your name, feel free to do so!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 39,\n", - " \"promptTokens\": 10,\n", - " \"totalTokens\": 49\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 10,\n", - " \"output_tokens\": 39,\n", - " \"total_tokens\": 49\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "await llm.invoke([{ role: \"user\", content: \"Whats my name\" }])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's take a look at the example [LangSmith trace](https://smith.langchain.com/public/3b768e44-a319-453a-bd6e-30f9df75f16a/r)\n", - "\n", - "We can see that it doesn't take the previous conversation turn into context, and cannot answer the question.\n", - "This makes for a terrible chatbot experience!\n", - "\n", - "To get around this, we need to pass the entire conversation history into the model. Let's see what happens when we do that:" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Build a Chatbot\n", + "\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat Models](/docs/concepts/chat_models)\n", + "- [Prompt Templates](/docs/concepts/prompt_templates)\n", + "- [Chat History](/docs/concepts/chat_history)\n", + "\n", + "This guide requires `langgraph >= 0.2.28`.\n", + "\n", + ":::\n", + "\n", + "\n", + "```{=mdx}\n", + "\n", + ":::note\n", + "\n", + "This tutorial previously built a chatbot using [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html). You can access this version of the tutorial in the [v0.2 docs](https://js.langchain.com/v0.2/docs/tutorials/chatbot/).\n", + "\n", + "The LangGraph implementation offers a number of advantages over `RunnableWithMessageHistory`, including the ability to persist arbitrary components of an application's state (instead of only messages).\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "## Overview\n", + "\n", + "We'll go over an example of how to design and implement an LLM-powered chatbot. \n", + "This chatbot will be able to have a conversation and remember previous interactions.\n", + "\n", + "\n", + "Note that this chatbot that we build will only use the language model to have a conversation.\n", + "There are several other related concepts that you may be looking for:\n", + "\n", + "- [Conversational RAG](/docs/tutorials/qa_chat_history): Enable a chatbot experience over an external source of data\n", + "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/): Build a chatbot that can take actions\n", + "\n", + "This tutorial will cover the basics which will be helpful for those two more advanced topics, but feel free to skip directly to there should you choose.\n", + "\n", + "## Setup\n", + "\n", + "### Jupyter Notebook\n", + "\n", + "This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. 
Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n", + "\n", + "This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n", + "\n", + "### Installation\n", + "\n", + "For this tutorial we will need `@langchain/core` and `langgraph`:\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", + "\n", + "\n", + " @langchain/core @langchain/langgraph uuid\n", + "\n", + "```\n", + "\n", + "For more details, see our [Installation guide](/docs/how_to/installation).\n", + "\n", + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", + "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", + "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", + "\n", + "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "```typescript\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\"\n", + "process.env.LANGCHAIN_API_KEY = \"...\"\n", + "```\n", + "\n", + "## Quickstart\n", + "\n", + "First up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangeably - select the one you want to use below!\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXfX4Fnp247rOxyPlBUYMQgahj2\",\n", - " \"content\": \"Your name is Bob! How can I help you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 12,\n", - " \"promptTokens\": 33,\n", - " \"totalTokens\": 45\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 33,\n", - " \"output_tokens\": 12,\n", - " \"total_tokens\": 45\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "await llm.invoke([\n", - " { role: \"user\", content: \"Hi! I'm Bob\" },\n", - " { role: \"assistant\", content: \"Hello Bob! How can I assist you today?\" },\n", - " { role: \"user\", content: \"What's my name?\" }\n", - "]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And now we can see that we get a good response!\n", - "\n", - "This is the basic idea underpinning a chatbot's ability to interact conversationally.\n", - "So how do we best implement this?" 
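Spelled out as a loop, this manual approach just keeps appending to one array and re-sends the whole thing on every turn. Below is a minimal sketch, assuming the `llm` defined above; the `sendMessage` helper is purely illustrative.

```typescript
import type { BaseMessageLike } from "@langchain/core/messages";

// Naive history threading: keep every turn in one array and re-send it each time.
const history: BaseMessageLike[] = [];

const sendMessage = async (text: string) => {
  history.push({ role: "user", content: text });
  const reply = await llm.invoke(history);
  history.push(reply);
  return reply.content;
};

console.log(await sendMessage("Hi! I'm Bob"));
console.log(await sendMessage("What's my name?")); // answered correctly, since the first turn is re-sent
```

This works, but nothing is stored anywhere and every caller has to manage the array by hand, which is what the persistence layer in the next section takes care of.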
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Message persistence\n", - "\n", - "[LangGraph](https://langchain-ai.github.io/langgraphjs/) implements a built-in persistence layer, making it ideal for chat applications that support multiple conversational turns.\n", - "\n", - "Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n", - "\n", - "LangGraph comes with a simple in-memory checkpointer, which we use below." - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", - "\n", - "// Define the function that calls the model\n", - "const callModel = async (state: typeof MessagesAnnotation.State) => {\n", - " const response = await llm.invoke(state.messages);\n", - " return { messages: response };\n", - "};\n", - "\n", - "// Define a new graph\n", - "const workflow = new StateGraph(MessagesAnnotation)\n", - " // Define the node and edge\n", - " .addNode(\"model\", callModel)\n", - " .addEdge(START, \"model\")\n", - " .addEdge(\"model\", END);\n", - "\n", - "// Add memory\n", - "const memory = new MemorySaver();\n", - "const app = workflow.compile({ checkpointer: memory });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now need to create a `config` that we pass into the runnable every time. This config contains information that is not part of the input directly, but is still useful. In this case, we want to include a `thread_id`. This should look like:" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "import { v4 as uuidv4 } from \"uuid\";\n", - "\n", - "const config = { configurable: { thread_id: uuidv4() } };" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This enables us to support multiple conversation threads with a single application, a common requirement when your application has multiple users.\n", - "\n", - "We can then invoke the application:" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o-mini\" })" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXfjqCno78CGXCHoAgamqXG1pnZ\",\n", - " \"content\": \"Hi Bob! How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 10,\n", - " \"promptTokens\": 12,\n", - " \"totalTokens\": 22\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 12,\n", - " \"output_tokens\": 10,\n", - " \"total_tokens\": 22\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const input = [\n", - " {\n", - " role: \"user\",\n", - " content: \"Hi! 
I'm Bob.\",\n", - " }\n", - "]\n", - "const output = await app.invoke({ messages: input }, config)\n", - "// The output contains all messages in the state.\n", - "// This will long the last message in the conversation.\n", - "console.log(output.messages[output.messages.length - 1]);" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXgzHFHk4KsaNmDJyvflHq4JY2L\",\n", - " \"content\": \"Your name is Bob! How can I help you today, Bob?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 14,\n", - " \"promptTokens\": 34,\n", - " \"totalTokens\": 48\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 34,\n", - " \"output_tokens\": 14,\n", - " \"total_tokens\": 48\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const input2 = [\n", - " {\n", - " role: \"user\",\n", - " content: \"What's my name?\",\n", - " }\n", - "]\n", - "const output2 = await app.invoke({ messages: input2 }, config)\n", - "console.log(output2.messages[output2.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Great! Our chatbot now remembers things about us. If we change the config to reference a different `thread_id`, we can see that it starts the conversation fresh." - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXeSO4JQpxO96lj7iudUptJ6nfW\",\n", + " \"content\": \"Hi Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 10,\n", + " \"promptTokens\": 10,\n", + " \"totalTokens\": 20\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 10,\n", + " \"output_tokens\": 10,\n", + " \"total_tokens\": 20\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "await llm.invoke([{ role: \"user\", content: \"Hi im bob\" }])" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXhT4EVx8mGgmKXJ1s132qEluxR\",\n", - " \"content\": \"I'm sorry, but I don’t have access to personal data about individuals unless it has been shared in the course of our conversation. Therefore, I don't know your name. 
How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 41,\n", - " \"promptTokens\": 11,\n", - " \"totalTokens\": 52\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 11,\n", - " \"output_tokens\": 41,\n", - " \"total_tokens\": 52\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const config2 = { configurable: { thread_id: uuidv4() } }\n", - "const input3 = [\n", - " {\n", - " role: \"user\",\n", - " content: \"What's my name?\",\n", - " }\n", - "]\n", - "const output3 = await app.invoke({ messages: input3 }, config2)\n", - "console.log(output3.messages[output3.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "However, we can always go back to the original conversation (since we are persisting it in a database)" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The model on its own does not have any concept of state. For example, if you ask a followup question:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXhZmtzvV3kqKig47xxhKEnvVfH\",\n", - " \"content\": \"Your name is Bob! If there's anything else you'd like to talk about or ask, feel free!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 20,\n", - " \"promptTokens\": 60,\n", - " \"totalTokens\": 80\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 60,\n", - " \"output_tokens\": 20,\n", - " \"total_tokens\": 80\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const output4 = await app.invoke({ messages: input2 }, config)\n", - "console.log(output4.messages[output4.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This is how we can support a chatbot having conversations with many users!\n", - "\n", - "Right now, all we've done is add a simple persistence layer around the model. We can start to make the more complicated and personalized by adding in a prompt template.\n", - "\n", - "## Prompt templates\n", - "\n", - "Prompt Templates help to turn raw user information into a format that the LLM can work with. In this case, the raw user input is just a message, which we are passing to the LLM. Let's now make that a bit more complicated. First, let's add in a system message with some custom instructions (but still taking messages as input). Next, we'll add in more input besides just the messages.\n", - "\n", - "To add in a system message, we will create a `ChatPromptTemplate`. We will utilize `MessagesPlaceholder` to pass all the messages in." - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You talk like a pirate. 
Answer all questions to the best of your ability.\"],\n", - " new MessagesPlaceholder(\"messages\"),\n", - "]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now update our application to incorporate this template:" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [], - "source": [ - "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", - "\n", - "// Define the function that calls the model\n", - "const callModel2 = async (state: typeof MessagesAnnotation.State) => {\n", - " // highlight-start\n", - " const chain = prompt.pipe(llm);\n", - " const response = await chain.invoke(state);\n", - " // highlight-end\n", - " // Update message history with response:\n", - " return { messages: [response] };\n", - "};\n", - "\n", - "// Define a new graph\n", - "const workflow2 = new StateGraph(MessagesAnnotation)\n", - " // Define the (single) node in the graph\n", - " .addNode(\"model\", callModel2)\n", - " .addEdge(START, \"model\")\n", - " .addEdge(\"model\", END);\n", - "\n", - "// Add memory\n", - "const app2 = workflow2.compile({ checkpointer: new MemorySaver() });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We invoke the application in the same way:" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXe1Zih4gMe3XgotWL83xeWub2h\",\n", + " \"content\": \"I'm sorry, but I don't have access to personal information about individuals unless it has been shared with me during our conversation. If you'd like to tell me your name, feel free to do so!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 39,\n", + " \"promptTokens\": 10,\n", + " \"totalTokens\": 49\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 10,\n", + " \"output_tokens\": 39,\n", + " \"total_tokens\": 49\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "await llm.invoke([{ role: \"user\", content: \"Whats my name\" }])" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXio2Vy1YNRDiFdKKEyN3Yw1B9I\",\n", - " \"content\": \"Ahoy, Jim! What brings ye to these treacherous waters today? Speak up, matey!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 22,\n", - " \"promptTokens\": 32,\n", - " \"totalTokens\": 54\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 32,\n", - " \"output_tokens\": 22,\n", - " \"total_tokens\": 54\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const config3 = { configurable: { thread_id: uuidv4() } }\n", - "const input4 = [\n", - " {\n", - " role: \"user\",\n", - " content: \"Hi! 
I'm Jim.\",\n", - " }\n", - "]\n", - "const output5 = await app2.invoke({ messages: input4 }, config3)\n", - "console.log(output5.messages[output5.messages.length - 1]);" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at the example [LangSmith trace](https://smith.langchain.com/public/3b768e44-a319-453a-bd6e-30f9df75f16a/r)\n", + "\n", + "We can see that it doesn't take the previous conversation turn into context, and cannot answer the question.\n", + "This makes for a terrible chatbot experience!\n", + "\n", + "To get around this, we need to pass the entire conversation history into the model. Let's see what happens when we do that:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXjZNHiT5g7eTf52auWGXDUUcDs\",\n", - " \"content\": \"Ye be callin' yerself Jim, if me memory serves me right! Arrr, what else can I do fer ye, matey?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 31,\n", - " \"promptTokens\": 67,\n", - " \"totalTokens\": 98\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_3a215618e8\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 67,\n", - " \"output_tokens\": 31,\n", - " \"total_tokens\": 98\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const input5 = [\n", - " {\n", - " role: \"user\",\n", - " content: \"What is my name?\"\n", - " }\n", - "]\n", - "const output6 = await app2.invoke({ messages: input5 }, config3)\n", - "console.log(output6.messages[output6.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Awesome! Let's now make our prompt a little bit more complicated. Let's assume that the prompt template now looks something like this:" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [], - "source": [ - "const prompt2 = ChatPromptTemplate.fromMessages([\n", - " [\"system\", \"You are a helpful assistant. Answer all questions to the best of your ability in {language}.\"],\n", - " new MessagesPlaceholder(\"messages\"),\n", - "]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that we have added a new `language` input to the prompt. Our application now has two parameters-- the input `messages` and `language`. 
We should update our application's state to reflect this:" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [], - "source": [ - "import { START, END, StateGraph, MemorySaver, MessagesAnnotation, Annotation } from \"@langchain/langgraph\";\n", - "\n", - "// Define the State\n", - "const GraphAnnotation = Annotation.Root({\n", - " ...MessagesAnnotation.spec,\n", - " language: Annotation(),\n", - "});\n", - "\n", - "// Define the function that calls the model\n", - "const callModel3 = async (state: typeof GraphAnnotation.State) => {\n", - " const chain = prompt2.pipe(llm);\n", - " const response = await chain.invoke(state);\n", - " return { messages: [response] };\n", - "};\n", - "\n", - "const workflow3 = new StateGraph(GraphAnnotation)\n", - " .addNode(\"model\", callModel3)\n", - " .addEdge(START, \"model\")\n", - " .addEdge(\"model\", END);\n", - "\n", - "const app3 = workflow3.compile({ checkpointer: new MemorySaver() });" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXfX4Fnp247rOxyPlBUYMQgahj2\",\n", + " \"content\": \"Your name is Bob! How can I help you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 33,\n", + " \"totalTokens\": 45\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 33,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 45\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "await llm.invoke([\n", + " { role: \"user\", content: \"Hi! I'm Bob\" },\n", + " { role: \"assistant\", content: \"Hello Bob! How can I assist you today?\" },\n", + " { role: \"user\", content: \"What's my name?\" }\n", + "]);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXkq2ZV9xmOBSM2iJbYSn8Epvqa\",\n", - " \"content\": \"¡Hola, Bob! 
¿En qué puedo ayudarte hoy?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 12,\n", - " \"promptTokens\": 32,\n", - " \"totalTokens\": 44\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 32,\n", - " \"output_tokens\": 12,\n", - " \"total_tokens\": 44\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const config4 = { configurable: { thread_id: uuidv4() } }\n", - "const input6 = {\n", - " messages: [\n", - " {\n", - " role: \"user\",\n", - " content: \"Hi im bob\"\n", - " }\n", - " ],\n", - " language: \"Spanish\"\n", - "}\n", - "const output7 = await app3.invoke(input6, config4)\n", - "console.log(output7.messages[output7.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that the entire state is persisted, so we can omit parameters like `language` if no changes are desired:" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And now we can see that we get a good response!\n", + "\n", + "This is the basic idea underpinning a chatbot's ability to interact conversationally.\n", + "So how do we best implement this?" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUXk9Ccr1dhmA9lZ1VmZ998PFyJF\",\n", - " \"content\": \"Tu nombre es Bob. ¿Hay algo más en lo que te pueda ayudar?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 16,\n", - " \"promptTokens\": 57,\n", - " \"totalTokens\": 73\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 57,\n", - " \"output_tokens\": 16,\n", - " \"total_tokens\": 73\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const input7 = {\n", - " messages: [\n", - " {\n", - " role: \"user\",\n", - " content: \"What is my name?\"\n", - " }\n", - " ],\n", - "}\n", - "const output8 = await app3.invoke(input7, config4)\n", - "console.log(output8.messages[output8.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To help you understand what's happening internally, check out [this LangSmith trace](https://smith.langchain.com/public/d61630b7-6a52-4dc9-974c-8452008c498a/r)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Managing Conversation History\n", - "\n", - "One important concept to understand when building chatbots is how to manage conversation history. If left unmanaged, the list of messages will grow unbounded and potentially overflow the context window of the LLM. Therefore, it is important to add a step that limits the size of the messages you are passing in.\n", - "\n", - "**Importantly, you will want to do this BEFORE the prompt template but AFTER you load previous messages from Message History.**\n", - "\n", - "We can do this by adding a simple step in front of the prompt that modifies the `messages` key appropriately, and then wrap that new chain in the Message History class. 
\n", - "\n", - "LangChain comes with a few built-in helpers for [managing a list of messages](/docs/how_to/#messages). In this case we'll use the [trimMessages](/docs/how_to/trim_messages/) helper to reduce how many messages we're sending to the model. The trimmer allows us to specify how many tokens we want to keep, along with other parameters like if we want to always keep the system message and whether to allow partial messages:" - ] - }, - { - "cell_type": "code", - "execution_count": 54, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Message persistence\n", + "\n", + "[LangGraph](https://langchain-ai.github.io/langgraphjs/) implements a built-in persistence layer, making it ideal for chat applications that support multiple conversational turns.\n", + "\n", + "Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n", + "\n", + "LangGraph comes with a simple in-memory checkpointer, which we use below." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[\n", - " SystemMessage {\n", - " \"content\": \"you're a good assistant\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " HumanMessage {\n", - " \"content\": \"I like vanilla ice cream\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"nice\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " },\n", - " HumanMessage {\n", - " \"content\": \"whats 2 + 2\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"4\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " },\n", - " HumanMessage {\n", - " \"content\": \"thanks\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"no problem!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " },\n", - " HumanMessage {\n", - " \"content\": \"having fun?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {}\n", - " },\n", - " AIMessage {\n", - " \"content\": \"yes!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": []\n", - " }\n", - "]\n" - ] - } - ], - "source": [ - "import { SystemMessage, HumanMessage, AIMessage, trimMessages } from \"@langchain/core/messages\"\n", - "\n", - "const trimmer = trimMessages({\n", - " maxTokens: 10,\n", - " strategy: \"last\",\n", - " tokenCounter: (msgs) => msgs.length,\n", - " includeSystem: true,\n", - " allowPartial: false,\n", - " startOn: \"human\",\n", - "})\n", - "\n", - "const messages = [\n", - " new SystemMessage(\"you're a good assistant\"),\n", - " new HumanMessage(\"hi! 
I'm bob\"),\n", - " new AIMessage(\"hi!\"),\n", - " new HumanMessage(\"I like vanilla ice cream\"),\n", - " new AIMessage(\"nice\"),\n", - " new HumanMessage(\"whats 2 + 2\"),\n", - " new AIMessage(\"4\"),\n", - " new HumanMessage(\"thanks\"),\n", - " new AIMessage(\"no problem!\"),\n", - " new HumanMessage(\"having fun?\"),\n", - " new AIMessage(\"yes!\"),\n", - "]\n", - "\n", - "await trimmer.invoke(messages)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To use it in our chain, we just need to run the trimmer before we pass the `messages` input to our prompt. " - ] - }, - { - "cell_type": "code", - "execution_count": 55, - "metadata": {}, - "outputs": [], - "source": [ - "const callModel4 = async (state: typeof GraphAnnotation.State) => {\n", - " const chain = prompt2.pipe(llm);\n", - " // highlight-start\n", - " const trimmedMessage = await trimmer.invoke(state.messages);\n", - " const response = await chain.invoke({ messages: trimmedMessage, language: state.language });\n", - " // highlight-end\n", - " return { messages: [response] };\n", - "};\n", - "\n", - "\n", - "const workflow4 = new StateGraph(GraphAnnotation)\n", - " .addNode(\"model\", callModel4)\n", - " .addEdge(START, \"model\")\n", - " .addEdge(\"model\", END);\n", - "\n", - "const app4 = workflow4.compile({ checkpointer: new MemorySaver() });" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now if we try asking the model our name, it won't know it since we trimmed that part of the chat history:" - ] - }, - { - "cell_type": "code", - "execution_count": 56, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", + "\n", + "// Define the function that calls the model\n", + "const callModel = async (state: typeof MessagesAnnotation.State) => {\n", + " const response = await llm.invoke(state.messages);\n", + " return { messages: response };\n", + "};\n", + "\n", + "// Define a new graph\n", + "const workflow = new StateGraph(MessagesAnnotation)\n", + " // Define the node and edge\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Add memory\n", + "const memory = new MemorySaver();\n", + "const app = workflow.compile({ checkpointer: memory });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUdCOvzRAvgoxd2sf93oGKQfA9vh\",\n", - " \"content\": \"I don’t know your name, but I’d be happy to learn it if you’d like to share!\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 22,\n", - " \"promptTokens\": 97,\n", - " \"totalTokens\": 119\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 97,\n", - " \"output_tokens\": 22,\n", - " \"total_tokens\": 119\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "const config5 = { configurable: { thread_id: uuidv4() }}\n", - "const input8 = {\n", - " // highlight-next-line\n", - " messages: [...messages, new HumanMessage(\"What is my name?\")],\n", - " language: \"English\"\n", - "}\n", - "\n", - "const output9 = await app4.invoke(\n", - " input8,\n", - " config5,\n", - ")\n", 
- "console.log(output9.messages[output9.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "But if we ask about information that is within the last few messages, it remembers:" - ] - }, - { - "cell_type": "code", - "execution_count": 57, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now need to create a `config` that we pass into the runnable every time. This config contains information that is not part of the input directly, but is still useful. In this case, we want to include a `thread_id`. This should look like:" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "import { v4 as uuidv4 } from \"uuid\";\n", + "\n", + "const config = { configurable: { thread_id: uuidv4() } };" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This enables us to support multiple conversation threads with a single application, a common requirement when your application has multiple users.\n", + "\n", + "We can then invoke the application:" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXfjqCno78CGXCHoAgamqXG1pnZ\",\n", + " \"content\": \"Hi Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 10,\n", + " \"promptTokens\": 12,\n", + " \"totalTokens\": 22\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 12,\n", + " \"output_tokens\": 10,\n", + " \"total_tokens\": 22\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const input = [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi! I'm Bob.\",\n", + " }\n", + "]\n", + "const output = await app.invoke({ messages: input }, config)\n", + "// The output contains all messages in the state.\n", + "// This will long the last message in the conversation.\n", + "console.log(output.messages[output.messages.length - 1]);" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXgzHFHk4KsaNmDJyvflHq4JY2L\",\n", + " \"content\": \"Your name is Bob! How can I help you today, Bob?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 14,\n", + " \"promptTokens\": 34,\n", + " \"totalTokens\": 48\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 34,\n", + " \"output_tokens\": 14,\n", + " \"total_tokens\": 48\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const input2 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"What's my name?\",\n", + " }\n", + "]\n", + "const output2 = await app.invoke({ messages: input2 }, config)\n", + "console.log(output2.messages[output2.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Great! Our chatbot now remembers things about us. 
If we change the config to reference a different `thread_id`, we can see that it starts the conversation fresh." + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXhT4EVx8mGgmKXJ1s132qEluxR\",\n", + " \"content\": \"I'm sorry, but I don’t have access to personal data about individuals unless it has been shared in the course of our conversation. Therefore, I don't know your name. How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 41,\n", + " \"promptTokens\": 11,\n", + " \"totalTokens\": 52\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 11,\n", + " \"output_tokens\": 41,\n", + " \"total_tokens\": 52\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config2 = { configurable: { thread_id: uuidv4() } }\n", + "const input3 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"What's my name?\",\n", + " }\n", + "]\n", + "const output3 = await app.invoke({ messages: input3 }, config2)\n", + "console.log(output3.messages[output3.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "However, we can always go back to the original conversation (since we are persisting it in a database)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXhZmtzvV3kqKig47xxhKEnvVfH\",\n", + " \"content\": \"Your name is Bob! If there's anything else you'd like to talk about or ask, feel free!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 20,\n", + " \"promptTokens\": 60,\n", + " \"totalTokens\": 80\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 60,\n", + " \"output_tokens\": 20,\n", + " \"total_tokens\": 80\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const output4 = await app.invoke({ messages: input2 }, config)\n", + "console.log(output4.messages[output4.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is how we can support a chatbot having conversations with many users!\n", + "\n", + "Right now, all we've done is add a simple persistence layer around the model. We can start to make the more complicated and personalized by adding in a prompt template.\n", + "\n", + "## Prompt templates\n", + "\n", + "Prompt Templates help to turn raw user information into a format that the LLM can work with. In this case, the raw user input is just a message, which we are passing to the LLM. Let's now make that a bit more complicated. First, let's add in a system message with some custom instructions (but still taking messages as input). Next, we'll add in more input besides just the messages.\n", + "\n", + "To add in a system message, we will create a `ChatPromptTemplate`. We will utilize `MessagesPlaceholder` to pass all the messages in." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You talk like a pirate. Answer all questions to the best of your ability.\"],\n", + " new MessagesPlaceholder(\"messages\"),\n", + "]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now update our application to incorporate this template:" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [], + "source": [ + "import { START, END, MessagesAnnotation, StateGraph, MemorySaver } from \"@langchain/langgraph\";\n", + "\n", + "// Define the function that calls the model\n", + "const callModel2 = async (state: typeof MessagesAnnotation.State) => {\n", + " // highlight-start\n", + " const chain = prompt.pipe(llm);\n", + " const response = await chain.invoke(state);\n", + " // highlight-end\n", + " // Update message history with response:\n", + " return { messages: [response] };\n", + "};\n", + "\n", + "// Define a new graph\n", + "const workflow2 = new StateGraph(MessagesAnnotation)\n", + " // Define the (single) node in the graph\n", + " .addNode(\"model\", callModel2)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "// Add memory\n", + "const app2 = workflow2.compile({ checkpointer: new MemorySaver() });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "AIMessage {\n", - " \"id\": \"chatcmpl-ABUdChq5JOMhcFA1dB7PvCHLyliwM\",\n", - " \"content\": \"You asked for the solution to the math problem \\\"what's 2 + 2,\\\" and I answered that it equals 4.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 27,\n", - " \"promptTokens\": 99,\n", - " \"totalTokens\": 126\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_1bb46167f9\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 99,\n", - " \"output_tokens\": 27,\n", - " \"total_tokens\": 126\n", - " }\n", - "}\n" - ] + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We invoke the application in the same way:" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXio2Vy1YNRDiFdKKEyN3Yw1B9I\",\n", + " \"content\": \"Ahoy, Jim! What brings ye to these treacherous waters today? Speak up, matey!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 22,\n", + " \"promptTokens\": 32,\n", + " \"totalTokens\": 54\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 32,\n", + " \"output_tokens\": 22,\n", + " \"total_tokens\": 54\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config3 = { configurable: { thread_id: uuidv4() } }\n", + "const input4 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi! 
I'm Jim.\",\n", + " }\n", + "]\n", + "const output5 = await app2.invoke({ messages: input4 }, config3)\n", + "console.log(output5.messages[output5.messages.length - 1]);" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXjZNHiT5g7eTf52auWGXDUUcDs\",\n", + " \"content\": \"Ye be callin' yerself Jim, if me memory serves me right! Arrr, what else can I do fer ye, matey?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 31,\n", + " \"promptTokens\": 67,\n", + " \"totalTokens\": 98\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_3a215618e8\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 67,\n", + " \"output_tokens\": 31,\n", + " \"total_tokens\": 98\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const input5 = [\n", + " {\n", + " role: \"user\",\n", + " content: \"What is my name?\"\n", + " }\n", + "]\n", + "const output6 = await app2.invoke({ messages: input5 }, config3)\n", + "console.log(output6.messages[output6.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Awesome! Let's now make our prompt a little bit more complicated. Let's assume that the prompt template now looks something like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "const prompt2 = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant. Answer all questions to the best of your ability in {language}.\"],\n", + " new MessagesPlaceholder(\"messages\"),\n", + "]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that we have added a new `language` input to the prompt. Our application now has two parameters-- the input `messages` and `language`. We should update our application's state to reflect this:" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [], + "source": [ + "import { START, END, StateGraph, MemorySaver, MessagesAnnotation, Annotation } from \"@langchain/langgraph\";\n", + "\n", + "// Define the State\n", + "const GraphAnnotation = Annotation.Root({\n", + " ...MessagesAnnotation.spec,\n", + " language: Annotation(),\n", + "});\n", + "\n", + "// Define the function that calls the model\n", + "const callModel3 = async (state: typeof GraphAnnotation.State) => {\n", + " const chain = prompt2.pipe(llm);\n", + " const response = await chain.invoke(state);\n", + " return { messages: [response] };\n", + "};\n", + "\n", + "const workflow3 = new StateGraph(GraphAnnotation)\n", + " .addNode(\"model\", callModel3)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "const app3 = workflow3.compile({ checkpointer: new MemorySaver() });" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXkq2ZV9xmOBSM2iJbYSn8Epvqa\",\n", + " \"content\": \"¡Hola, Bob! 
¿En qué puedo ayudarte hoy?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 32,\n", + " \"totalTokens\": 44\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 32,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 44\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config4 = { configurable: { thread_id: uuidv4() } }\n", + "const input6 = {\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"Hi im bob\"\n", + " }\n", + " ],\n", + " language: \"Spanish\"\n", + "}\n", + "const output7 = await app3.invoke(input6, config4)\n", + "console.log(output7.messages[output7.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the entire state is persisted, so we can omit parameters like `language` if no changes are desired:" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUXk9Ccr1dhmA9lZ1VmZ998PFyJF\",\n", + " \"content\": \"Tu nombre es Bob. ¿Hay algo más en lo que te pueda ayudar?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 16,\n", + " \"promptTokens\": 57,\n", + " \"totalTokens\": 73\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 57,\n", + " \"output_tokens\": 16,\n", + " \"total_tokens\": 73\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const input7 = {\n", + " messages: [\n", + " {\n", + " role: \"user\",\n", + " content: \"What is my name?\"\n", + " }\n", + " ],\n", + "}\n", + "const output8 = await app3.invoke(input7, config4)\n", + "console.log(output8.messages[output8.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To help you understand what's happening internally, check out [this LangSmith trace](https://smith.langchain.com/public/d61630b7-6a52-4dc9-974c-8452008c498a/r)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Managing Conversation History\n", + "\n", + "One important concept to understand when building chatbots is how to manage conversation history. If left unmanaged, the list of messages will grow unbounded and potentially overflow the context window of the LLM. Therefore, it is important to add a step that limits the size of the messages you are passing in.\n", + "\n", + "**Importantly, you will want to do this BEFORE the prompt template but AFTER you load previous messages from Message History.**\n", + "\n", + "We can do this by adding a simple step in front of the prompt that modifies the `messages` key appropriately, and then wrap that new chain in the Message History class. \n", + "\n", + "LangChain comes with a few built-in helpers for [managing a list of messages](/docs/how_to/#messages). In this case we'll use the [trimMessages](/docs/how_to/trim_messages/) helper to reduce how many messages we're sending to the model. 
The trimmer allows us to specify how many tokens we want to keep, along with other parameters like if we want to always keep the system message and whether to allow partial messages:" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " SystemMessage {\n", + " \"content\": \"you're a good assistant\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"I like vanilla ice cream\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"nice\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"whats 2 + 2\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"4\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"thanks\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"no problem!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " },\n", + " HumanMessage {\n", + " \"content\": \"having fun?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {}\n", + " },\n", + " AIMessage {\n", + " \"content\": \"yes!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { SystemMessage, HumanMessage, AIMessage, trimMessages } from \"@langchain/core/messages\"\n", + "\n", + "const trimmer = trimMessages({\n", + " maxTokens: 10,\n", + " strategy: \"last\",\n", + " tokenCounter: (msgs) => msgs.length,\n", + " includeSystem: true,\n", + " allowPartial: false,\n", + " startOn: \"human\",\n", + "})\n", + "\n", + "const messages = [\n", + " new SystemMessage(\"you're a good assistant\"),\n", + " new HumanMessage(\"hi! I'm bob\"),\n", + " new AIMessage(\"hi!\"),\n", + " new HumanMessage(\"I like vanilla ice cream\"),\n", + " new AIMessage(\"nice\"),\n", + " new HumanMessage(\"whats 2 + 2\"),\n", + " new AIMessage(\"4\"),\n", + " new HumanMessage(\"thanks\"),\n", + " new AIMessage(\"no problem!\"),\n", + " new HumanMessage(\"having fun?\"),\n", + " new AIMessage(\"yes!\"),\n", + "]\n", + "\n", + "await trimmer.invoke(messages)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To use it in our chain, we just need to run the trimmer before we pass the `messages` input to our prompt. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [], + "source": [ + "const callModel4 = async (state: typeof GraphAnnotation.State) => {\n", + " const chain = prompt2.pipe(llm);\n", + " // highlight-start\n", + " const trimmedMessage = await trimmer.invoke(state.messages);\n", + " const response = await chain.invoke({ messages: trimmedMessage, language: state.language });\n", + " // highlight-end\n", + " return { messages: [response] };\n", + "};\n", + "\n", + "\n", + "const workflow4 = new StateGraph(GraphAnnotation)\n", + " .addNode(\"model\", callModel4)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "const app4 = workflow4.compile({ checkpointer: new MemorySaver() });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now if we try asking the model our name, it won't know it since we trimmed that part of the chat history:" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUdCOvzRAvgoxd2sf93oGKQfA9vh\",\n", + " \"content\": \"I don’t know your name, but I’d be happy to learn it if you’d like to share!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 22,\n", + " \"promptTokens\": 97,\n", + " \"totalTokens\": 119\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 97,\n", + " \"output_tokens\": 22,\n", + " \"total_tokens\": 119\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config5 = { configurable: { thread_id: uuidv4() }}\n", + "const input8 = {\n", + " // highlight-next-line\n", + " messages: [...messages, new HumanMessage(\"What is my name?\")],\n", + " language: \"English\"\n", + "}\n", + "\n", + "const output9 = await app4.invoke(\n", + " input8,\n", + " config5,\n", + ")\n", + "console.log(output9.messages[output9.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But if we ask about information that is within the last few messages, it remembers:" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-ABUdChq5JOMhcFA1dB7PvCHLyliwM\",\n", + " \"content\": \"You asked for the solution to the math problem \\\"what's 2 + 2,\\\" and I answered that it equals 4.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 27,\n", + " \"promptTokens\": 99,\n", + " \"totalTokens\": 126\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_1bb46167f9\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 99,\n", + " \"output_tokens\": 27,\n", + " \"total_tokens\": 126\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const config6 = { configurable: { thread_id: uuidv4() }}\n", + "const input9 = {\n", + " // highlight-next-line\n", + " messages: [...messages, new HumanMessage(\"What math problem did I ask?\")],\n", + " language: \"English\"\n", + "}\n", + "\n", + "const output10 = await app4.invoke(\n", + " 
input9,\n", + " config6,\n", + ")\n", + "console.log(output10.messages[output10.messages.length - 1]);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you take a look at LangSmith, you can see exactly what is happening under the hood in the [LangSmith trace](https://smith.langchain.com/public/bf1b1a10-0fe0-42f6-9f0f-b70d9f7520dc/r)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Steps\n", + "\n", + "Now that you understand the basics of how to create a chatbot in LangChain, some more advanced tutorials you may be interested in are:\n", + "\n", + "- [Conversational RAG](/docs/tutorials/qa_chat_history): Enable a chatbot experience over an external source of data\n", + "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/): Build a chatbot that can take actions\n", + "\n", + "If you want to dive deeper on specifics, some things worth checking out are:\n", + "\n", + "- [Streaming](/docs/how_to/streaming): streaming is *crucial* for chat applications\n", + "- [How to add message history](/docs/how_to/message_history): for a deeper dive into all things related to message history\n", + "- [How to manage large message history](/docs/how_to/trim_messages/): more techniques for managing a large chat history\n", + "- [LangGraph main docs](https://langchain-ai.github.io/langgraphjs/): for more detail on building with LangGraph" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const config6 = { configurable: { thread_id: uuidv4() }}\n", - "const input9 = {\n", - " // highlight-next-line\n", - " messages: [...messages, new HumanMessage(\"What math problem did I ask?\")],\n", - " language: \"English\"\n", - "}\n", - "\n", - "const output10 = await app4.invoke(\n", - " input9,\n", - " config6,\n", - ")\n", - "console.log(output10.messages[output10.messages.length - 1]);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you take a look at LangSmith, you can see exactly what is happening under the hood in the [LangSmith trace](https://smith.langchain.com/public/bf1b1a10-0fe0-42f6-9f0f-b70d9f7520dc/r)." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Next Steps\n", - "\n", - "Now that you understand the basics of how to create a chatbot in LangChain, some more advanced tutorials you may be interested in are:\n", - "\n", - "- [Conversational RAG](/docs/tutorials/qa_chat_history): Enable a chatbot experience over an external source of data\n", - "- [Agents](https://langchain-ai.github.io/langgraphjs/tutorials/multi_agent/agent_supervisor/): Build a chatbot that can take actions\n", - "\n", - "If you want to dive deeper on specifics, some things worth checking out are:\n", - "\n", - "- [Streaming](/docs/how_to/streaming): streaming is *crucial* for chat applications\n", - "- [How to add message history](/docs/how_to/message_history): for a deeper dive into all things related to message history\n", - "- [How to manage large message history](/docs/how_to/trim_messages/): more techniques for managing a large chat history\n", - "- [LangGraph main docs](https://langchain-ai.github.io/langgraph/): for more detail on building with LangGraph" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/core_docs/docs/tutorials/extraction.ipynb b/docs/core_docs/docs/tutorials/extraction.ipynb index 3e0801bc73f9..9f7b23939fca 100644 --- a/docs/core_docs/docs/tutorials/extraction.ipynb +++ b/docs/core_docs/docs/tutorials/extraction.ipynb @@ -1,367 +1,367 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "df29b30a-fd27-4e08-8269-870df5631f9e", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 4\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "d28530a6-ddfd-49c0-85dc-b723551f6614", - "metadata": {}, - "source": [ - "# Build an Extraction Chain\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat Models](/docs/concepts/#chat-models)\n", - "- [Tools](/docs/concepts/#tools)\n", - "- [Tool calling](/docs/concepts/#function-tool-calling)\n", - "\n", - ":::\n", - "\n", - "In this tutorial, we will build a chain to extract structured information from unstructured text. 
\n", - "\n", - ":::{.callout-important}\n", - "This tutorial will only work with models that support **function/tool calling**\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "id": "4412def2-38e3-4bd0-bbf0-fb09ff9e5985", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "### Installation\n", - "\n", - "To install LangChain run:\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from '@theme/Npm2Yarn';\n", - "\n", - "\n", - " langchain @langchain/core\n", - "\n", - "```\n", - "\n", - "For more details, see our [Installation guide](/docs/how_to/installation/).\n", - "\n", - "### LangSmith\n", - "\n", - "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", - "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", - "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", - "\n", - "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", - "\n", - "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", - "\n", - "# Reduce tracing latency if you are not in a serverless environment\n", - "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "54d6b970-2ea3-4192-951e-21237212b359", - "metadata": {}, - "source": [ - "## The Schema\n", - "\n", - "First, we need to describe what information we want to extract from the text.\n", - "\n", - "We'll use [Zod](https://zod.dev) to define an example schema that extracts personal information.\n", - "\n", - "```{=mdx}\n", - "\n", - " zod @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "c141084c-fb94-4093-8d6a-81175d688e40", - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "\n", - "const personSchema = z.object({\n", - " name: z.string().nullish().describe('The name of the person'),\n", - " hair_color: z.string().nullish().describe(\"The color of the person's hair if known\"),\n", - " height_in_meters: z.string().nullish().describe('Height measured in meters'),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "f248dd54-e36d-435a-b154-394ab4ed6792", - "metadata": {}, - "source": [ - "There are two best practices when defining schema:\n", - "\n", - "1. Document the **attributes** and the **schema** itself: This information is sent to the LLM and is used to improve the quality of information extraction.\n", - "2. Do not force the LLM to make up information! Above we used `.nullish()` for the attributes allowing the LLM to output `null` or `undefined` if it doesn't know the answer.\n", - "\n", - ":::{.callout-important}\n", - "For best performance, document the schema well and make sure the model isn't force to return results if there's no information to be extracted in the text.\n", - ":::\n", - "\n", - "## The Extractor\n", - "\n", - "Let's create an information extractor using the schema we defined above." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "a5e490f6-35ad-455e-8ae4-2bae021583ff", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "// Define a custom prompt to provide instructions and any additional context.\n", - "// 1) You can add examples into the prompt template to improve extraction quality\n", - "// 2) Introduce additional parameters to take context into account (e.g., include metadata\n", - "// about the document from which the text was extracted.)\n", - "const prompt = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\n", - " \"system\",\n", - " `You are an expert extraction algorithm.\n", - "Only extract relevant information from the text.\n", - "If you do not know the value of an attribute asked to extract,\n", - "return null for the attribute's value.`,\n", - " ],\n", - " // Please see the how-to about improving performance with\n", - " // reference examples.\n", - " // [\"placeholder\", \"{examples}\"],\n", - " [\"human\", \"{text}\"],\n", - " ],\n", - ");" - ] - }, - { - "cell_type": "markdown", - "id": "832bf6a1-8e0c-4b6a-aa37-12fe9c42a6d9", - "metadata": {}, - "source": [ - "We need to use a model that supports function/tool calling.\n", - "\n", - "Please review [the documentation](/docs/concepts#function-tool-calling) for list of some models that can be used with this API." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "04d846a6-d5cb-4009-ac19-61e3aac0177e", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "{ name: \u001b[32m\"Alan Smith\"\u001b[39m, hair_color: \u001b[32m\"blond\"\u001b[39m, height_in_meters: \u001b[32m\"1.83\"\u001b[39m }" + "cell_type": "raw", + "id": "df29b30a-fd27-4e08-8269-870df5631f9e", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 4\n", + "---" ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "\n", - "const llm = new ChatAnthropic({\n", - " model: \"claude-3-sonnet-20240229\",\n", - " temperature: 0\n", - "})\n", - "\n", - "const runnable = prompt.pipe(llm.withStructuredOutput(personSchema))\n", - "\n", - "const text = \"Alan Smith is 6 feet tall and has blond hair.\"\n", - "await runnable.invoke({ text })" - ] - }, - { - "cell_type": "markdown", - "id": "bd1c493d-f9dc-4236-8da9-50f6919f5710", - "metadata": {}, - "source": [ - ":::{.callout-important} \n", - "\n", - "Extraction is Generative 🤯\n", - "\n", - "LLMs are generative models, so they can do some pretty cool things like correctly extract the height of the person in meters\n", - "even though it was provided in feet!\n", - ":::\n", - "\n", - "We can see the LangSmith trace [here](https://smith.langchain.com/public/3d44b7e8-e7ca-4e02-951d-3290ccc89d64/r).\n", - "\n", - "Even though we defined our schema with the variable name `personSchema`, Zod is unable to infer this name and therefore does not pass it along to the model. 
To help give the LLM more clues as to what your provided schema represents, you can also give the schema you pass to `withStructuredOutput()` a name:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "02f44203", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "{ name: \u001b[32m\"Alan Smith\"\u001b[39m, hair_color: \u001b[32m\"blond\"\u001b[39m, height_in_meters: \u001b[32m\"1.83\"\u001b[39m }" + "cell_type": "markdown", + "id": "d28530a6-ddfd-49c0-85dc-b723551f6614", + "metadata": {}, + "source": [ + "# Build an Extraction Chain\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat Models](/docs/concepts/chat_models)\n", + "- [Tools](/docs/concepts/tools)\n", + "- [Tool calling](/docs/concepts/tool_calling)\n", + "\n", + ":::\n", + "\n", + "In this tutorial, we will build a chain to extract structured information from unstructured text. \n", + "\n", + ":::{.callout-important}\n", + "This tutorial will only work with models that support **function/tool calling**\n", + ":::" ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const runnableWithName = prompt.pipe(llm.withStructuredOutput(personSchema, { name: \"person\" }));\n", - "\n", - "const text2 = \"Alan Smith is 6 feet tall and has blond hair.\";\n", - "\n", - "await runnableWithName.invoke({ text: text2 });" - ] - }, - { - "cell_type": "markdown", - "id": "bfe7d31e", - "metadata": {}, - "source": [ - "This can improve performance in many cases." - ] - }, - { - "cell_type": "markdown", - "id": "28c5ef0c-b8d1-4e12-bd0e-e2528de87fcc", - "metadata": {}, - "source": [ - "## Multiple Entities\n", - "\n", - "In **most cases**, you should be extracting a list of entities rather than a single entity.\n", - "\n", - "This can be easily achieved using Zod by nesting models inside one another." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "591a0c16-7a17-4883-91ee-0d6d2fdb265c", - "metadata": {}, - "outputs": [], - "source": [ - "import { z } from \"zod\";\n", - "\n", - "const person = z.object({\n", - " name: z.string().nullish().describe('The name of the person'),\n", - " hair_color: z.string().nullish().describe(\"The color of the person's hair if known\"),\n", - " height_in_meters: z.number().nullish().describe('Height measured in meters'),\n", - "});\n", - " \n", - "const dataSchema = z.object({\n", - " people: z.array(person).describe('Extracted data about people'),\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "5f5cda33-fd7b-481e-956a-703f45e40e1d", - "metadata": {}, - "source": [ - ":::{.callout-important}\n", - "Extraction might not be perfect here. 
Please continue to see how to use **Reference Examples** to improve the quality of extraction, and see the **guidelines** section!\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "cf7062cc-1d1d-4a37-9122-509d1b87f0a6", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "markdown", + "id": "4412def2-38e3-4bd0-bbf0-fb09ff9e5985", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "### Installation\n", + "\n", + "To install LangChain run:\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from '@theme/Npm2Yarn';\n", + "\n", + "\n", + " langchain @langchain/core\n", + "\n", + "```\n", + "\n", + "For more details, see our [Installation guide](/docs/how_to/installation/).\n", + "\n", + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", + "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", + "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", + "\n", + "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "```shell\n", + "export LANGCHAIN_TRACING_V2=\"true\"\n", + "export LANGCHAIN_API_KEY=\"...\"\n", + "\n", + "# Reduce tracing latency if you are not in a serverless environment\n", + "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "54d6b970-2ea3-4192-951e-21237212b359", + "metadata": {}, + "source": [ + "## The Schema\n", + "\n", + "First, we need to describe what information we want to extract from the text.\n", + "\n", + "We'll use [Zod](https://zod.dev) to define an example schema that extracts personal information.\n", + "\n", + "```{=mdx}\n", + "\n", + " zod @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "c141084c-fb94-4093-8d6a-81175d688e40", + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "\n", + "const personSchema = z.object({\n", + " name: z.string().nullish().describe('The name of the person'),\n", + " hair_color: z.string().nullish().describe(\"The color of the person's hair if known\"),\n", + " height_in_meters: z.string().nullish().describe('Height measured in meters'),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "f248dd54-e36d-435a-b154-394ab4ed6792", + "metadata": {}, + "source": [ + "There are two best practices when defining schema:\n", + "\n", + "1. Document the **attributes** and the **schema** itself: This information is sent to the LLM and is used to improve the quality of information extraction.\n", + "2. Do not force the LLM to make up information! Above we used `.nullish()` for the attributes allowing the LLM to output `null` or `undefined` if it doesn't know the answer.\n", + "\n", + ":::{.callout-important}\n", + "For best performance, document the schema well and make sure the model isn't force to return results if there's no information to be extracted in the text.\n", + ":::\n", + "\n", + "## The Extractor\n", + "\n", + "Let's create an information extractor using the schema we defined above." 
+ ] + }, { - "data": { - "text/plain": [ - "{\n", - " people: [\n", - " { name: \u001b[32m\"Jeff\"\u001b[39m, hair_color: \u001b[32m\"black\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m },\n", - " { name: \u001b[32m\"Anna\"\u001b[39m, hair_color: \u001b[32m\"black\"\u001b[39m, height_in_meters: \u001b[1mnull\u001b[22m }\n", - " ]\n", - "}" + "cell_type": "code", + "execution_count": 2, + "id": "a5e490f6-35ad-455e-8ae4-2bae021583ff", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "// Define a custom prompt to provide instructions and any additional context.\n", + "// 1) You can add examples into the prompt template to improve extraction quality\n", + "// 2) Introduce additional parameters to take context into account (e.g., include metadata\n", + "// about the document from which the text was extracted.)\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " `You are an expert extraction algorithm.\n", + "Only extract relevant information from the text.\n", + "If you do not know the value of an attribute asked to extract,\n", + "return null for the attribute's value.`,\n", + " ],\n", + " // Please see the how-to about improving performance with\n", + " // reference examples.\n", + " // [\"placeholder\", \"{examples}\"],\n", + " [\"human\", \"{text}\"],\n", + " ],\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "832bf6a1-8e0c-4b6a-aa37-12fe9c42a6d9", + "metadata": {}, + "source": [ + "We need to use a model that supports function/tool calling.\n", + "\n", + "Please review [the documentation](/docs/concepts/tool_calling) for list of some models that can be used with this API." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "04d846a6-d5cb-4009-ac19-61e3aac0177e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ name: \u001b[32m\"Alan Smith\"\u001b[39m, hair_color: \u001b[32m\"blond\"\u001b[39m, height_in_meters: \u001b[32m\"1.83\"\u001b[39m }" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const llm = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + " temperature: 0\n", + "})\n", + "\n", + "const runnable = prompt.pipe(llm.withStructuredOutput(personSchema))\n", + "\n", + "const text = \"Alan Smith is 6 feet tall and has blond hair.\"\n", + "await runnable.invoke({ text })" + ] + }, + { + "cell_type": "markdown", + "id": "bd1c493d-f9dc-4236-8da9-50f6919f5710", + "metadata": {}, + "source": [ + ":::{.callout-important} \n", + "\n", + "Extraction is Generative 🤯\n", + "\n", + "LLMs are generative models, so they can do some pretty cool things like correctly extract the height of the person in meters\n", + "even though it was provided in feet!\n", + ":::\n", + "\n", + "We can see the LangSmith trace [here](https://smith.langchain.com/public/3d44b7e8-e7ca-4e02-951d-3290ccc89d64/r).\n", + "\n", + "Even though we defined our schema with the variable name `personSchema`, Zod is unable to infer this name and therefore does not pass it along to the model. 
To help give the LLM more clues as to what your provided schema represents, you can also give the schema you pass to `withStructuredOutput()` a name:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "02f44203", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ name: \u001b[32m\"Alan Smith\"\u001b[39m, hair_color: \u001b[32m\"blond\"\u001b[39m, height_in_meters: \u001b[32m\"1.83\"\u001b[39m }" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const runnableWithName = prompt.pipe(llm.withStructuredOutput(personSchema, { name: \"person\" }));\n", + "\n", + "const text2 = \"Alan Smith is 6 feet tall and has blond hair.\";\n", + "\n", + "await runnableWithName.invoke({ text: text2 });" + ] + }, + { + "cell_type": "markdown", + "id": "bfe7d31e", + "metadata": {}, + "source": [ + "This can improve performance in many cases." + ] + }, + { + "cell_type": "markdown", + "id": "28c5ef0c-b8d1-4e12-bd0e-e2528de87fcc", + "metadata": {}, + "source": [ + "## Multiple Entities\n", + "\n", + "In **most cases**, you should be extracting a list of entities rather than a single entity.\n", + "\n", + "This can be easily achieved using Zod by nesting models inside one another." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "591a0c16-7a17-4883-91ee-0d6d2fdb265c", + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "\n", + "const person = z.object({\n", + " name: z.string().nullish().describe('The name of the person'),\n", + " hair_color: z.string().nullish().describe(\"The color of the person's hair if known\"),\n", + " height_in_meters: z.number().nullish().describe('Height measured in meters'),\n", + "});\n", + " \n", + "const dataSchema = z.object({\n", + " people: z.array(person).describe('Extracted data about people'),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "5f5cda33-fd7b-481e-956a-703f45e40e1d", + "metadata": {}, + "source": [ + ":::{.callout-important}\n", + "Extraction might not be perfect here. Please continue to see how to use **Reference Examples** to improve the quality of extraction, and see the **guidelines** section!\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "cf7062cc-1d1d-4a37-9122-509d1b87f0a6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " people: [\n", + " { name: \u001b[32m\"Jeff\"\u001b[39m, hair_color: \u001b[32m\"black\"\u001b[39m, height_in_meters: \u001b[33m1.83\u001b[39m },\n", + " { name: \u001b[32m\"Anna\"\u001b[39m, hair_color: \u001b[32m\"black\"\u001b[39m, height_in_meters: \u001b[1mnull\u001b[22m }\n", + " ]\n", + "}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const peopleExtractionChain = prompt.pipe(llm.withStructuredOutput(dataSchema))\n", + "const text3 = \"My name is Jeff, my hair is black and i am 6 feet tall. Anna has the same color hair as me.\"\n", + "await peopleExtractionChain.invoke({ text: text3 })" + ] + }, + { + "cell_type": "markdown", + "id": "fba1d770-bf4d-4de4-9e4f-7384872ef0dc", + "metadata": {}, + "source": [ + ":::{.callout-tip}\n", + "When the schema accommodates the extraction of **multiple entities**, it also allows the model to extract **no entities** if no relevant information\n", + "is in the text by providing an empty list. \n", + "\n", + "This is usually a **good** thing! 
It allows specifying **required** attributes on an entity without necessarily forcing the model to detect this entity.\n", + ":::\n", + "\n", + "We can see the LangSmith trace [here](https://smith.langchain.com/public/272096ab-9ac5-43f9-aa00-3b8443477d17/r)" + ] + }, + { + "cell_type": "markdown", + "id": "f07a7455-7de6-4a6f-9772-0477ef65e3dc", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "Now that you understand the basics of extraction with LangChain, you're ready to proceed to the rest of the how-to guides:\n", + "\n", + "- [Add Examples](/docs/how_to/extraction_examples): Learn how to use **reference examples** to improve performance.\n", + "- [Handle Long Text](/docs/how_to/extraction_long_text): What should you do if the text does not fit into the context window of the LLM?\n", + "- [Use a Parsing Approach](/docs/how_to/extraction_parse): Use a prompt based approach to extract with models that do not support **tool/function calling**." ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "const peopleExtractionChain = prompt.pipe(llm.withStructuredOutput(dataSchema))\n", - "const text3 = \"My name is Jeff, my hair is black and i am 6 feet tall. Anna has the same color hair as me.\"\n", - "await peopleExtractionChain.invoke({ text: text3 })" - ] - }, - { - "cell_type": "markdown", - "id": "fba1d770-bf4d-4de4-9e4f-7384872ef0dc", - "metadata": {}, - "source": [ - ":::{.callout-tip}\n", - "When the schema accommodates the extraction of **multiple entities**, it also allows the model to extract **no entities** if no relevant information\n", - "is in the text by providing an empty list. \n", - "\n", - "This is usually a **good** thing! It allows specifying **required** attributes on an entity without necessarily forcing the model to detect this entity.\n", - ":::\n", - "\n", - "We can see the LangSmith trace [here](https://smith.langchain.com/public/272096ab-9ac5-43f9-aa00-3b8443477d17/r)" - ] - }, - { - "cell_type": "markdown", - "id": "f07a7455-7de6-4a6f-9772-0477ef65e3dc", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "Now that you understand the basics of extraction with LangChain, you're ready to proceed to the rest of the how-to guides:\n", - "\n", - "- [Add Examples](/docs/how_to/extraction_examples): Learn how to use **reference examples** to improve performance.\n", - "- [Handle Long Text](/docs/how_to/extraction_long_text): What should you do if the text does not fit into the context window of the LLM?\n", - "- [Use a Parsing Approach](/docs/how_to/extraction_parse): Use a prompt based approach to extract with models that do not support **tool/function calling**." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/tutorials/llm_chain.ipynb b/docs/core_docs/docs/tutorials/llm_chain.ipynb index b7d1a06a225b..cbb1321a4e13 100644 --- a/docs/core_docs/docs/tutorials/llm_chain.ipynb +++ b/docs/core_docs/docs/tutorials/llm_chain.ipynb @@ -1,575 +1,575 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "63ee3f93", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 0\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9316da0d", - "metadata": {}, - "source": [ - "# Build a Simple LLM Application with LCEL\n", - "\n", - "In this quickstart we'll show you how to build a simple LLM application with LangChain. This application will translate text from English into another language. This is a relatively simple LLM application - it's just a single LLM call plus some prompting. Still, this is a great way to get started with LangChain - a lot of features can be built with just some prompting and an LLM call!\n", - "\n", - "After reading this tutorial, you'll have a high level overview of:\n", - "\n", - "- Using [language models](/docs/concepts/#chat-models)\n", - "\n", - "- Using [PromptTemplates](/docs/concepts/#prompt-templates) and [OutputParsers](/docs/concepts/#output-parsers)\n", - "\n", - "- Using [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language) to chain components together\n", - "\n", - "- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n", - "\n", - "Let's dive in!\n", - "\n", - "## Setup\n", - "\n", - "### Installation\n", - "\n", - "To install LangChain run:\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from '@theme/Npm2Yarn';\n", - "import TabItem from '@theme/TabItem';\n", - "import CodeBlock from \"@theme/CodeBlock\";\n", - "\n", - "\n", - " langchain @langchain/core\n", - "\n", - "```\n", - "\n", - "\n", - "For more details, see our [Installation guide](/docs/how_to/installation/).\n", - "\n", - "### LangSmith\n", - "\n", - "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", - "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", - "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", - "\n", - "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", - "\n", - "```shell\n", - "export LANGCHAIN_TRACING_V2=\"true\"\n", - "export LANGCHAIN_API_KEY=\"...\"\n", - "\n", - "# Reduce tracing latency if you are not in a serverless environment\n", - "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "e5558ca9", - "metadata": {}, - "source": [ - "## Using Language 
Models\n", - "\n", - "First up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "613634d1", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "import { ChatOpenAI } from '@langchain/openai';\n", - "\n", - "const model = new ChatOpenAI({\n", - " model: \"gpt-4o\",\n", - " temperature: 0,\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "ca5642ff", - "metadata": {}, - "source": [ - "Let's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "1b2481f0", - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"ciao!\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"ciao!\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m3\u001b[39m, promptTokens: \u001b[33m20\u001b[39m, totalTokens: \u001b[33m23\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - "}" + "cell_type": "raw", + "id": "63ee3f93", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 0\n", + "---" ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import { HumanMessage, SystemMessage } from \"@langchain/core/messages\"\n", - "\n", - "const messages = [\n", - " new SystemMessage(\"Translate the following from English into Italian\"),\n", - " new HumanMessage(\"hi!\"),\n", - "];\n", - "\n", - "await model.invoke(messages)" - ] - }, - { - "cell_type": "markdown", - "id": "f83373db", - "metadata": {}, - "source": [ - "If we've enable LangSmith, we can see that this run is logged to LangSmith, and can see the [LangSmith trace](https://smith.langchain.com/public/45f1a650-38fb-41e1-9b61-becc0684f2ce/r)" - ] - }, - { - "cell_type": "markdown", - "id": "32bd03ed", - "metadata": {}, - "source": [ - "## OutputParsers\n", - "\n", - "Notice that the response from the model is an `AIMessage`. This contains a string response along with other metadata about the response. Oftentimes we may just want to work with the string response. We can parse out just this response by using a simple output parser.\n", - "\n", - "We first import the simple output parser." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d7ae9c58", - "metadata": {}, - "outputs": [], - "source": [ - "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", - "\n", - "const parser = new StringOutputParser();" - ] - }, - { - "cell_type": "markdown", - "id": "eaebe33a", - "metadata": {}, - "source": [ - "One way to use it is to use it by itself. For example, we could save the result of the language model call and then pass it to the parser." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "6bacb837", - "metadata": {}, - "outputs": [], - "source": [ - "const result = await model.invoke(messages)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "efb8da87", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "\u001b[32m\"ciao!\"\u001b[39m" + "cell_type": "markdown", + "id": "9316da0d", + "metadata": {}, + "source": [ + "# Build a Simple LLM Application with LCEL\n", + "\n", + "In this quickstart we'll show you how to build a simple LLM application with LangChain. This application will translate text from English into another language. This is a relatively simple LLM application - it's just a single LLM call plus some prompting. Still, this is a great way to get started with LangChain - a lot of features can be built with just some prompting and an LLM call!\n", + "\n", + "After reading this tutorial, you'll have a high level overview of:\n", + "\n", + "- Using [language models](/docs/concepts/chat_models)\n", + "\n", + "- Using [PromptTemplates](/docs/concepts/prompt_templates) and [OutputParsers](/docs/concepts/output_parsers)\n", + "\n", + "- Using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to chain components together\n", + "\n", + "- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n", + "\n", + "Let's dive in!\n", + "\n", + "## Setup\n", + "\n", + "### Installation\n", + "\n", + "To install LangChain run:\n", + "\n", + "```{=mdx}\n", + "import Npm2Yarn from '@theme/Npm2Yarn';\n", + "import TabItem from '@theme/TabItem';\n", + "import CodeBlock from \"@theme/CodeBlock\";\n", + "\n", + "\n", + " langchain @langchain/core\n", + "\n", + "```\n", + "\n", + "\n", + "For more details, see our [Installation guide](/docs/how_to/installation/).\n", + "\n", + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n", + "As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n", + "The best way to do this is with [LangSmith](https://smith.langchain.com).\n", + "\n", + "After you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "```shell\n", + "export LANGCHAIN_TRACING_V2=\"true\"\n", + "export LANGCHAIN_API_KEY=\"...\"\n", + "\n", + "# Reduce tracing latency if you are not in a serverless environment\n", + "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", + "```" ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await parser.invoke(result)" - ] - }, - { - "cell_type": "markdown", - "id": "d508b79d", - "metadata": {}, - "source": [ - "## Chaining together components with LCEL\n", - "\n", - "We can also \"chain\" the model to the output parser. This means this output parser will get called with the output from the model. 
This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string).\n", - "\n", - "We can create the chain using the `.pipe()` method. The `.pipe()` method is used in LangChain to combine two elements together." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "9449cfa6", - "metadata": {}, - "outputs": [], - "source": [ - "const chain = model.pipe(parser);" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "3e82f933", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "\u001b[32m\"Ciao!\"\u001b[39m" + "cell_type": "markdown", + "id": "e5558ca9", + "metadata": {}, + "source": [ + "## Using Language Models\n", + "\n", + "First up, let's learn how to use a language model by itself. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "await chain.invoke(messages)" - ] - }, - { - "cell_type": "markdown", - "id": "dd009096", - "metadata": {}, - "source": [ - "This is a simple example of using [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language) to chain together LangChain modules. There are several benefits to this approach, including optimized streaming and tracing support.\n", - "\n", - "If we now look at LangSmith, we can see that the chain has two steps: first the language model is called, then the result of that is passed to the output parser. We can see the [LangSmith trace](https://smith.langchain.com/public/05bec1c1-fc51-4b2c-ab3b-4b63709e4462/r)" - ] - }, - { - "cell_type": "markdown", - "id": "1ab8da31", - "metadata": {}, - "source": [ - "## Prompt Templates\n", - "\n", - "Right now we are passing a list of messages directly into the language model. Where does this list of messages come from? Usually it is constructed from a combination of user input and application logic. This application logic usually takes the raw user input and transforms it into a list of messages ready to pass to the language model. Common transformations include adding a system message or formatting a template with the user input.\n", - "\n", - "PromptTemplates are a concept in LangChain designed to assist with this transformation. They take in raw user input and return data (a prompt) that is ready to pass into a language model. \n", - "\n", - "Let's create a PromptTemplate here. It will take in two user variables:\n", - "\n", - "- `language`: The language to translate text into\n", - "- `text`: The text to translate" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "3e73cc20", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatPromptTemplate } from \"@langchain/core/prompts\"" - ] - }, - { - "cell_type": "markdown", - "id": "7e876c2a", - "metadata": {}, - "source": [ - "First, let's create a string that we will format to be the system message:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "fd75ecde", - "metadata": {}, - "outputs": [], - "source": [ - "const systemTemplate = \"Translate the following into {language}:\"" - ] - }, - { - "cell_type": "markdown", - "id": "fedf6f13", - "metadata": {}, - "source": [ - "Next, we can create the PromptTemplate. 
This will be a combination of the `systemTemplate` as well as a simpler template for where to put the text" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "88e566f3", - "metadata": {}, - "outputs": [], - "source": [ - "const promptTemplate = ChatPromptTemplate.fromMessages(\n", - " [\n", - " [\"system\", systemTemplate],\n", - " [\"user\", \"{text}\"]\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "d9711ba6", - "metadata": {}, - "source": [ - "The input to this prompt template is a dictionary. We can play around with this prompt template by itself to see what it does by itself" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "f781b3cb", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "ChatPromptValue {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " messages: [\n", - " SystemMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: { content: \u001b[32m\"hi\"\u001b[39m, additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"hi\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ]\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompt_values\"\u001b[39m ],\n", - " messages: [\n", - " SystemMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: { content: \u001b[32m\"hi\"\u001b[39m, additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"hi\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ]\n", - "}" + "cell_type": "code", + "execution_count": null, + "id": "613634d1", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatOpenAI } from '@langchain/openai';\n", + "\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + "})" ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "const promptValue = await promptTemplate.invoke({ language: \"italian\", text: \"hi\" })\n", - "\n", - 
"promptValue" - ] - }, - { - "cell_type": "markdown", - "id": "1a49ba9e", - "metadata": {}, - "source": [ - "We can see that it returns a `ChatPromptValue` that consists of two messages. If we want to access the messages directly we do:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "2159b619", - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "[\n", - " SystemMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " HumanMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: { content: \u001b[32m\"hi\"\u001b[39m, additional_kwargs: {}, response_metadata: {} },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"hi\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - "]" + "cell_type": "markdown", + "id": "ca5642ff", + "metadata": {}, + "source": [ + "Let's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method." ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "promptValue.toChatMessages()" - ] - }, - { - "cell_type": "markdown", - "id": "5a4267a8", - "metadata": {}, - "source": [ - "We can now combine this with the model and the output parser from above. This will chain all three components together." 
- ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "6c6beb4b", - "metadata": {}, - "outputs": [], - "source": [ - "const llmChain = promptTemplate.pipe(model).pipe(parser);" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "3e45595a", - "metadata": {}, - "outputs": [ + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "1b2481f0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"ciao!\"\u001b[39m,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"ciao!\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: \u001b[33m3\u001b[39m, promptTokens: \u001b[33m20\u001b[39m, totalTokens: \u001b[33m23\u001b[39m },\n", + " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", + " },\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + "}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { HumanMessage, SystemMessage } from \"@langchain/core/messages\"\n", + "\n", + "const messages = [\n", + " new SystemMessage(\"Translate the following from English into Italian\"),\n", + " new HumanMessage(\"hi!\"),\n", + "];\n", + "\n", + "await model.invoke(messages)" + ] + }, + { + "cell_type": "markdown", + "id": "f83373db", + "metadata": {}, + "source": [ + "If we've enable LangSmith, we can see that this run is logged to LangSmith, and can see the [LangSmith trace](https://smith.langchain.com/public/45f1a650-38fb-41e1-9b61-becc0684f2ce/r)" + ] + }, + { + "cell_type": "markdown", + "id": "32bd03ed", + "metadata": {}, + "source": [ + "## OutputParsers\n", + "\n", + "Notice that the response from the model is an `AIMessage`. This contains a string response along with other metadata about the response. Oftentimes we may just want to work with the string response. We can parse out just this response by using a simple output parser.\n", + "\n", + "We first import the simple output parser." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d7ae9c58", + "metadata": {}, + "outputs": [], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "const parser = new StringOutputParser();" + ] + }, + { + "cell_type": "markdown", + "id": "eaebe33a", + "metadata": {}, + "source": [ + "One way to use it is to use it by itself. For example, we could save the result of the language model call and then pass it to the parser." 
+ ] + }, { - "data": { - "text/plain": [ - "\u001b[32m\"ciao\"\u001b[39m" + "cell_type": "code", + "execution_count": 3, + "id": "6bacb837", + "metadata": {}, + "outputs": [], + "source": [ + "const result = await model.invoke(messages)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "efb8da87", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"ciao!\"\u001b[39m" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await parser.invoke(result)" + ] + }, + { + "cell_type": "markdown", + "id": "d508b79d", + "metadata": {}, + "source": [ + "## Chaining together components with LCEL\n", + "\n", + "We can also \"chain\" the model to the output parser. This means this output parser will get called with the output from the model. This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string).\n", + "\n", + "We can create the chain using the `.pipe()` method. The `.pipe()` method is used in LangChain to combine two elements together." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "9449cfa6", + "metadata": {}, + "outputs": [], + "source": [ + "const chain = model.pipe(parser);" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "3e82f933", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Ciao!\"\u001b[39m" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain.invoke(messages)" + ] + }, + { + "cell_type": "markdown", + "id": "dd009096", + "metadata": {}, + "source": [ + "This is a simple example of using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to chain together LangChain modules. There are several benefits to this approach, including optimized streaming and tracing support.\n", + "\n", + "If we now look at LangSmith, we can see that the chain has two steps: first the language model is called, then the result of that is passed to the output parser. We can see the [LangSmith trace](https://smith.langchain.com/public/05bec1c1-fc51-4b2c-ab3b-4b63709e4462/r)" + ] + }, + { + "cell_type": "markdown", + "id": "1ab8da31", + "metadata": {}, + "source": [ + "## Prompt Templates\n", + "\n", + "Right now we are passing a list of messages directly into the language model. Where does this list of messages come from? Usually it is constructed from a combination of user input and application logic. This application logic usually takes the raw user input and transforms it into a list of messages ready to pass to the language model. Common transformations include adding a system message or formatting a template with the user input.\n", + "\n", + "PromptTemplates are a concept in LangChain designed to assist with this transformation. They take in raw user input and return data (a prompt) that is ready to pass into a language model. \n", + "\n", + "Let's create a PromptTemplate here. 
It will take in two user variables:\n", + "\n", + "- `language`: The language to translate text into\n", + "- `text`: The text to translate" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "3e73cc20", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"" + ] + }, + { + "cell_type": "markdown", + "id": "7e876c2a", + "metadata": {}, + "source": [ + "First, let's create a string that we will format to be the system message:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "fd75ecde", + "metadata": {}, + "outputs": [], + "source": [ + "const systemTemplate = \"Translate the following into {language}:\"" + ] + }, + { + "cell_type": "markdown", + "id": "fedf6f13", + "metadata": {}, + "source": [ + "Next, we can create the PromptTemplate. This will be a combination of the `systemTemplate` as well as a simpler template for where to put the text" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "88e566f3", + "metadata": {}, + "outputs": [], + "source": [ + "const promptTemplate = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\"system\", systemTemplate],\n", + " [\"user\", \"{text}\"]\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d9711ba6", + "metadata": {}, + "source": [ + "The input to this prompt template is a dictionary. We can play around with this prompt template by itself to see what it does by itself" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f781b3cb", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ChatPromptValue {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " messages: [\n", + " SystemMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: { content: \u001b[32m\"hi\"\u001b[39m, additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"hi\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " }\n", + " ]\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompt_values\"\u001b[39m ],\n", + " messages: [\n", + " SystemMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: { content: \u001b[32m\"hi\"\u001b[39m, 
additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"hi\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const promptValue = await promptTemplate.invoke({ language: \"italian\", text: \"hi\" })\n", + "\n", + "promptValue" + ] + }, + { + "cell_type": "markdown", + "id": "1a49ba9e", + "metadata": {}, + "source": [ + "We can see that it returns a `ChatPromptValue` that consists of two messages. If we want to access the messages directly we do:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "2159b619", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " SystemMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"Translate the following into italian:\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " },\n", + " HumanMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: { content: \u001b[32m\"hi\"\u001b[39m, additional_kwargs: {}, response_metadata: {} },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"hi\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: {}\n", + " }\n", + "]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "promptValue.toChatMessages()" + ] + }, + { + "cell_type": "markdown", + "id": "5a4267a8", + "metadata": {}, + "source": [ + "We can now combine this with the model and the output parser from above. This will chain all three components together." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "6c6beb4b", + "metadata": {}, + "outputs": [], + "source": [ + "const llmChain = promptTemplate.pipe(model).pipe(parser);" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "3e45595a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"ciao\"\u001b[39m" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await llmChain.invoke({ language: \"italian\", text: \"hi\" })" + ] + }, + { + "cell_type": "markdown", + "id": "0b19cecb", + "metadata": {}, + "source": [ + "If we take a look at the LangSmith trace, we can see all three components show up in the [LangSmith trace](https://smith.langchain.com/public/cef6edcd-39ed-4c1e-86f7-491a1b611aeb/r)" + ] + }, + { + "cell_type": "markdown", + "id": "befdb168", + "metadata": {}, + "source": [ + "## Conclusion\n", + "\n", + "That's it! In this tutorial you've learned how to create your first simple LLM application. 
You've learned how to work with language models, how to parse their outputs, how to create a prompt template, chaining them together with LCEL, and how to get great observability into chains you create with LangSmith.\n", + "\n", + "This just scratches the surface of what you will want to learn to become a proficient AI Engineer. Luckily - we've got a lot of other resources!\n", + "\n", + "For further reading on the core concepts of LangChain, we've got detailed [Conceptual Guides](/docs/concepts).\n", + "\n", + "If you have more specific questions on these concepts, check out the following sections of the how-to guides:\n", + "\n", + "- [LangChain Expression Language (LCEL)](/docs/how_to/#langchain-expression-language)\n", + "- [Prompt templates](/docs/how_to/#prompt-templates)\n", + "- [Chat models](/docs/how_to/#chat-models)\n", + "- [Output parsers](/docs/how_to/#output-parsers)\n", + "\n", + "And the LangSmith docs:\n", + "\n", + "- [LangSmith](https://docs.smith.langchain.com)" ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" } - ], - "source": [ - "await llmChain.invoke({ language: \"italian\", text: \"hi\" })" - ] - }, - { - "cell_type": "markdown", - "id": "0b19cecb", - "metadata": {}, - "source": [ - "If we take a look at the LangSmith trace, we can see all three components show up in the [LangSmith trace](https://smith.langchain.com/public/cef6edcd-39ed-4c1e-86f7-491a1b611aeb/r)" - ] - }, - { - "cell_type": "markdown", - "id": "befdb168", - "metadata": {}, - "source": [ - "## Conclusion\n", - "\n", - "That's it! In this tutorial you've learned how to create your first simple LLM application. You've learned how to work with language models, how to parse their outputs, how to create a prompt template, chaining them together with LCEL, and how to get great observability into chains you create with LangSmith.\n", - "\n", - "This just scratches the surface of what you will want to learn to become a proficient AI Engineer. 
Luckily - we've got a lot of other resources!\n", - "\n", - "For further reading on the core concepts of LangChain, we've got detailed [Conceptual Guides](/docs/concepts).\n", - "\n", - "If you have more specific questions on these concepts, check out the following sections of the how-to guides:\n", - "\n", - "- [LangChain Expression Language (LCEL)](/docs/how_to/#langchain-expression-language)\n", - "- [Prompt templates](/docs/how_to/#prompt-templates)\n", - "- [Chat models](/docs/how_to/#chat-models)\n", - "- [Output parsers](/docs/how_to/#output-parsers)\n", - "\n", - "And the LangSmith docs:\n", - "\n", - "- [LangSmith](https://docs.smith.langchain.com)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Deno", - "language": "typescript", - "name": "deno" + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } }, - "language_info": { - "file_extension": ".ts", - "mimetype": "text/x.typescript", - "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.3.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/tutorials/pdf_qa.ipynb b/docs/core_docs/docs/tutorials/pdf_qa.ipynb index c09bdaf6f8fb..6408a3bb669e 100644 --- a/docs/core_docs/docs/tutorials/pdf_qa.ipynb +++ b/docs/core_docs/docs/tutorials/pdf_qa.ipynb @@ -1,420 +1,420 @@ { - "cells": [ - { - "cell_type": "raw", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "keywords: [pdf, document loader]\n", - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Build a PDF ingestion and Question/Answering system\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Document loaders](/docs/concepts/#document-loaders)\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Embeddings](/docs/concepts/#embedding-models)\n", - "- [Vector stores](/docs/concepts/#vector-stores)\n", - "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n", - "\n", - ":::\n", - "\n", - "PDF files often hold crucial unstructured data unavailable from other sources. They can be quite lengthy, and unlike plain text files, cannot generally be fed directly into the prompt of a language model.\n", - "\n", - "In this tutorial, you'll create a system that can answer questions about PDF files. More specifically, you'll use a [Document Loader](/docs/concepts/#document-loaders) to load text in a format usable by an LLM, then build a retrieval-augmented generation (RAG) pipeline to answer questions, including citations from the source material.\n", - "\n", - "This tutorial will gloss over some concepts more deeply covered in our [RAG](/docs/tutorials/rag/) tutorial, so you may want to go through those first if you haven't already.\n", - "\n", - "Let's dive in!\n", - "\n", - "## Loading documents\n", - "\n", - "First, you'll need to choose a PDF to load. We'll use a document from [Nike's annual public SEC report](https://s1.q4cdn.com/806093406/files/doc_downloads/2023/414759-1-_5_Nike-NPS-Combo_Form-10-K_WR.pdf). It's over 100 pages long, and contains some crucial data mixed with longer explanatory text. 
However, you can feel free to use a PDF of your choosing.\n", - "\n", - "Once you've chosen your PDF, the next step is to load it into a format that an LLM can more easily handle, since LLMs generally require text inputs. LangChain has a few different [built-in document loaders](/docs/how_to/document_loader_pdf/) for this purpose which you can experiment with. Below, we'll use one powered by the [`pdf-parse`](https://www.npmjs.com/package/pdf-parse) package that reads from a filepath:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "107\n" - ] - } - ], - "source": [ - "import \"pdf-parse\"; // Peer dep\n", - "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", - "\n", - "const loader = new PDFLoader(\"../../data/nke-10k-2023.pdf\");\n", - "\n", - "const docs = await loader.load();\n", - "\n", - "console.log(docs.length);" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "keywords: [pdf, document loader]\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Table of Contents\n", - "UNITED STATES\n", - "SECURITIES AND EXCHANGE COMMISSION\n", - "Washington, D.C. 20549\n", - "FORM 10-K\n", - "\n", - "{\n", - " source: '../../data/nke-10k-2023.pdf',\n", - " pdf: {\n", - " version: '1.10.100',\n", - " info: {\n", - " PDFFormatVersion: '1.4',\n", - " IsAcroFormPresent: false,\n", - " IsXFAPresent: false,\n", - " Title: '0000320187-23-000039',\n", - " Author: 'EDGAR Online, a division of Donnelley Financial Solutions',\n", - " Subject: 'Form 10-K filed on 2023-07-20 for the period ending 2023-05-31',\n", - " Keywords: '0000320187-23-000039; ; 10-K',\n", - " Creator: 'EDGAR Filing HTML Converter',\n", - " Producer: 'EDGRpdf Service w/ EO.Pdf 22.0.40.0',\n", - " CreationDate: \"D:20230720162200-04'00'\",\n", - " ModDate: \"D:20230720162208-04'00'\"\n", - " },\n", - " metadata: null,\n", - " totalPages: 107\n", - " },\n", - " loc: { pageNumber: 1 }\n", - "}\n" - ] - } - ], - "source": [ - "console.log(docs[0].pageContent.slice(0, 100));\n", - "console.log(docs[0].metadata)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "So what just happened?\n", - "\n", - "- The loader reads the PDF at the specified path into memory.\n", - "- It then extracts text data using the `pdf-parse` package.\n", - "- Finally, it creates a LangChain [Document](/docs/concepts/#documents) for each page of the PDF with the page's content and some metadata about where in the document the text came from.\n", - "\n", - "LangChain has [many other document loaders](/docs/integrations/document_loaders/) for other data sources, or you can create a [custom document loader](/docs/how_to/document_loader_custom/).\n", - "\n", - "## Question answering with RAG\n", - "\n", - "Next, you'll prepare the loaded documents for later retrieval. Using a [text splitter](/docs/concepts/#text-splitters), you'll split your loaded documents into smaller documents that can more easily fit into an LLM's context window, then load them into a [vector store](/docs/concepts/#vectorstores). 
You can then create a [retriever](/docs/concepts/#retrievers) from the vector store for use in our RAG chain:\n", - "\n", - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", - "\n", - "const textSplitter = new RecursiveCharacterTextSplitter({\n", - " chunkSize: 1000,\n", - " chunkOverlap: 200,\n", - "});\n", - "\n", - "const splits = await textSplitter.splitDocuments(docs);\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", - "\n", - "const retriever = vectorstore.asRetriever();" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, you'll use some built-in helpers to construct the final `ragChain`:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Build a PDF ingestion and Question/Answering system\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Document loaders](/docs/concepts/document_loaders)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Embeddings](/docs/concepts/embedding_models)\n", + "- [Vector stores](/docs/concepts/#vector-stores)\n", + "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n", + "\n", + ":::\n", + "\n", + "PDF files often hold crucial unstructured data unavailable from other sources. They can be quite lengthy, and unlike plain text files, cannot generally be fed directly into the prompt of a language model.\n", + "\n", + "In this tutorial, you'll create a system that can answer questions about PDF files. More specifically, you'll use a [Document Loader](/docs/concepts/document_loaders) to load text in a format usable by an LLM, then build a retrieval-augmented generation (RAG) pipeline to answer questions, including citations from the source material.\n", + "\n", + "This tutorial will gloss over some concepts more deeply covered in our [RAG](/docs/tutorials/rag/) tutorial, so you may want to go through those first if you haven't already.\n", + "\n", + "Let's dive in!\n", + "\n", + "## Loading documents\n", + "\n", + "First, you'll need to choose a PDF to load. We'll use a document from [Nike's annual public SEC report](https://s1.q4cdn.com/806093406/files/doc_downloads/2023/414759-1-_5_Nike-NPS-Combo_Form-10-K_WR.pdf). It's over 100 pages long, and contains some crucial data mixed with longer explanatory text. However, you can feel free to use a PDF of your choosing.\n", + "\n", + "Once you've chosen your PDF, the next step is to load it into a format that an LLM can more easily handle, since LLMs generally require text inputs. LangChain has a few different [built-in document loaders](/docs/how_to/document_loader_pdf/) for this purpose which you can experiment with. 
Below, we'll use one powered by the [`pdf-parse`](https://www.npmjs.com/package/pdf-parse) package that reads from a filepath:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " input: \"What was Nike's revenue in 2023?\",\n", - " chat_history: [],\n", - " context: [\n", - " Document {\n", - " pageContent: 'Enterprise Resource Planning Platform, data and analytics, demand sensing, insight gathering, and other areas to create an end-to-end technology foundation, which we\\n' +\n", - " 'believe will further accelerate our digital transformation. We believe this unified approach will accelerate growth and unlock more efficiency for our business, while driving\\n' +\n", - " 'speed and responsiveness as we serve consumers globally.\\n' +\n", - " 'FINANCIAL HIGHLIGHTS\\n' +\n", - " '•In fiscal 2023, NIKE, Inc. achieved record Revenues of $51.2 billion, which increased 10% and 16% on a reported and currency-neutral basis, respectively\\n' +\n", - " '•NIKE Direct revenues grew 14% from $18.7 billion in fiscal 2022 to $21.3 billion in fiscal 2023, and represented approximately 44% of total NIKE Brand revenues for\\n' +\n", - " 'fiscal 2023\\n' +\n", - " '•Gross margin for the fiscal year decreased 250 basis points to 43.5% primarily driven by higher product costs, higher markdowns and unfavorable changes in foreign\\n' +\n", - " 'currency exchange rates, partially offset by strategic pricing actions',\n", - " metadata: [Object]\n", - " },\n", - " Document {\n", - " pageContent: 'Table of Contents\\n' +\n", - " 'FISCAL 2023 NIKE BRAND REVENUE HIGHLIGHTS\\n' +\n", - " 'The following tables present NIKE Brand revenues disaggregated by reportable operating segment, distribution channel and major product line:\\n' +\n", - " 'FISCAL 2023 COMPARED TO FISCAL 2022\\n' +\n", - " '•NIKE, Inc. Revenues were $51.2 billion in fiscal 2023, which increased 10% and 16% compared to fiscal 2022 on a reported and currency-neutral basis, respectively.\\n' +\n", - " 'The increase was due to higher revenues in North America, Europe, Middle East & Africa (\"EMEA\"), APLA and Greater China, which contributed approximately 7, 6,\\n' +\n", - " '2 and 1 percentage points to NIKE, Inc. Revenues, respectively.\\n' +\n", - " '•NIKE Brand revenues, which represented over 90% of NIKE, Inc. Revenues, increased 10% and 16% on a reported and currency-neutral basis, respectively. 
This\\n' +\n", - " \"increase was primarily due to higher revenues in Men's, the Jordan Brand, Women's and Kids' which grew 17%, 35%,11% and 10%, respectively, on a wholesale\\n\" +\n", - " 'equivalent basis.',\n", - " metadata: [Object]\n", - " },\n", - " Document {\n", - " pageContent: 'Table of Contents\\n' +\n", - " 'EUROPE, MIDDLE EAST & AFRICA\\n' +\n", - " '(Dollars in millions)\\n' +\n", - " 'FISCAL 2023FISCAL 2022% CHANGE\\n' +\n", - " '% CHANGE\\n' +\n", - " 'EXCLUDING\\n' +\n", - " 'CURRENCY\\n' +\n", - " 'CHANGESFISCAL 2021% CHANGE\\n' +\n", - " '% CHANGE\\n' +\n", - " 'EXCLUDING\\n' +\n", - " 'CURRENCY\\n' +\n", - " 'CHANGES\\n' +\n", - " 'Revenues by:\\n' +\n", - " 'Footwear$8,260 $7,388 12 %25 %$6,970 6 %9 %\\n' +\n", - " 'Apparel4,566 4,527 1 %14 %3,996 13 %16 %\\n' +\n", - " 'Equipment592 564 5 %18 %490 15 %17 %\\n' +\n", - " 'TOTAL REVENUES$13,418 $12,479 8 %21 %$11,456 9 %12 %\\n' +\n", - " 'Revenues by: \\n' +\n", - " 'Sales to Wholesale Customers$8,522 $8,377 2 %15 %$7,812 7 %10 %\\n' +\n", - " 'Sales through NIKE Direct4,896 4,102 19 %33 %3,644 13 %15 %\\n' +\n", - " 'TOTAL REVENUES$13,418 $12,479 8 %21 %$11,456 9 %12 %\\n' +\n", - " 'EARNINGS BEFORE INTEREST AND TAXES$3,531 $3,293 7 %$2,435 35 % \\n' +\n", - " 'FISCAL 2023 COMPARED TO FISCAL 2022\\n' +\n", - " \"•EMEA revenues increased 21% on a currency-neutral basis, due to higher revenues in Men's, the Jordan Brand, Women's and Kids'. NIKE Direct revenues\\n\" +\n", - " 'increased 33%, driven primarily by strong digital sales growth of 43% and comparable store sales growth of 22%.',\n", - " metadata: [Object]\n", - " },\n", - " Document {\n", - " pageContent: 'Table of Contents\\n' +\n", - " 'NORTH AMERICA\\n' +\n", - " '(Dollars in millions)\\n' +\n", - " 'FISCAL 2023FISCAL 2022% CHANGE\\n' +\n", - " '% CHANGE\\n' +\n", - " 'EXCLUDING\\n' +\n", - " 'CURRENCY\\n' +\n", - " 'CHANGESFISCAL 2021% CHANGE\\n' +\n", - " '% CHANGE\\n' +\n", - " 'EXCLUDING\\n' +\n", - " 'CURRENCY\\n' +\n", - " 'CHANGES\\n' +\n", - " 'Revenues by:\\n' +\n", - " 'Footwear$14,897 $12,228 22 %22 %$11,644 5 %5 %\\n' +\n", - " 'Apparel5,947 5,492 8 %9 %5,028 9 %9 %\\n' +\n", - " 'Equipment764 633 21 %21 %507 25 %25 %\\n' +\n", - " 'TOTAL REVENUES$21,608 $18,353 18 %18 %$17,179 7 %7 %\\n' +\n", - " 'Revenues by: \\n' +\n", - " 'Sales to Wholesale Customers$11,273 $9,621 17 %18 %$10,186 -6 %-6 %\\n' +\n", - " 'Sales through NIKE Direct10,335 8,732 18 %18 %6,993 25 %25 %\\n' +\n", - " 'TOTAL REVENUES$21,608 $18,353 18 %18 %$17,179 7 %7 %\\n' +\n", - " 'EARNINGS BEFORE INTEREST AND TAXES$5,454 $5,114 7 %$5,089 0 %\\n' +\n", - " 'FISCAL 2023 COMPARED TO FISCAL 2022\\n' +\n", - " \"•North America revenues increased 18% on a currency-neutral basis, primarily due to higher revenues in Men's and the Jordan Brand. NIKE Direct revenues\\n\" +\n", - " 'increased 18%, driven by strong digital sales growth of 23%, comparable store sales growth of 9% and the addition of new stores.',\n", - " metadata: [Object]\n", - " }\n", - " ],\n", - " answer: 'According to the financial highlights, Nike, Inc. 
achieved record revenues of $51.2 billion in fiscal 2023, which increased 10% on a reported basis and 16% on a currency-neutral basis compared to fiscal 2022.'\n", - "}\n" - ] - } - ], - "source": [ - "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", - "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "\n", - "const systemTemplate = [\n", - " `You are an assistant for question-answering tasks. `,\n", - " `Use the following pieces of retrieved context to answer `,\n", - " `the question. If you don't know the answer, say that you `,\n", - " `don't know. Use three sentences maximum and keep the `,\n", - " `answer concise.`,\n", - " `\\n\\n`,\n", - " `{context}`,\n", - "].join(\"\");\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", systemTemplate],\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const questionAnswerChain = await createStuffDocumentsChain({ llm, prompt });\n", - "const ragChain = await createRetrievalChain({ retriever, combineDocsChain: questionAnswerChain });\n", - "\n", - "const results = await ragChain.invoke({\n", - " input: \"What was Nike's revenue in 2023?\",\n", - "});\n", - "\n", - "console.log(results);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see that you get both a final answer in the `answer` key of the results object, and the `context` the LLM used to generate an answer.\n", - "\n", - "Examining the values under the `context` further, you can see that they are documents that each contain a chunk of the ingested page content. Usefully, these documents also preserve the original metadata from way back when you first loaded them:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "107\n" + ] + } + ], + "source": [ + "import \"pdf-parse\"; // Peer dep\n", + "import { PDFLoader } from \"@langchain/community/document_loaders/fs/pdf\";\n", + "\n", + "const loader = new PDFLoader(\"../../data/nke-10k-2023.pdf\");\n", + "\n", + "const docs = await loader.load();\n", + "\n", + "console.log(docs.length);" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Enterprise Resource Planning Platform, data and analytics, demand sensing, insight gathering, and other areas to create an end-to-end technology foundation, which we\n", - "believe will further accelerate our digital transformation. We believe this unified approach will accelerate growth and unlock more efficiency for our business, while driving\n", - "speed and responsiveness as we serve consumers globally.\n", - "FINANCIAL HIGHLIGHTS\n", - "•In fiscal 2023, NIKE, Inc. 
achieved record Revenues of $51.2 billion, which increased 10% and 16% on a reported and currency-neutral basis, respectively\n", - "•NIKE Direct revenues grew 14% from $18.7 billion in fiscal 2022 to $21.3 billion in fiscal 2023, and represented approximately 44% of total NIKE Brand revenues for\n", - "fiscal 2023\n", - "•Gross margin for the fiscal year decreased 250 basis points to 43.5% primarily driven by higher product costs, higher markdowns and unfavorable changes in foreign\n", - "currency exchange rates, partially offset by strategic pricing actions\n" - ] - } - ], - "source": [ - "console.log(results.context[0].pageContent);" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Table of Contents\n", + "UNITED STATES\n", + "SECURITIES AND EXCHANGE COMMISSION\n", + "Washington, D.C. 20549\n", + "FORM 10-K\n", + "\n", + "{\n", + " source: '../../data/nke-10k-2023.pdf',\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: {\n", + " PDFFormatVersion: '1.4',\n", + " IsAcroFormPresent: false,\n", + " IsXFAPresent: false,\n", + " Title: '0000320187-23-000039',\n", + " Author: 'EDGAR Online, a division of Donnelley Financial Solutions',\n", + " Subject: 'Form 10-K filed on 2023-07-20 for the period ending 2023-05-31',\n", + " Keywords: '0000320187-23-000039; ; 10-K',\n", + " Creator: 'EDGAR Filing HTML Converter',\n", + " Producer: 'EDGRpdf Service w/ EO.Pdf 22.0.40.0',\n", + " CreationDate: \"D:20230720162200-04'00'\",\n", + " ModDate: \"D:20230720162208-04'00'\"\n", + " },\n", + " metadata: null,\n", + " totalPages: 107\n", + " },\n", + " loc: { pageNumber: 1 }\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].pageContent.slice(0, 100));\n", + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "So what just happened?\n", + "\n", + "- The loader reads the PDF at the specified path into memory.\n", + "- It then extracts text data using the `pdf-parse` package.\n", + "- Finally, it creates a LangChain [Document](https://api.js.langchain.com/classes/_langchain_core.documents.Document.html) for each page of the PDF with the page's content and some metadata about where in the document the text came from.\n", + "\n", + "LangChain has [many other document loaders](/docs/integrations/document_loaders/) for other data sources, or you can create a [custom document loader](/docs/how_to/document_loader_custom/).\n", + "\n", + "## Question answering with RAG\n", + "\n", + "Next, you'll prepare the loaded documents for later retrieval. Using a [text splitter](/docs/concepts/text_splitters), you'll split your loaded documents into smaller documents that can more easily fit into an LLM's context window, then load them into a [vector store](/docs/concepts/#vectorstores). 
You can then create a [retriever](/docs/concepts/retrievers) from the vector store for use in our RAG chain:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { RecursiveCharacterTextSplitter } from \"@langchain/textsplitters\";\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({\n", + " chunkSize: 1000,\n", + " chunkOverlap: 200,\n", + "});\n", + "\n", + "const splits = await textSplitter.splitDocuments(docs);\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", + "\n", + "const retriever = vectorstore.asRetriever();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, you'll use some built-in helpers to construct the final `ragChain`:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " source: '../../data/nke-10k-2023.pdf',\n", - " pdf: {\n", - " version: '1.10.100',\n", - " info: {\n", - " PDFFormatVersion: '1.4',\n", - " IsAcroFormPresent: false,\n", - " IsXFAPresent: false,\n", - " Title: '0000320187-23-000039',\n", - " Author: 'EDGAR Online, a division of Donnelley Financial Solutions',\n", - " Subject: 'Form 10-K filed on 2023-07-20 for the period ending 2023-05-31',\n", - " Keywords: '0000320187-23-000039; ; 10-K',\n", - " Creator: 'EDGAR Filing HTML Converter',\n", - " Producer: 'EDGRpdf Service w/ EO.Pdf 22.0.40.0',\n", - " CreationDate: \"D:20230720162200-04'00'\",\n", - " ModDate: \"D:20230720162208-04'00'\"\n", - " },\n", - " metadata: null,\n", - " totalPages: 107\n", - " },\n", - " loc: { pageNumber: 31, lines: { from: 14, to: 22 } }\n", - "}\n" - ] + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " input: \"What was Nike's revenue in 2023?\",\n", + " chat_history: [],\n", + " context: [\n", + " Document {\n", + " pageContent: 'Enterprise Resource Planning Platform, data and analytics, demand sensing, insight gathering, and other areas to create an end-to-end technology foundation, which we\\n' +\n", + " 'believe will further accelerate our digital transformation. We believe this unified approach will accelerate growth and unlock more efficiency for our business, while driving\\n' +\n", + " 'speed and responsiveness as we serve consumers globally.\\n' +\n", + " 'FINANCIAL HIGHLIGHTS\\n' +\n", + " '•In fiscal 2023, NIKE, Inc. 
achieved record Revenues of $51.2 billion, which increased 10% and 16% on a reported and currency-neutral basis, respectively\\n' +\n", + " '•NIKE Direct revenues grew 14% from $18.7 billion in fiscal 2022 to $21.3 billion in fiscal 2023, and represented approximately 44% of total NIKE Brand revenues for\\n' +\n", + " 'fiscal 2023\\n' +\n", + " '•Gross margin for the fiscal year decreased 250 basis points to 43.5% primarily driven by higher product costs, higher markdowns and unfavorable changes in foreign\\n' +\n", + " 'currency exchange rates, partially offset by strategic pricing actions',\n", + " metadata: [Object]\n", + " },\n", + " Document {\n", + " pageContent: 'Table of Contents\\n' +\n", + " 'FISCAL 2023 NIKE BRAND REVENUE HIGHLIGHTS\\n' +\n", + " 'The following tables present NIKE Brand revenues disaggregated by reportable operating segment, distribution channel and major product line:\\n' +\n", + " 'FISCAL 2023 COMPARED TO FISCAL 2022\\n' +\n", + " '•NIKE, Inc. Revenues were $51.2 billion in fiscal 2023, which increased 10% and 16% compared to fiscal 2022 on a reported and currency-neutral basis, respectively.\\n' +\n", + " 'The increase was due to higher revenues in North America, Europe, Middle East & Africa (\"EMEA\"), APLA and Greater China, which contributed approximately 7, 6,\\n' +\n", + " '2 and 1 percentage points to NIKE, Inc. Revenues, respectively.\\n' +\n", + " '•NIKE Brand revenues, which represented over 90% of NIKE, Inc. Revenues, increased 10% and 16% on a reported and currency-neutral basis, respectively. This\\n' +\n", + " \"increase was primarily due to higher revenues in Men's, the Jordan Brand, Women's and Kids' which grew 17%, 35%,11% and 10%, respectively, on a wholesale\\n\" +\n", + " 'equivalent basis.',\n", + " metadata: [Object]\n", + " },\n", + " Document {\n", + " pageContent: 'Table of Contents\\n' +\n", + " 'EUROPE, MIDDLE EAST & AFRICA\\n' +\n", + " '(Dollars in millions)\\n' +\n", + " 'FISCAL 2023FISCAL 2022% CHANGE\\n' +\n", + " '% CHANGE\\n' +\n", + " 'EXCLUDING\\n' +\n", + " 'CURRENCY\\n' +\n", + " 'CHANGESFISCAL 2021% CHANGE\\n' +\n", + " '% CHANGE\\n' +\n", + " 'EXCLUDING\\n' +\n", + " 'CURRENCY\\n' +\n", + " 'CHANGES\\n' +\n", + " 'Revenues by:\\n' +\n", + " 'Footwear$8,260 $7,388 12 %25 %$6,970 6 %9 %\\n' +\n", + " 'Apparel4,566 4,527 1 %14 %3,996 13 %16 %\\n' +\n", + " 'Equipment592 564 5 %18 %490 15 %17 %\\n' +\n", + " 'TOTAL REVENUES$13,418 $12,479 8 %21 %$11,456 9 %12 %\\n' +\n", + " 'Revenues by: \\n' +\n", + " 'Sales to Wholesale Customers$8,522 $8,377 2 %15 %$7,812 7 %10 %\\n' +\n", + " 'Sales through NIKE Direct4,896 4,102 19 %33 %3,644 13 %15 %\\n' +\n", + " 'TOTAL REVENUES$13,418 $12,479 8 %21 %$11,456 9 %12 %\\n' +\n", + " 'EARNINGS BEFORE INTEREST AND TAXES$3,531 $3,293 7 %$2,435 35 % \\n' +\n", + " 'FISCAL 2023 COMPARED TO FISCAL 2022\\n' +\n", + " \"•EMEA revenues increased 21% on a currency-neutral basis, due to higher revenues in Men's, the Jordan Brand, Women's and Kids'. 
NIKE Direct revenues\\n\" +\n", + " 'increased 33%, driven primarily by strong digital sales growth of 43% and comparable store sales growth of 22%.',\n", + " metadata: [Object]\n", + " },\n", + " Document {\n", + " pageContent: 'Table of Contents\\n' +\n", + " 'NORTH AMERICA\\n' +\n", + " '(Dollars in millions)\\n' +\n", + " 'FISCAL 2023FISCAL 2022% CHANGE\\n' +\n", + " '% CHANGE\\n' +\n", + " 'EXCLUDING\\n' +\n", + " 'CURRENCY\\n' +\n", + " 'CHANGESFISCAL 2021% CHANGE\\n' +\n", + " '% CHANGE\\n' +\n", + " 'EXCLUDING\\n' +\n", + " 'CURRENCY\\n' +\n", + " 'CHANGES\\n' +\n", + " 'Revenues by:\\n' +\n", + " 'Footwear$14,897 $12,228 22 %22 %$11,644 5 %5 %\\n' +\n", + " 'Apparel5,947 5,492 8 %9 %5,028 9 %9 %\\n' +\n", + " 'Equipment764 633 21 %21 %507 25 %25 %\\n' +\n", + " 'TOTAL REVENUES$21,608 $18,353 18 %18 %$17,179 7 %7 %\\n' +\n", + " 'Revenues by: \\n' +\n", + " 'Sales to Wholesale Customers$11,273 $9,621 17 %18 %$10,186 -6 %-6 %\\n' +\n", + " 'Sales through NIKE Direct10,335 8,732 18 %18 %6,993 25 %25 %\\n' +\n", + " 'TOTAL REVENUES$21,608 $18,353 18 %18 %$17,179 7 %7 %\\n' +\n", + " 'EARNINGS BEFORE INTEREST AND TAXES$5,454 $5,114 7 %$5,089 0 %\\n' +\n", + " 'FISCAL 2023 COMPARED TO FISCAL 2022\\n' +\n", + " \"•North America revenues increased 18% on a currency-neutral basis, primarily due to higher revenues in Men's and the Jordan Brand. NIKE Direct revenues\\n\" +\n", + " 'increased 18%, driven by strong digital sales growth of 23%, comparable store sales growth of 9% and the addition of new stores.',\n", + " metadata: [Object]\n", + " }\n", + " ],\n", + " answer: 'According to the financial highlights, Nike, Inc. achieved record revenues of $51.2 billion in fiscal 2023, which increased 10% on a reported basis and 16% on a currency-neutral basis compared to fiscal 2022.'\n", + "}\n" + ] + } + ], + "source": [ + "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const systemTemplate = [\n", + " `You are an assistant for question-answering tasks. `,\n", + " `Use the following pieces of retrieved context to answer `,\n", + " `the question. If you don't know the answer, say that you `,\n", + " `don't know. Use three sentences maximum and keep the `,\n", + " `answer concise.`,\n", + " `\\n\\n`,\n", + " `{context}`,\n", + "].join(\"\");\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", systemTemplate],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const questionAnswerChain = await createStuffDocumentsChain({ llm, prompt });\n", + "const ragChain = await createRetrievalChain({ retriever, combineDocsChain: questionAnswerChain });\n", + "\n", + "const results = await ragChain.invoke({\n", + " input: \"What was Nike's revenue in 2023?\",\n", + "});\n", + "\n", + "console.log(results);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that you get both a final answer in the `answer` key of the results object, and the `context` the LLM used to generate an answer.\n", + "\n", + "Examining the values under the `context` further, you can see that they are documents that each contain a chunk of the ingested page content. 
Usefully, these documents also preserve the original metadata from way back when you first loaded them:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Enterprise Resource Planning Platform, data and analytics, demand sensing, insight gathering, and other areas to create an end-to-end technology foundation, which we\n", + "believe will further accelerate our digital transformation. We believe this unified approach will accelerate growth and unlock more efficiency for our business, while driving\n", + "speed and responsiveness as we serve consumers globally.\n", + "FINANCIAL HIGHLIGHTS\n", + "•In fiscal 2023, NIKE, Inc. achieved record Revenues of $51.2 billion, which increased 10% and 16% on a reported and currency-neutral basis, respectively\n", + "•NIKE Direct revenues grew 14% from $18.7 billion in fiscal 2022 to $21.3 billion in fiscal 2023, and represented approximately 44% of total NIKE Brand revenues for\n", + "fiscal 2023\n", + "•Gross margin for the fiscal year decreased 250 basis points to 43.5% primarily driven by higher product costs, higher markdowns and unfavorable changes in foreign\n", + "currency exchange rates, partially offset by strategic pricing actions\n" + ] + } + ], + "source": [ + "console.log(results.context[0].pageContent);" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " source: '../../data/nke-10k-2023.pdf',\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: {\n", + " PDFFormatVersion: '1.4',\n", + " IsAcroFormPresent: false,\n", + " IsXFAPresent: false,\n", + " Title: '0000320187-23-000039',\n", + " Author: 'EDGAR Online, a division of Donnelley Financial Solutions',\n", + " Subject: 'Form 10-K filed on 2023-07-20 for the period ending 2023-05-31',\n", + " Keywords: '0000320187-23-000039; ; 10-K',\n", + " Creator: 'EDGAR Filing HTML Converter',\n", + " Producer: 'EDGRpdf Service w/ EO.Pdf 22.0.40.0',\n", + " CreationDate: \"D:20230720162200-04'00'\",\n", + " ModDate: \"D:20230720162208-04'00'\"\n", + " },\n", + " metadata: null,\n", + " totalPages: 107\n", + " },\n", + " loc: { pageNumber: 31, lines: { from: 14, to: 22 } }\n", + "}\n" + ] + } + ], + "source": [ + "console.log(results.context[0].metadata);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This particular chunk came from page 31 in the original PDF. 
You can use this data to show which page in the PDF the answer came from, allowing users to quickly verify that answers are based on the source material.\n", + "\n", + ":::info\n", + "For a deeper dive into RAG, see [this more focused tutorial](/docs/tutorials/rag/) or [our how-to guides](/docs/how_to/#qa-with-rag).\n", + ":::\n", + "\n", + "## Next steps\n", + "\n", + "You've now seen how to load documents from a PDF file with a Document Loader and some techniques you can use to prepare that loaded data for RAG.\n", + "\n", + "For more on document loaders, you can check out:\n", + "\n", + "- [The entry in the conceptual guide](/docs/concepts/document_loaders)\n", + "- [Related how-to guides](/docs/how_to/#document-loaders)\n", + "- [Available integrations](/docs/integrations/document_loaders/)\n", + "- [How to create a custom document loader](/docs/how_to/document_loader_custom/)\n", + "\n", + "For more on RAG, see:\n", + "\n", + "- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)\n", + "- [Related how-to guides](/docs/how_to/#qa-with-rag)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "console.log(results.context[0].metadata);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This particular chunk came from page 31 in the original PDF. You can use this data to show which page in the PDF the answer came from, allowing users to quickly verify that answers are based on the source material.\n", - "\n", - ":::info\n", - "For a deeper dive into RAG, see [this more focused tutorial](/docs/tutorials/rag/) or [our how-to guides](/docs/how_to/#qa-with-rag).\n", - ":::\n", - "\n", - "## Next steps\n", - "\n", - "You've now seen how to load documents from a PDF file with a Document Loader and some techniques you can use to prepare that loaded data for RAG.\n", - "\n", - "For more on document loaders, you can check out:\n", - "\n", - "- [The entry in the conceptual guide](/docs/concepts/#document-loaders)\n", - "- [Related how-to guides](/docs/how_to/#document-loaders)\n", - "- [Available integrations](/docs/integrations/document_loaders/)\n", - "- [How to create a custom document loader](/docs/how_to/document_loader_custom/)\n", - "\n", - "For more on RAG, see:\n", - "\n", - "- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)\n", - "- [Related how-to guides](/docs/how_to/#qa-with-rag)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb index b1694c4413ab..c7fa1db2e3d9 100644 --- a/docs/core_docs/docs/tutorials/qa_chat_history.ipynb +++ b/docs/core_docs/docs/tutorials/qa_chat_history.ipynb @@ -1,1438 +1,1438 @@ { - "cells": [ - { - "cell_type": "raw", - "id": 
"023635f2-71cf-43f2-a2e2-a7b4ced30a74", - "metadata": {}, - "source": [ - "---\n", - "sidebar_position: 2\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "86fc5bb2-017f-434e-8cd6-53ab214a5604", - "metadata": {}, - "source": [ - "# Conversational RAG\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat history](/docs/concepts/#chat-history)\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Embeddings](/docs/concepts/#embedding-models)\n", - "- [Vector stores](/docs/concepts/#vector-stores)\n", - "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n", - "- [Tools](/docs/concepts/#tools)\n", - "- [Agents](/docs/concepts/#agents)\n", - "\n", - ":::\n", - "\n", - "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", - "\n", - "In this guide we focus on **adding logic for incorporating historical messages.** Further details on chat history management is [covered here](/docs/how_to/message_history).\n", - "\n", - "We will cover two approaches:\n", - "\n", - "1. Chains, in which we always execute a retrieval step;\n", - "2. Agents, in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n", - "\n", - "For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)." - ] - }, - { - "cell_type": "markdown", - "id": "487d8d79-5ee9-4aa4-9fdf-cd5f4303e099", - "metadata": {}, - "source": [ - "## Setup\n", - "### Dependencies\n", - "\n", - "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/#chat-models) or [LLM](/docs/concepts#llms), [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers).\n", - "\n", - "We’ll use the following packages:\n", - "\n", - "```bash\n", - "npm install --save langchain @langchain/openai langchain cheerio\n", - "```\n", - "\n", - "We need to set environment variable `OPENAI_API_KEY`:\n", - "\n", - "```bash\n", - "export OPENAI_API_KEY=YOUR_KEY\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "1665e740-ce01-4f09-b9ed-516db0bd326f", - "metadata": {}, - "source": [ - "### LangSmith\n", - "\n", - "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://docs.smith.langchain.com).\n", - "\n", - "Note that LangSmith is not needed, but it is helpful. 
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", - "\n", - "\n", - "```bash\n", - "export LANGCHAIN_TRACING_V2=true\n", - "export LANGCHAIN_API_KEY=YOUR_KEY\n", - "\n", - "# Reduce tracing latency if you are not in a serverless environment\n", - "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "fa6ba684-26cf-4860-904e-a4d51380c134", - "metadata": {}, - "source": [ - "## Chains {#chains}\n" - ] - }, - { - "cell_type": "markdown", - "id": "7d2cf4ef", - "metadata": {}, - "source": [ - "\n", - "Let's first revisit the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [RAG tutorial](/docs/tutorials/rag)." - ] - }, - { - "cell_type": "markdown", - "id": "646840fb-5212-48ea-8bc7-ec7be5ec727e", - "metadata": {}, - "source": [ - "```{=mdx}\n", - "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "cb58f273-2111-4a9b-8932-9b64c95030c8", - "metadata": {}, - "outputs": [], - "source": [ - "// @lc-docs-hide-cell\n", - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "\n", - "const llm = new ChatOpenAI({ model: \"gpt-4o\" });" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "820244ae-74b4-4593-b392-822979dd91b8", - "metadata": {}, - "outputs": [], - "source": [ - "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", - "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", - "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", - "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", - "\n", - "// 1. Load, chunk and index the contents of the blog to create a retriever.\n", - "const loader = new CheerioWebBaseLoader(\n", - " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", - " {\n", - " selector: \".post-content, .post-title, .post-header\"\n", - " }\n", - ");\n", - "const docs = await loader.load();\n", - "\n", - "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", - "const splits = await textSplitter.splitDocuments(docs);\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", - "const retriever = vectorstore.asRetriever();\n", - "\n", - "// 2. Incorporate the retriever into a question-answering chain.\n", - "const systemPrompt = \n", - " \"You are an assistant for question-answering tasks. \" +\n", - " \"Use the following pieces of retrieved context to answer \" +\n", - " \"the question. If you don't know the answer, say that you \" +\n", - " \"don't know. 
Use three sentences maximum and keep the \" +\n", - " \"answer concise.\" +\n", - " \"\\n\\n\" +\n", - " \"{context}\";\n", - "\n", - "const prompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", systemPrompt],\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const questionAnswerChain = await createStuffDocumentsChain({\n", - " llm,\n", - " prompt,\n", - "});\n", - "\n", - "const ragChain = await createRetrievalChain({\n", - " retriever,\n", - " combineDocsChain: questionAnswerChain,\n", - "});" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "bf55faaf-0d17-4b74-925d-c478b555f7b2", - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task decomposition involves breaking down large and complex tasks into smaller, more manageable subgoals or steps. This approach helps agents or models efficiently handle intricate tasks by simplifying them into easier components. Task decomposition can be achieved through techniques like Chain of Thought, Tree of Thoughts, or by using task-specific instructions and human input.\n" - ] - } - ], - "source": [ - "const response = await ragChain.invoke({ input: \"What is Task Decomposition?\" });\n", - "console.log(response.answer);" - ] - }, - { - "cell_type": "markdown", - "id": "187404c7-db47-49c5-be29-9ecb96dc9afa", - "metadata": {}, - "source": [ - "Note that we have used the built-in chain constructors `createStuffDocumentsChain` and `createRetrievalChain`, so that the basic ingredients to our solution are:\n", - "\n", - "1. retriever;\n", - "2. prompt;\n", - "3. LLM.\n", - "\n", - "This will simplify the process of incorporating chat history.\n", - "\n", - "### Adding chat history\n", - "\n", - "The chain we have built uses the input query directly to retrieve relevant context. But in a conversational setting, the user query might require conversational context to be understood. For example, consider this exchange:\n", - "\n", - "> Human: \"What is Task Decomposition?\"\n", - ">\n", - "> AI: \"Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model.\"\n", - ">\n", - "> Human: \"What are common ways of doing it?\"\n", - "\n", - "In order to answer the second question, our system needs to understand that \"it\" refers to \"Task Decomposition.\"\n", - "\n", - "We'll need to update two things about our existing app:\n", - "\n", - "1. **Prompt**: Update our prompt to support historical messages as an input.\n", - "2. **Contextualizing questions**: Add a sub-chain that takes the latest user question and reformulates it in the context of the chat history. This can be thought of simply as building a new \"history aware\" retriever. Whereas before we had:\n", - " - `query` -> `retriever` \n", - " Now we will have:\n", - " - `(query, conversation history)` -> `LLM` -> `rephrased query` -> `retriever`" - ] - }, - { - "cell_type": "markdown", - "id": "776ae958-cbdc-4471-8669-c6087436f0b5", - "metadata": {}, - "source": [ - "#### Contextualizing the question\n", - "\n", - "First we'll need to define a sub-chain that takes historical messages and the latest user question, and reformulates the question if it makes reference to any information in the historical information.\n", - "\n", - "We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". 
This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question.\n", - "\n", - "Note that we leverage a helper function [createHistoryAwareRetriever](https://api.js.langchain.com/functions/langchain.chains_history_aware_retriever.createHistoryAwareRetriever.html) for this step, which manages the case where `chat_history` is empty, and otherwise applies `prompt.pipe(llm).pipe(new StringOutputParser()).pipe(retriever)` in sequence.\n", - "\n", - "`createHistoryAwareRetriever` constructs a chain that accepts keys `input` and `chat_history` as input, and has the same output schema as a retriever." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "2b685428-8b82-4af1-be4f-7232c5d55b73", - "metadata": {}, - "outputs": [], - "source": [ - "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", - "import { MessagesPlaceholder } from \"@langchain/core/prompts\";\n", - "\n", - "const contextualizeQSystemPrompt = \n", - " \"Given a chat history and the latest user question \" +\n", - " \"which might reference context in the chat history, \" +\n", - " \"formulate a standalone question which can be understood \" +\n", - " \"without the chat history. Do NOT answer the question, \" +\n", - " \"just reformulate it if needed and otherwise return it as is.\";\n", - "\n", - "const contextualizeQPrompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", contextualizeQSystemPrompt],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const historyAwareRetriever = await createHistoryAwareRetriever({\n", - " llm,\n", - " retriever,\n", - " rephrasePrompt: contextualizeQPrompt,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "42a47168-4a1f-4e39-bd2d-d5b03609a243", - "metadata": {}, - "source": [ - "This chain prepends a rephrasing of the input query to our retriever, so that the retrieval incorporates the context of the conversation.\n", - "\n", - "Now we can build our full QA chain. This is as simple as updating the retriever to be our new `historyAwareRetriever`.\n", - "\n", - "Again, we will use [createStuffDocumentsChain](https://api.js.langchain.com/functions/langchain.chains_combine_documents.createStuffDocumentsChain.html) to generate a `questionAnswerChain2`, with input keys `context`, `chat_history`, and `input`-- it accepts the retrieved context alongside the conversation history and query to generate an answer. A more detailed explaination is over [here](/docs/tutorials/rag/#built-in-chains)\n", - "\n", - "We build our final `ragChain2` with [createRetrievalChain](https://api.js.langchain.com/functions/langchain.chains_retrieval.createRetrievalChain.html). This chain applies the `historyAwareRetriever` and `questionAnswerChain2` in sequence, retaining intermediate outputs such as the retrieved context for convenience. It has input keys `input` and `chat_history`, and includes `input`, `chat_history`, `context`, and `answer` in its output." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "66f275f3-ddef-4678-b90d-ee64576878f9", - "metadata": {}, - "outputs": [], - "source": [ - "const qaPrompt = ChatPromptTemplate.fromMessages([\n", - " [\"system\", systemPrompt],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const questionAnswerChain2 = await createStuffDocumentsChain({\n", - " llm,\n", - " prompt: qaPrompt,\n", - "});\n", - "\n", - "const ragChain2 = await createRetrievalChain({\n", - " retriever: historyAwareRetriever,\n", - " combineDocsChain: questionAnswerChain2,\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "1ba1ae56-7ecb-4563-b792-50a1a5042df3", - "metadata": {}, - "source": [ - "Let's try this. Below we ask a question and a follow-up question that requires contextualization to return a sensible response. Because our chain includes a `\"chat_history\"` input, the caller needs to manage the chat history. We can achieve this by appending input and output messages to a list:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "0005810b-1b95-4666-a795-08d80e478b83", - "metadata": {}, - "outputs": [ + "cell_type": "raw", + "id": "023635f2-71cf-43f2-a2e2-a7b4ced30a74", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 2\n", + "---" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Common ways of doing Task Decomposition include:\n", - "1. Using simple prompting with an LLM, such as asking it to outline steps or subgoals for a task.\n", - "2. Employing task-specific instructions, like \"Write a story outline\" for writing a novel.\n", - "3. Incorporating human inputs for guidance.\n", - "Additionally, advanced approaches like Chain of Thought (CoT) and Tree of Thoughts (ToT) can further refine the process, and using an external classical planner with PDDL (as in LLM+P) is another option.\n" - ] - } - ], - "source": [ - "import { BaseMessage, HumanMessage, AIMessage } from \"@langchain/core/messages\";\n", - "\n", - "let chatHistory: BaseMessage[] = [];\n", - "\n", - "const question = \"What is Task Decomposition?\";\n", - "const aiMsg1 = await ragChain2.invoke({ input: question, chat_history: chatHistory });\n", - "chatHistory = chatHistory.concat([\n", - " new HumanMessage(question),\n", - " new AIMessage(aiMsg1.answer),\n", - "]);\n", - "\n", - "const secondQuestion = \"What are common ways of doing it?\";\n", - "const aiMsg2 = await ragChain2.invoke({ input: secondQuestion, chat_history: chatHistory });\n", - "\n", - "console.log(aiMsg2.answer);" - ] - }, - { - "cell_type": "markdown", - "id": "53a662c2-f38b-45f9-95c4-66de15637614", - "metadata": {}, - "source": [ - "#### Stateful management of chat history\n", - "\n", - "Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. 
In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n", - "\n", - "For this we can use:\n", - "\n", - "- [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html): Store chat history.\n", - "- [RunnableWithMessageHistory](/docs/how_to/message_history): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", - "\n", - "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n", - "\n", - "Instances of `RunnableWithMessageHistory` manage the chat history for you. They accept a config with a key (`\"sessionId\"` by default) that specifies what conversation history to fetch and prepend to the input, and append the output to the same conversation history. Below is an example:" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72", - "metadata": {}, - "outputs": [], - "source": [ - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", - "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", - "\n", - "const demoEphemeralChatMessageHistoryForChain = new ChatMessageHistory();\n", - "\n", - "const conversationalRagChain = new RunnableWithMessageHistory({\n", - " runnable: ragChain2,\n", - " getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistoryForChain,\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - " outputMessagesKey: \"answer\",\n", - "})" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "1046c92f-21b3-4214-907d-92878d8cba23", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "86fc5bb2-017f-434e-8cd6-53ab214a5604", + "metadata": {}, + "source": [ + "# Conversational RAG\n", + "\n", + ":::info Prerequisites\n", + "\n", + "This guide assumes familiarity with the following concepts:\n", + "\n", + "- [Chat history](/docs/concepts/chat_history)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Embeddings](/docs/concepts/embedding_models)\n", + "- [Vector stores](/docs/concepts/#vector-stores)\n", + "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n", + "- [Tools](/docs/concepts/tools)\n", + "- [Agents](/docs/concepts/agents)\n", + "\n", + ":::\n", + "\n", + "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", + "\n", + "In this guide we focus on **adding logic for incorporating historical messages.** Further details on chat history management is [covered here](/docs/how_to/message_history).\n", + "\n", + "We will cover two approaches:\n", + "\n", + "1. Chains, in which we always execute a retrieval step;\n", + "2. Agents, in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n", + "\n", + "For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task Decomposition involves breaking down complicated tasks into smaller, more manageable subgoals. Techniques such as the Chain of Thought (CoT) and Tree of Thoughts extend this by decomposing problems into multiple thought steps and exploring multiple reasoning possibilities at each step. LLMs can perform task decomposition using simple prompts, task-specific instructions, or human inputs, and some approaches like LLM+P involve using external classical planners.\n" - ] - } - ], - "source": [ - "const result1 = await conversationalRagChain.invoke(\n", - " { input: \"What is Task Decomposition?\" },\n", - " { configurable: { sessionId: \"abc123\" } }\n", - ");\n", - "console.log(result1.answer);" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "487d8d79-5ee9-4aa4-9fdf-cd5f4303e099", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/concepts/chat_models) or [LLM](/docs/concepts/text_llms), [Embeddings](/docs/concepts/embedding_models), and [VectorStore](/docs/concepts/vectorstores) or [Retriever](/docs/concepts/retrievers).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/openai langchain cheerio\n", + "```\n", + "\n", + "We need to set environment variable `OPENAI_API_KEY`:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Common ways of doing task decomposition include:\n", - "\n", - "1. Using simple prompting with an LLM, such as \"Steps for XYZ.\\n1.\" or \"What are the subgoals for achieving XYZ?\"\n", - "2. Utilizing task-specific instructions, like \"Write a story outline.\" for writing a novel.\n", - "3. Incorporating human inputs to guide and refine the decomposition process. \n", - "\n", - "Additionally, the LLM+P approach utilizes an external classical planner, involving PDDL to describe and plan complex tasks.\n" - ] - } - ], - "source": [ - "const result2 = await conversationalRagChain.invoke(\n", - " { input: \"What are common ways of doing it?\" },\n", - " { configurable: { sessionId: \"abc123\" } }\n", - ");\n", - "console.log(result2.answer);" - ] - }, - { - "cell_type": "markdown", - "id": "0ab1ded4-76d9-453f-9b9b-db9a4560c737", - "metadata": {}, - "source": [ - "### Tying it together" - ] - }, - { - "cell_type": "markdown", - "id": "8a08a5ea-df5b-4547-93c6-2a3940dd5c3e", - "metadata": {}, - "source": [ - "![](../../static/img/conversational_retrieval_chain.png)\n", - "\n", - "For convenience, we tie together all of the necessary steps in a single code cell:" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "71c32048-1a41-465f-a9e2-c4affc332fd9", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "1665e740-ce01-4f09-b9ed-516db0bd326f", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. 
The best way to do this is with [LangSmith](https://docs.smith.langchain.com).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "\n", + "# Reduce tracing latency if you are not in a serverless environment\n", + "# export LANGCHAIN_CALLBACKS_BACKGROUND=true\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{ input: 'What is Task Decomposition?' }\n", - "----\n", - "{ chat_history: [] }\n", - "----\n", - "{\n", - " context: [\n", - " Document {\n", - " pageContent: 'Fig. 1. Overview of a LLM-powered autonomous agent system.\\n' +\n", - " 'Component One: Planning#\\n' +\n", - " 'A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\n' +\n", - " 'Task Decomposition#\\n' +\n", - " 'Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n' +\n", - " 'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.',\n", - " metadata: [Object],\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n' +\n", - " 'Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. 
Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\n' +\n", - " 'Self-Reflection#',\n", - " metadata: [Object],\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Planning\\n' +\n", - " '\\n' +\n", - " 'Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\n' +\n", - " 'Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n' +\n", - " '\\n' +\n", - " '\\n' +\n", - " 'Memory\\n' +\n", - " '\\n' +\n", - " 'Short-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\n' +\n", - " 'Long-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n' +\n", - " '\\n' +\n", - " '\\n' +\n", - " 'Tool use\\n' +\n", - " '\\n' +\n", - " 'The agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.',\n", - " metadata: [Object],\n", - " id: undefined\n", - " },\n", - " Document {\n", - " pageContent: 'Resources:\\n' +\n", - " '1. Internet access for searches and information gathering.\\n' +\n", - " '2. Long Term memory management.\\n' +\n", - " '3. GPT-3.5 powered Agents for delegation of simple tasks.\\n' +\n", - " '4. File output.\\n' +\n", - " '\\n' +\n", - " 'Performance Evaluation:\\n' +\n", - " '1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n' +\n", - " '2. Constructively self-criticize your big-picture behavior constantly.\\n' +\n", - " '3. Reflect on past decisions and strategies to refine your approach.\\n' +\n", - " '4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',\n", - " metadata: [Object],\n", - " id: undefined\n", - " }\n", - " ]\n", - "}\n", - "----\n", - "{ answer: '' }\n", - "----\n", - "{ answer: 'Task' }\n", - "----\n", - "{ answer: ' decomposition' }\n", - "----\n", - "{ answer: ' involves' }\n", - "----\n", - "{ answer: ' breaking' }\n", - "----\n", - "{ answer: ' down' }\n", - "----\n", - "{ answer: ' a' }\n", - "----\n", - "{ answer: ' complex' }\n", - "----\n", - "{ answer: ' task' }\n", - "----\n", - "{ answer: ' into' }\n", - "----\n", - "{ answer: ' smaller' }\n", - "----\n", - "{ answer: ' and' }\n", - "----\n", - "{ answer: ' more' }\n", - "----\n", - "{ answer: ' manageable' }\n", - "----\n", - "{ answer: ' sub' }\n", - "----\n", - "{ answer: 'goals' }\n", - "----\n", - "{ answer: ' or' }\n", - "----\n", - "{ answer: ' steps' }\n", - "----\n", - "{ answer: '.' 
}\n", - "----\n", - "{ answer: ' This' }\n", - "----\n", - "{ answer: ' process' }\n", - "----\n", - "{ answer: ' allows' }\n", - "----\n", - "{ answer: ' an' }\n", - "----\n", - "{ answer: ' agent' }\n", - "----\n", - "{ answer: ' or' }\n", - "----\n", - "{ answer: ' model' }\n", - "----\n", - "{ answer: ' to' }\n", - "----\n", - "{ answer: ' efficiently' }\n", - "----\n", - "{ answer: ' handle' }\n", - "----\n", - "{ answer: ' intricate' }\n", - "----\n", - "{ answer: ' tasks' }\n", - "----\n", - "{ answer: ' by' }\n", - "----\n", - "{ answer: ' dividing' }\n", - "----\n", - "{ answer: ' them' }\n", - "----\n", - "{ answer: ' into' }\n", - "----\n", - "{ answer: ' simpler' }\n", - "----\n", - "{ answer: ' components' }\n", - "----\n", - "{ answer: '.' }\n", - "----\n", - "{ answer: ' Task' }\n", - "----\n", - "{ answer: ' decomposition' }\n", - "----\n", - "{ answer: ' can' }\n", - "----\n", - "{ answer: ' be' }\n", - "----\n", - "{ answer: ' achieved' }\n", - "----\n", - "{ answer: ' through' }\n", - "----\n", - "{ answer: ' techniques' }\n", - "----\n", - "{ answer: ' like' }\n", - "----\n", - "{ answer: ' Chain' }\n", - "----\n", - "{ answer: ' of' }\n", - "----\n", - "{ answer: ' Thought' }\n", - "----\n", - "{ answer: ',' }\n", - "----\n", - "{ answer: ' Tree' }\n", - "----\n", - "{ answer: ' of' }\n", - "----\n", - "{ answer: ' Thoughts' }\n", - "----\n", - "{ answer: ',' }\n", - "----\n", - "{ answer: ' or' }\n", - "----\n", - "{ answer: ' by' }\n", - "----\n", - "{ answer: ' using' }\n", - "----\n", - "{ answer: ' task' }\n", - "----\n", - "{ answer: '-specific' }\n", - "----\n", - "{ answer: ' instructions' }\n", - "----\n", - "{ answer: '.' }\n", - "----\n", - "{ answer: '' }\n", - "----\n", - "{ answer: '' }\n", - "----\n" - ] - } - ], - "source": [ - "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", - "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", - "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", - "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", - "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", - "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", - "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", - "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", - "import { BaseChatMessageHistory } from \"@langchain/core/chat_history\";\n", - "\n", - "const llm2 = new ChatOpenAI({ model: \"gpt-3.5-turbo\", temperature: 0 });\n", - "\n", - "// Construct retriever\n", - "const loader2 = new CheerioWebBaseLoader(\n", - " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", - " {\n", - " selector: \".post-content, .post-title, .post-header\"\n", - " }\n", - ");\n", - "\n", - "const docs2 = await loader2.load();\n", - "\n", - "const textSplitter2 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", - "const splits2 = await textSplitter2.splitDocuments(docs2);\n", - "const vectorstore2 = await MemoryVectorStore.fromDocuments(splits2, new OpenAIEmbeddings());\n", - "const retriever2 = vectorstore2.asRetriever();\n", - "\n", - "// Contextualize question\n", - "const contextualizeQSystemPrompt2 = \n", - " \"Given a chat history 
and the latest user question \" +\n", - " \"which might reference context in the chat history, \" +\n", - " \"formulate a standalone question which can be understood \" +\n", - " \"without the chat history. Do NOT answer the question, \" +\n", - " \"just reformulate it if needed and otherwise return it as is.\";\n", - "\n", - "const contextualizeQPrompt2 = ChatPromptTemplate.fromMessages([\n", - " [\"system\", contextualizeQSystemPrompt2],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const historyAwareRetriever2 = await createHistoryAwareRetriever({\n", - " llm: llm2,\n", - " retriever: retriever2,\n", - " rephrasePrompt: contextualizeQPrompt2\n", - "});\n", - "\n", - "// Answer question\n", - "const systemPrompt2 = \n", - " \"You are an assistant for question-answering tasks. \" +\n", - " \"Use the following pieces of retrieved context to answer \" +\n", - " \"the question. If you don't know the answer, say that you \" +\n", - " \"don't know. Use three sentences maximum and keep the \" +\n", - " \"answer concise.\" +\n", - " \"\\n\\n\" +\n", - " \"{context}\";\n", - "\n", - "const qaPrompt2 = ChatPromptTemplate.fromMessages([\n", - " [\"system\", systemPrompt2],\n", - " new MessagesPlaceholder(\"chat_history\"),\n", - " [\"human\", \"{input}\"],\n", - "]);\n", - "\n", - "const questionAnswerChain3 = await createStuffDocumentsChain({\n", - " llm,\n", - " prompt: qaPrompt2,\n", - "});\n", - "\n", - "const ragChain3 = await createRetrievalChain({\n", - " retriever: historyAwareRetriever2,\n", - " combineDocsChain: questionAnswerChain3,\n", - "});\n", - "\n", - "// Statefully manage chat history\n", - "const store2: Record = {};\n", - "\n", - "function getSessionHistory2(sessionId: string): BaseChatMessageHistory {\n", - " if (!(sessionId in store2)) {\n", - " store2[sessionId] = new ChatMessageHistory();\n", - " }\n", - " return store2[sessionId];\n", - "}\n", - "\n", - "const conversationalRagChain2 = new RunnableWithMessageHistory({\n", - " runnable: ragChain3,\n", - " getMessageHistory: getSessionHistory2,\n", - " inputMessagesKey: \"input\",\n", - " historyMessagesKey: \"chat_history\",\n", - " outputMessagesKey: \"answer\",\n", - "});\n", - "\n", - "// Example usage\n", - "const query2 = \"What is Task Decomposition?\";\n", - "\n", - "for await (const s of await conversationalRagChain2.stream(\n", - " { input: query2 },\n", - " { configurable: { sessionId: \"unique_session_id\" } }\n", - ")) {\n", - " console.log(s);\n", - " console.log(\"----\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "861da8ed-d890-4fdc-a3bf-30433db61e0d", - "metadata": {}, - "source": [ - "## Agents {#agents}\n", - "\n", - "Agents leverage the reasoning capabilities of LLMs to make decisions during execution. Using agents allow you to offload some discretion over the retrieval process. Although their behavior is less predictable than chains, they offer some advantages in this context:\n", - "\n", - "- Agents generate the input to the retriever directly, without necessarily needing us to explicitly build in contextualization, as we did above;\n", - "- Agents can execute multiple retrieval steps in service of a query, or refrain from executing a retrieval step altogether (e.g., in response to a generic greeting from a user).\n", - "\n", - "### Retrieval tool\n", - "\n", - "Agents can access \"tools\" and manage their execution. 
In this case, we will convert our retriever into a LangChain tool to be wielded by the agent:" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "809cc747-2135-40a2-8e73-e4556343ee64", - "metadata": {}, - "outputs": [], - "source": [ - "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", - "\n", - "const tool = createRetrieverTool(\n", - " retriever,\n", - " {\n", - " name: \"blog_post_retriever\",\n", - " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", - " }\n", - ")\n", - "const tools = [tool]" - ] - }, - { - "cell_type": "markdown", - "id": "07dcb968-ed9a-458a-85e1-528cd28c6965", - "metadata": {}, - "source": [ - "Tools are LangChain [Runnables](/docs/concepts#langchain-expression-language-lcel), and implement the usual interface:" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "fa6ba684-26cf-4860-904e-a4d51380c134", + "metadata": {}, + "source": [ + "## Chains {#chains}\n" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n", - "Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\n", - "Self-Reflection#\n", - "\n", - "Fig. 1. Overview of a LLM-powered autonomous agent system.\n", - "Component One: Planning#\n", - "A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n", - "Task Decomposition#\n", - "Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n", - "Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n", - "\n", - "(3) Task execution: Expert models execute on the specific tasks and log results.\n", - "Instruction:\n", - "\n", - "With the input and the inference results, the AI assistant needs to describe the process and results. 
The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n", - "\n", - "Resources:\n", - "1. Internet access for searches and information gathering.\n", - "2. Long Term memory management.\n", - "3. GPT-3.5 powered Agents for delegation of simple tasks.\n", - "4. File output.\n", - "\n", - "Performance Evaluation:\n", - "1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n", - "2. Constructively self-criticize your big-picture behavior constantly.\n", - "3. Reflect on past decisions and strategies to refine your approach.\n", - "4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n" - ] - } - ], - "source": [ - "console.log(await tool.invoke({ query: \"task decomposition\" }))" - ] - }, - { - "cell_type": "markdown", - "id": "f77e0217-28be-4b8b-b4c4-9cc4ed5ec201", - "metadata": {}, - "source": [ - "### Agent constructor\n", - "\n", - "Now that we have defined the tools and the LLM, we can create the agent. We will be using [LangGraph](/docs/concepts/#langgraph) to construct the agent. \n", - "Currently we are using a high level interface to construct the agent, but the nice thing about LangGraph is that this high-level interface is backed by a low-level, highly controllable API in case you want to modify the agent logic." - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "1726d151-4653-4c72-a187-a14840add526", - "metadata": {}, - "outputs": [], - "source": [ - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", - "\n", - "const agentExecutor = createReactAgent({ llm, tools });" - ] - }, - { - "cell_type": "markdown", - "id": "6d5152ca-1c3b-4f58-bb28-f31c0be7ba66", - "metadata": {}, - "source": [ - "We can now try it out. Note that so far it is not stateful (we still need to add in memory)" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "170403a2-c914-41db-85d8-a2c381da112d", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "7d2cf4ef", + "metadata": {}, + "source": [ + "\n", + "Let's first revisit the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [RAG tutorial](/docs/tutorials/rag)." 
+ ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABABtUmgD1ZlOHZd0nD9TR8yb3mMe\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_dWxEY41mg9VSLamVYHltsUxL\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 19,\n", - " \"promptTokens\": 66,\n", - " \"totalTokens\": 85\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_3537616b13\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"blog_post_retriever\",\n", - " \"args\": {\n", - " \"query\": \"Task Decomposition\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_dWxEY41mg9VSLamVYHltsUxL\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 66,\n", - " \"output_tokens\": 19,\n", - " \"total_tokens\": 85\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n", - "{\n", - " tools: {\n", - " messages: [\n", - " ToolMessage {\n", - " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. 
The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\",\n", - " \"name\": \"blog_post_retriever\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_dWxEY41mg9VSLamVYHltsUxL\"\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n", - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABABuSj5FHmHFdeR2Pv7Cxcmq5aQz\",\n", - " \"content\": \"Task Decomposition is a technique that allows an agent to break down a complex task into smaller, more manageable subtasks or steps. The primary goal is to simplify the task to ensure efficient execution and better understanding. \\n\\n### Methods in Task Decomposition:\\n1. **Chain of Thought (CoT)**:\\n - **Description**: This technique involves instructing the model to “think step by step” to decompose hard tasks into smaller ones. It transforms large tasks into multiple manageable tasks, enhancing the model's performance and providing insight into its thinking process. \\n - **Example**: When given a complex problem, the model outlines sequential steps to reach a solution.\\n\\n2. **Tree of Thoughts**:\\n - **Description**: This extends CoT by exploring multiple reasoning possibilities at each step. The problem is decomposed into multiple thought steps, with several thoughts generated per step, forming a sort of decision tree.\\n - **Example**: For a given task, the model might consider various alternative actions at each stage, evaluating each before proceeding.\\n\\n3. **LLM with Prompts**:\\n - **Description**: Basic task decomposition can be done via simple prompts like \\\"Steps for XYZ\\\" or \\\"What are the subgoals for achieving XYZ?\\\" This can also be guided by task-specific instructions or human inputs when necessary.\\n - **Example**: Asking the model to list the subgoals for writing a novel might produce an outline broken down into chapters, character development, and plot points.\\n\\n4. **LLM+P**:\\n - **Description**: This approach involves outsourcing long-term planning to an external classical planner using Planning Domain Definition Language (PDDL). 
The task is translated into a PDDL problem by the model, planned using classical planning tools, and then translated back into natural language.\\n - **Example**: In robotics, translating a task into PDDL and then using a domain-specific planner to generate a sequence of actions.\\n\\n### Applications:\\n- **Planning**: Helps an agent plan tasks by breaking them into clear, manageable steps.\\n- **Self-Reflection**: Allows agents to reflect and refine their actions, learning from past mistakes to improve future performance.\\n- **Memory**: Utilizes short-term memory for immediate context and long-term memory for retaining and recalling information over extended periods.\\n- **Tool Use**: Enables the agent to call external APIs for additional information or capabilities not inherent in the model.\\n\\nIn essence, task decomposition leverages various methodologies to simplify complex tasks, ensuring better performance, improved reasoning, and effective task execution.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 522,\n", - " \"promptTokens\": 821,\n", - " \"totalTokens\": 1343\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 821,\n", - " \"output_tokens\": 522,\n", - " \"total_tokens\": 1343\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n" - ] - } - ], - "source": [ - "const query = \"What is Task Decomposition?\";\n", - "\n", - "for await (const s of await agentExecutor.stream(\n", - " { messages: [new HumanMessage(query)] }\n", - ")) {\n", - " console.log(s);\n", - " console.log(\"----\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "1df703b1-aad6-48fb-b6fa-703e32ea88b9", - "metadata": {}, - "source": [ - "LangGraph comes with built in persistence, so we don't need to use ChatMessageHistory! Rather, we can pass in a checkpointer to our LangGraph agent directly" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "id": "04a3a664-3c3f-4cd1-9995-26662a52da7c", - "metadata": {}, - "outputs": [], - "source": [ - "import { MemorySaver } from \"@langchain/langgraph\";\n", - "\n", - "const memory = new MemorySaver();\n", - "\n", - "const agentExecutorWithMemory = createReactAgent({ llm, tools, checkpointSaver: memory });" - ] - }, - { - "cell_type": "markdown", - "id": "02026f78-338e-4d18-9f05-131e1dd59197", - "metadata": {}, - "source": [ - "This is all we need to construct a conversational RAG agent.\n", - "\n", - "Let's observe its behavior. Note that if we input a query that does not require a retrieval step, the agent does not execute one:" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8", - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "id": "646840fb-5212-48ea-8bc7-ec7be5ec727e", + "metadata": {}, + "source": [ + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABACGc1vDPUSHYN7YVkuUMwpKR20P\",\n", - " \"content\": \"Hello, Bob! 
How can I assist you today?\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 12,\n", - " \"promptTokens\": 64,\n", - " \"totalTokens\": 76\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_e375328146\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 64,\n", - " \"output_tokens\": 12,\n", - " \"total_tokens\": 76\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n" - ] - } - ], - "source": [ - "const config = { configurable: { thread_id: \"abc123\" } };\n", - "\n", - "for await (const s of await agentExecutorWithMemory.stream(\n", - " { messages: [new HumanMessage(\"Hi! I'm bob\")] },\n", - " config\n", - ")) {\n", - " console.log(s);\n", - " console.log(\"----\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "a7928865-3dd6-4d36-abc6-2a30de770d09", - "metadata": {}, - "source": [ - "Further, if we input a query that does require a retrieval step, the agent generates the input to the tool:" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "e2c570ae-dd91-402c-8693-ae746de63b16", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 4, + "id": "cb58f273-2111-4a9b-8932-9b64c95030c8", + "metadata": {}, + "outputs": [], + "source": [ + "// @lc-docs-hide-cell\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ model: \"gpt-4o\" });" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABACI6WN7hkfJjFhIUBGt3TswtPOv\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 19,\n", - " \"promptTokens\": 89,\n", - " \"totalTokens\": 108\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_f82f5b050c\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"blog_post_retriever\",\n", - " \"args\": {\n", - " \"query\": \"Task Decomposition\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 89,\n", - " \"output_tokens\": 19,\n", - " \"total_tokens\": 108\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n", - "{\n", - " tools: {\n", - " messages: [\n", - " ToolMessage {\n", - " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\",\n", - " \"name\": \"blog_post_retriever\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\"\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n", - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABACJu56eYSAyyMNaV9UEUwHS8vRu\",\n", - " \"content\": \"Task Decomposition is a method used to break down complicated tasks into smaller, more manageable steps. 
This approach leverages the \\\"Chain of Thought\\\" (CoT) technique, which prompts models to \\\"think step by step\\\" to enhance performance on complex tasks. Here’s a summary of the key concepts related to Task Decomposition:\\n\\n1. **Chain of Thought (CoT):**\\n - A prompting technique that encourages models to decompose hard tasks into simpler steps, transforming big tasks into multiple manageable sub-tasks.\\n - CoT helps to provide insights into the model’s thinking process.\\n\\n2. **Tree of Thoughts:**\\n - An extension of CoT, this approach explores multiple reasoning paths at each step.\\n - It creates a tree structure by generating multiple thoughts per step, and uses search methods like breadth-first search (BFS) or depth-first search (DFS) to explore these thoughts.\\n - Each state is evaluated by a classifier or majority vote.\\n\\n3. **Methods for Task Decomposition:**\\n - Simple prompting such as instructing with phrases like \\\"Steps for XYZ: 1., 2., 3.\\\" or \\\"What are the subgoals for achieving XYZ?\\\".\\n - Using task-specific instructions like \\\"Write a story outline\\\" for specific tasks such as writing a novel.\\n - Incorporating human inputs for better granularity.\\n\\n4. **LLM+P (Long-horizon Planning):**\\n - A method that involves using an external classical planner for long-horizon planning.\\n - The process involves translating the problem into a Planning Domain Definition Language (PDDL) problem, using a classical planner to generate a PDDL plan, and then translating it back into natural language.\\n\\nTask Decomposition is essential in planning complex tasks, allowing for efficient handling by breaking them into sub-tasks and sub-goals. This process is integral to the functioning of autonomous agent systems and enhances their capability to execute intricate tasks effectively.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 396,\n", - " \"promptTokens\": 844,\n", - " \"totalTokens\": 1240\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 844,\n", - " \"output_tokens\": 396,\n", - " \"total_tokens\": 1240\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n" - ] - } - ], - "source": [ - "for await (const s of await agentExecutorWithMemory.stream(\n", - " { messages: [new HumanMessage(query)] },\n", - " config\n", - ")) {\n", - " console.log(s);\n", - " console.log(\"----\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "26eaae33-3c4e-49fc-9fc6-db8967e25579", - "metadata": {}, - "source": [ - "Above, instead of inserting our query verbatim into the tool, the agent stripped unnecessary words like \"what\" and \"is\".\n", - "\n", - "This same principle allows the agent to use the context of the conversation when necessary:" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "id": "570d8c68-136e-4ba5-969a-03ba195f6118", - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 3, + "id": "820244ae-74b4-4593-b392-822979dd91b8", + "metadata": {}, + "outputs": [], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { 
OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "\n", + "// 1. Load, chunk and index the contents of the blog to create a retriever.\n", + "const loader = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " {\n", + " selector: \".post-content, .post-title, .post-header\"\n", + " }\n", + ");\n", + "const docs = await loader.load();\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits = await textSplitter.splitDocuments(docs);\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", + "const retriever = vectorstore.asRetriever();\n", + "\n", + "// 2. Incorporate the retriever into a question-answering chain.\n", + "const systemPrompt = \n", + " \"You are an assistant for question-answering tasks. \" +\n", + " \"Use the following pieces of retrieved context to answer \" +\n", + " \"the question. If you don't know the answer, say that you \" +\n", + " \"don't know. Use three sentences maximum and keep the \" +\n", + " \"answer concise.\" +\n", + " \"\\n\\n\" +\n", + " \"{context}\";\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", systemPrompt],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const questionAnswerChain = await createStuffDocumentsChain({\n", + " llm,\n", + " prompt,\n", + "});\n", + "\n", + "const ragChain = await createRetrievalChain({\n", + " retriever,\n", + " combineDocsChain: questionAnswerChain,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "bf55faaf-0d17-4b74-925d-c478b555f7b2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition involves breaking down large and complex tasks into smaller, more manageable subgoals or steps. This approach helps agents or models efficiently handle intricate tasks by simplifying them into easier components. Task decomposition can be achieved through techniques like Chain of Thought, Tree of Thoughts, or by using task-specific instructions and human input.\n" + ] + } + ], + "source": [ + "const response = await ragChain.invoke({ input: \"What is Task Decomposition?\" });\n", + "console.log(response.answer);" + ] + }, + { + "cell_type": "markdown", + "id": "187404c7-db47-49c5-be29-9ecb96dc9afa", + "metadata": {}, + "source": [ + "Note that we have used the built-in chain constructors `createStuffDocumentsChain` and `createRetrievalChain`, so that the basic ingredients to our solution are:\n", + "\n", + "1. retriever;\n", + "2. prompt;\n", + "3. LLM.\n", + "\n", + "This will simplify the process of incorporating chat history.\n", + "\n", + "### Adding chat history\n", + "\n", + "The chain we have built uses the input query directly to retrieve relevant context. But in a conversational setting, the user query might require conversational context to be understood. 
For example, consider this exchange:\n", + "\n", + "> Human: \"What is Task Decomposition?\"\n", + ">\n", + "> AI: \"Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model.\"\n", + ">\n", + "> Human: \"What are common ways of doing it?\"\n", + "\n", + "In order to answer the second question, our system needs to understand that \"it\" refers to \"Task Decomposition.\"\n", + "\n", + "We'll need to update two things about our existing app:\n", + "\n", + "1. **Prompt**: Update our prompt to support historical messages as an input.\n", + "2. **Contextualizing questions**: Add a sub-chain that takes the latest user question and reformulates it in the context of the chat history. This can be thought of simply as building a new \"history aware\" retriever. Whereas before we had:\n", + " - `query` -> `retriever` \n", + " Now we will have:\n", + " - `(query, conversation history)` -> `LLM` -> `rephrased query` -> `retriever`" + ] + }, + { + "cell_type": "markdown", + "id": "776ae958-cbdc-4471-8669-c6087436f0b5", + "metadata": {}, + "source": [ + "#### Contextualizing the question\n", + "\n", + "First we'll need to define a sub-chain that takes historical messages and the latest user question, and reformulates the question if it makes reference to any information in the historical information.\n", + "\n", + "We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question.\n", + "\n", + "Note that we leverage a helper function [createHistoryAwareRetriever](https://api.js.langchain.com/functions/langchain.chains_history_aware_retriever.createHistoryAwareRetriever.html) for this step, which manages the case where `chat_history` is empty, and otherwise applies `prompt.pipe(llm).pipe(new StringOutputParser()).pipe(retriever)` in sequence.\n", + "\n", + "`createHistoryAwareRetriever` constructs a chain that accepts keys `input` and `chat_history` as input, and has the same output schema as a retriever." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "2b685428-8b82-4af1-be4f-7232c5d55b73", + "metadata": {}, + "outputs": [], + "source": [ + "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", + "import { MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const contextualizeQSystemPrompt = \n", + " \"Given a chat history and the latest user question \" +\n", + " \"which might reference context in the chat history, \" +\n", + " \"formulate a standalone question which can be understood \" +\n", + " \"without the chat history. 
Do NOT answer the question, \" +\n",
+ "  \"just reformulate it if needed and otherwise return it as is.\";\n",
+ "\n",
+ "const contextualizeQPrompt = ChatPromptTemplate.fromMessages([\n",
+ "  [\"system\", contextualizeQSystemPrompt],\n",
+ "  new MessagesPlaceholder(\"chat_history\"),\n",
+ "  [\"human\", \"{input}\"],\n",
+ "]);\n",
+ "\n",
+ "const historyAwareRetriever = await createHistoryAwareRetriever({\n",
+ "  llm,\n",
+ "  retriever,\n",
+ "  rephrasePrompt: contextualizeQPrompt,\n",
+ "});"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "42a47168-4a1f-4e39-bd2d-d5b03609a243",
+ "metadata": {},
+ "source": [
+ "This chain prepends a rephrasing of the input query to our retriever, so that the retrieval incorporates the context of the conversation.\n",
+ "\n",
+ "Now we can build our full QA chain. This is as simple as updating the retriever to be our new `historyAwareRetriever`.\n",
+ "\n",
+ "Again, we will use [createStuffDocumentsChain](https://api.js.langchain.com/functions/langchain.chains_combine_documents.createStuffDocumentsChain.html) to generate a `questionAnswerChain2`, with input keys `context`, `chat_history`, and `input` -- it accepts the retrieved context alongside the conversation history and query to generate an answer. A more detailed explanation can be found [here](/docs/tutorials/rag/#built-in-chains).\n",
+ "\n",
+ "We build our final `ragChain2` with [createRetrievalChain](https://api.js.langchain.com/functions/langchain.chains_retrieval.createRetrievalChain.html). This chain applies the `historyAwareRetriever` and `questionAnswerChain2` in sequence, retaining intermediate outputs such as the retrieved context for convenience. It has input keys `input` and `chat_history`, and includes `input`, `chat_history`, `context`, and `answer` in its output."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "66f275f3-ddef-4678-b90d-ee64576878f9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "const qaPrompt = ChatPromptTemplate.fromMessages([\n",
+ "  [\"system\", systemPrompt],\n",
+ "  new MessagesPlaceholder(\"chat_history\"),\n",
+ "  [\"human\", \"{input}\"],\n",
+ "]);\n",
+ "\n",
+ "const questionAnswerChain2 = await createStuffDocumentsChain({\n",
+ "  llm,\n",
+ "  prompt: qaPrompt,\n",
+ "});\n",
+ "\n",
+ "const ragChain2 = await createRetrievalChain({\n",
+ "  retriever: historyAwareRetriever,\n",
+ "  combineDocsChain: questionAnswerChain2,\n",
+ "});"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1ba1ae56-7ecb-4563-b792-50a1a5042df3",
+ "metadata": {},
+ "source": [
+ "Let's try this. Below we ask a question and a follow-up question that requires contextualization to return a sensible response. Because our chain includes a `\"chat_history\"` input, the caller needs to manage the chat history. We can achieve this by appending input and output messages to a list:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "0005810b-1b95-4666-a795-08d80e478b83",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Common ways of doing Task Decomposition include:\n",
+ "1. Using simple prompting with an LLM, such as asking it to outline steps or subgoals for a task.\n",
+ "2. Employing task-specific instructions, like \"Write a story outline\" for writing a novel.\n",
+ "3. 
Incorporating human inputs for guidance.\n", + "Additionally, advanced approaches like Chain of Thought (CoT) and Tree of Thoughts (ToT) can further refine the process, and using an external classical planner with PDDL (as in LLM+P) is another option.\n" + ] + } + ], + "source": [ + "import { BaseMessage, HumanMessage, AIMessage } from \"@langchain/core/messages\";\n", + "\n", + "let chatHistory: BaseMessage[] = [];\n", + "\n", + "const question = \"What is Task Decomposition?\";\n", + "const aiMsg1 = await ragChain2.invoke({ input: question, chat_history: chatHistory });\n", + "chatHistory = chatHistory.concat([\n", + " new HumanMessage(question),\n", + " new AIMessage(aiMsg1.answer),\n", + "]);\n", + "\n", + "const secondQuestion = \"What are common ways of doing it?\";\n", + "const aiMsg2 = await ragChain2.invoke({ input: secondQuestion, chat_history: chatHistory });\n", + "\n", + "console.log(aiMsg2.answer);" + ] + }, + { + "cell_type": "markdown", + "id": "53a662c2-f38b-45f9-95c4-66de15637614", + "metadata": {}, + "source": [ + "#### Stateful management of chat history\n", + "\n", + "Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n", + "\n", + "For this we can use:\n", + "\n", + "- [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html): Store chat history.\n", + "- [RunnableWithMessageHistory](/docs/how_to/message_history): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", + "\n", + "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n", + "\n", + "Instances of `RunnableWithMessageHistory` manage the chat history for you. They accept a config with a key (`\"sessionId\"` by default) that specifies what conversation history to fetch and prepend to the input, and append the output to the same conversation history. Below is an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72", + "metadata": {}, + "outputs": [], + "source": [ + "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", + "\n", + "const demoEphemeralChatMessageHistoryForChain = new ChatMessageHistory();\n", + "\n", + "const conversationalRagChain = new RunnableWithMessageHistory({\n", + " runnable: ragChain2,\n", + " getMessageHistory: (_sessionId) => demoEphemeralChatMessageHistoryForChain,\n", + " inputMessagesKey: \"input\",\n", + " historyMessagesKey: \"chat_history\",\n", + " outputMessagesKey: \"answer\",\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "1046c92f-21b3-4214-907d-92878d8cba23", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Task Decomposition involves breaking down complicated tasks into smaller, more manageable subgoals. 
Techniques such as the Chain of Thought (CoT) and Tree of Thoughts extend this by decomposing problems into multiple thought steps and exploring multiple reasoning possibilities at each step. LLMs can perform task decomposition using simple prompts, task-specific instructions, or human inputs, and some approaches like LLM+P involve using external classical planners.\n" + ] + } + ], + "source": [ + "const result1 = await conversationalRagChain.invoke(\n", + " { input: \"What is Task Decomposition?\" },\n", + " { configurable: { sessionId: \"abc123\" } }\n", + ");\n", + "console.log(result1.answer);" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Common ways of doing task decomposition include:\n", + "\n", + "1. Using simple prompting with an LLM, such as \"Steps for XYZ.\\n1.\" or \"What are the subgoals for achieving XYZ?\"\n", + "2. Utilizing task-specific instructions, like \"Write a story outline.\" for writing a novel.\n", + "3. Incorporating human inputs to guide and refine the decomposition process. \n", + "\n", + "Additionally, the LLM+P approach utilizes an external classical planner, involving PDDL to describe and plan complex tasks.\n" + ] + } + ], + "source": [ + "const result2 = await conversationalRagChain.invoke(\n", + " { input: \"What are common ways of doing it?\" },\n", + " { configurable: { sessionId: \"abc123\" } }\n", + ");\n", + "console.log(result2.answer);" + ] + }, + { + "cell_type": "markdown", + "id": "0ab1ded4-76d9-453f-9b9b-db9a4560c737", + "metadata": {}, + "source": [ + "### Tying it together" + ] + }, + { + "cell_type": "markdown", + "id": "8a08a5ea-df5b-4547-93c6-2a3940dd5c3e", + "metadata": {}, + "source": [ + "![](../../static/img/conversational_retrieval_chain.png)\n", + "\n", + "For convenience, we tie together all of the necessary steps in a single code cell:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "71c32048-1a41-465f-a9e2-c4affc332fd9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ input: 'What is Task Decomposition?' }\n", + "----\n", + "{ chat_history: [] }\n", + "----\n", + "{\n", + " context: [\n", + " Document {\n", + " pageContent: 'Fig. 1. Overview of a LLM-powered autonomous agent system.\\n' +\n", + " 'Component One: Planning#\\n' +\n", + " 'A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\n' +\n", + " 'Task Decomposition#\\n' +\n", + " 'Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n' +\n", + " 'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. 
The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.',\n", + " metadata: [Object],\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n' +\n", + " 'Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\n' +\n", + " 'Self-Reflection#',\n", + " metadata: [Object],\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Planning\\n' +\n", + " '\\n' +\n", + " 'Subgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\n' +\n", + " 'Reflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Memory\\n' +\n", + " '\\n' +\n", + " 'Short-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\n' +\n", + " 'Long-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Tool use\\n' +\n", + " '\\n' +\n", + " 'The agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.',\n", + " metadata: [Object],\n", + " id: undefined\n", + " },\n", + " Document {\n", + " pageContent: 'Resources:\\n' +\n", + " '1. Internet access for searches and information gathering.\\n' +\n", + " '2. Long Term memory management.\\n' +\n", + " '3. GPT-3.5 powered Agents for delegation of simple tasks.\\n' +\n", + " '4. File output.\\n' +\n", + " '\\n' +\n", + " 'Performance Evaluation:\\n' +\n", + " '1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n' +\n", + " '2. Constructively self-criticize your big-picture behavior constantly.\\n' +\n", + " '3. Reflect on past decisions and strategies to refine your approach.\\n' +\n", + " '4. Every command has a cost, so be smart and efficient. 
Aim to complete tasks in the least number of steps.',\n", + " metadata: [Object],\n", + " id: undefined\n", + " }\n", + " ]\n", + "}\n", + "----\n", + "{ answer: '' }\n", + "----\n", + "{ answer: 'Task' }\n", + "----\n", + "{ answer: ' decomposition' }\n", + "----\n", + "{ answer: ' involves' }\n", + "----\n", + "{ answer: ' breaking' }\n", + "----\n", + "{ answer: ' down' }\n", + "----\n", + "{ answer: ' a' }\n", + "----\n", + "{ answer: ' complex' }\n", + "----\n", + "{ answer: ' task' }\n", + "----\n", + "{ answer: ' into' }\n", + "----\n", + "{ answer: ' smaller' }\n", + "----\n", + "{ answer: ' and' }\n", + "----\n", + "{ answer: ' more' }\n", + "----\n", + "{ answer: ' manageable' }\n", + "----\n", + "{ answer: ' sub' }\n", + "----\n", + "{ answer: 'goals' }\n", + "----\n", + "{ answer: ' or' }\n", + "----\n", + "{ answer: ' steps' }\n", + "----\n", + "{ answer: '.' }\n", + "----\n", + "{ answer: ' This' }\n", + "----\n", + "{ answer: ' process' }\n", + "----\n", + "{ answer: ' allows' }\n", + "----\n", + "{ answer: ' an' }\n", + "----\n", + "{ answer: ' agent' }\n", + "----\n", + "{ answer: ' or' }\n", + "----\n", + "{ answer: ' model' }\n", + "----\n", + "{ answer: ' to' }\n", + "----\n", + "{ answer: ' efficiently' }\n", + "----\n", + "{ answer: ' handle' }\n", + "----\n", + "{ answer: ' intricate' }\n", + "----\n", + "{ answer: ' tasks' }\n", + "----\n", + "{ answer: ' by' }\n", + "----\n", + "{ answer: ' dividing' }\n", + "----\n", + "{ answer: ' them' }\n", + "----\n", + "{ answer: ' into' }\n", + "----\n", + "{ answer: ' simpler' }\n", + "----\n", + "{ answer: ' components' }\n", + "----\n", + "{ answer: '.' }\n", + "----\n", + "{ answer: ' Task' }\n", + "----\n", + "{ answer: ' decomposition' }\n", + "----\n", + "{ answer: ' can' }\n", + "----\n", + "{ answer: ' be' }\n", + "----\n", + "{ answer: ' achieved' }\n", + "----\n", + "{ answer: ' through' }\n", + "----\n", + "{ answer: ' techniques' }\n", + "----\n", + "{ answer: ' like' }\n", + "----\n", + "{ answer: ' Chain' }\n", + "----\n", + "{ answer: ' of' }\n", + "----\n", + "{ answer: ' Thought' }\n", + "----\n", + "{ answer: ',' }\n", + "----\n", + "{ answer: ' Tree' }\n", + "----\n", + "{ answer: ' of' }\n", + "----\n", + "{ answer: ' Thoughts' }\n", + "----\n", + "{ answer: ',' }\n", + "----\n", + "{ answer: ' or' }\n", + "----\n", + "{ answer: ' by' }\n", + "----\n", + "{ answer: ' using' }\n", + "----\n", + "{ answer: ' task' }\n", + "----\n", + "{ answer: '-specific' }\n", + "----\n", + "{ answer: ' instructions' }\n", + "----\n", + "{ answer: '.' 
}\n", + "----\n", + "{ answer: '' }\n", + "----\n", + "{ answer: '' }\n", + "----\n" + ] + } + ], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "import { createHistoryAwareRetriever } from \"langchain/chains/history_aware_retriever\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "import { createRetrievalChain } from \"langchain/chains/retrieval\";\n", + "import { RunnableWithMessageHistory } from \"@langchain/core/runnables\";\n", + "import { ChatMessageHistory } from \"langchain/stores/message/in_memory\";\n", + "import { BaseChatMessageHistory } from \"@langchain/core/chat_history\";\n", + "\n", + "const llm2 = new ChatOpenAI({ model: \"gpt-3.5-turbo\", temperature: 0 });\n", + "\n", + "// Construct retriever\n", + "const loader2 = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " {\n", + " selector: \".post-content, .post-title, .post-header\"\n", + " }\n", + ");\n", + "\n", + "const docs2 = await loader2.load();\n", + "\n", + "const textSplitter2 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits2 = await textSplitter2.splitDocuments(docs2);\n", + "const vectorstore2 = await MemoryVectorStore.fromDocuments(splits2, new OpenAIEmbeddings());\n", + "const retriever2 = vectorstore2.asRetriever();\n", + "\n", + "// Contextualize question\n", + "const contextualizeQSystemPrompt2 = \n", + " \"Given a chat history and the latest user question \" +\n", + " \"which might reference context in the chat history, \" +\n", + " \"formulate a standalone question which can be understood \" +\n", + " \"without the chat history. Do NOT answer the question, \" +\n", + " \"just reformulate it if needed and otherwise return it as is.\";\n", + "\n", + "const contextualizeQPrompt2 = ChatPromptTemplate.fromMessages([\n", + " [\"system\", contextualizeQSystemPrompt2],\n", + " new MessagesPlaceholder(\"chat_history\"),\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "const historyAwareRetriever2 = await createHistoryAwareRetriever({\n", + " llm: llm2,\n", + " retriever: retriever2,\n", + " rephrasePrompt: contextualizeQPrompt2\n", + "});\n", + "\n", + "// Answer question\n", + "const systemPrompt2 = \n", + " \"You are an assistant for question-answering tasks. \" +\n", + " \"Use the following pieces of retrieved context to answer \" +\n", + " \"the question. If you don't know the answer, say that you \" +\n", + " \"don't know. 
Use three sentences maximum and keep the \" +\n",
+ "  \"answer concise.\" +\n",
+ "  \"\\n\\n\" +\n",
+ "  \"{context}\";\n",
+ "\n",
+ "const qaPrompt2 = ChatPromptTemplate.fromMessages([\n",
+ "  [\"system\", systemPrompt2],\n",
+ "  new MessagesPlaceholder(\"chat_history\"),\n",
+ "  [\"human\", \"{input}\"],\n",
+ "]);\n",
+ "\n",
+ "const questionAnswerChain3 = await createStuffDocumentsChain({\n",
+ "  llm,\n",
+ "  prompt: qaPrompt2,\n",
+ "});\n",
+ "\n",
+ "const ragChain3 = await createRetrievalChain({\n",
+ "  retriever: historyAwareRetriever2,\n",
+ "  combineDocsChain: questionAnswerChain3,\n",
+ "});\n",
+ "\n",
+ "// Statefully manage chat history\n",
+ "const store2: Record<string, BaseChatMessageHistory> = {};\n",
+ "\n",
+ "function getSessionHistory2(sessionId: string): BaseChatMessageHistory {\n",
+ "  if (!(sessionId in store2)) {\n",
+ "    store2[sessionId] = new ChatMessageHistory();\n",
+ "  }\n",
+ "  return store2[sessionId];\n",
+ "}\n",
+ "\n",
+ "const conversationalRagChain2 = new RunnableWithMessageHistory({\n",
+ "  runnable: ragChain3,\n",
+ "  getMessageHistory: getSessionHistory2,\n",
+ "  inputMessagesKey: \"input\",\n",
+ "  historyMessagesKey: \"chat_history\",\n",
+ "  outputMessagesKey: \"answer\",\n",
+ "});\n",
+ "\n",
+ "// Example usage\n",
+ "const query2 = \"What is Task Decomposition?\";\n",
+ "\n",
+ "for await (const s of await conversationalRagChain2.stream(\n",
+ "  { input: query2 },\n",
+ "  { configurable: { sessionId: \"unique_session_id\" } }\n",
+ ")) {\n",
+ "  console.log(s);\n",
+ "  console.log(\"----\");\n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "861da8ed-d890-4fdc-a3bf-30433db61e0d",
+ "metadata": {},
+ "source": [
+ "## Agents {#agents}\n",
+ "\n",
+ "Agents leverage the reasoning capabilities of LLMs to make decisions during execution. Using agents allows you to offload some discretion over the retrieval process. Although their behavior is less predictable than chains, they offer some advantages in this context:\n",
+ "\n",
+ "- Agents generate the input to the retriever directly, without necessarily needing us to explicitly build in contextualization, as we did above;\n",
+ "- Agents can execute multiple retrieval steps in service of a query, or refrain from executing a retrieval step altogether (e.g., in response to a generic greeting from a user).\n",
+ "\n",
+ "### Retrieval tool\n",
+ "\n",
+ "Agents can access \"tools\" and manage their execution. 
In this case, we will convert our retriever into a LangChain tool to be wielded by the agent:" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "809cc747-2135-40a2-8e73-e4556343ee64", + "metadata": {}, + "outputs": [], + "source": [ + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", + "\n", + "const tool = createRetrieverTool(\n", + " retriever,\n", + " {\n", + " name: \"blog_post_retriever\",\n", + " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", + " }\n", + ")\n", + "const tools = [tool]" + ] + }, + { + "cell_type": "markdown", + "id": "07dcb968-ed9a-458a-85e1-528cd28c6965", + "metadata": {}, + "source": [ + "Tools are LangChain [Runnables](/docs/concepts/lcel), and implement the usual interface:" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABACPZzSugzrREQRO4mVQfI3cQOeL\",\n", - " \"content\": \"\",\n", - " \"additional_kwargs\": {\n", - " \"tool_calls\": [\n", - " {\n", - " \"id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\",\n", - " \"type\": \"function\",\n", - " \"function\": \"[Object]\"\n", - " }\n", - " ]\n", - " },\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 22,\n", - " \"promptTokens\": 1263,\n", - " \"totalTokens\": 1285\n", - " },\n", - " \"finish_reason\": \"tool_calls\",\n", - " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", - " },\n", - " \"tool_calls\": [\n", - " {\n", - " \"name\": \"blog_post_retriever\",\n", - " \"args\": {\n", - " \"query\": \"common ways of doing task decomposition\"\n", - " },\n", - " \"type\": \"tool_call\",\n", - " \"id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\"\n", - " }\n", - " ],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 1263,\n", - " \"output_tokens\": 22,\n", - " \"total_tokens\": 1285\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n", - "{\n", - " tools: {\n", - " messages: [\n", - " ToolMessage {\n", - " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. 
This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\",\n", - " \"name\": \"blog_post_retriever\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {},\n", - " \"tool_call_id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\"\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n", - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessage {\n", - " \"id\": \"chatcmpl-ABACQt9pT5dKCTaGQpVawcmCCWdET\",\n", - " \"content\": \"According to the blog post, common ways of performing Task Decomposition include:\\n\\n1. **Using Large Language Models (LLMs) with Simple Prompting:**\\n - Providing clear and structured prompts such as \\\"Steps for XYZ: 1., 2., 3.\\\" or asking \\\"What are the subgoals for achieving XYZ?\\\"\\n - This allows the model to break down the tasks step-by-step.\\n\\n2. **Task-Specific Instructions:**\\n - Employing specific instructions tailored to the task at hand, for example, \\\"Write a story outline\\\" for writing a novel.\\n - These instructions guide the model in decomposing the task appropriately.\\n\\n3. **Involving Human Inputs:**\\n - Integrating insights and directives from humans to aid in the decomposition process.\\n - This can ensure that the breakdown is comprehensive and accurately reflects the nuances of the task.\\n\\n4. 
**LLM+P Approach for Long-Horizon Planning:**\\n - Utilizing an external classical planner by translating the problem into Planning Domain Definition Language (PDDL).\\n - The process involves:\\n 1. Translating the problem into “Problem PDDL”.\\n 2. Requesting a classical planner to generate a PDDL plan based on an existing “Domain PDDL”.\\n 3. Translating the PDDL plan back into natural language.\\n\\nThese methods enable effective management and execution of complex tasks by transforming them into simpler, more manageable components.\",\n", - " \"additional_kwargs\": {},\n", - " \"response_metadata\": {\n", - " \"tokenUsage\": {\n", - " \"completionTokens\": 292,\n", - " \"promptTokens\": 2010,\n", - " \"totalTokens\": 2302\n", - " },\n", - " \"finish_reason\": \"stop\",\n", - " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", - " },\n", - " \"tool_calls\": [],\n", - " \"invalid_tool_calls\": [],\n", - " \"usage_metadata\": {\n", - " \"input_tokens\": 2010,\n", - " \"output_tokens\": 292,\n", - " \"total_tokens\": 2302\n", - " }\n", - " }\n", - " ]\n", - " }\n", - "}\n", - "----\n" - ] + "cell_type": "code", + "execution_count": 24, + "id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n", + "Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\n", + "Self-Reflection#\n", + "\n", + "Fig. 1. Overview of a LLM-powered autonomous agent system.\n", + "Component One: Planning#\n", + "A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n", + "Task Decomposition#\n", + "Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n", + "Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. 
The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n", + "\n", + "(3) Task execution: Expert models execute on the specific tasks and log results.\n", + "Instruction:\n", + "\n", + "With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n", + "\n", + "Resources:\n", + "1. Internet access for searches and information gathering.\n", + "2. Long Term memory management.\n", + "3. GPT-3.5 powered Agents for delegation of simple tasks.\n", + "4. File output.\n", + "\n", + "Performance Evaluation:\n", + "1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n", + "2. Constructively self-criticize your big-picture behavior constantly.\n", + "3. Reflect on past decisions and strategies to refine your approach.\n", + "4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n" + ] + } + ], + "source": [ + "console.log(await tool.invoke({ query: \"task decomposition\" }))" + ] + }, + { + "cell_type": "markdown", + "id": "f77e0217-28be-4b8b-b4c4-9cc4ed5ec201", + "metadata": {}, + "source": [ + "### Agent constructor\n", + "\n", + "Now that we have defined the tools and the LLM, we can create the agent. We will be using [LangGraph](https://langchain-ai.github.io/langgraphjs) to construct the agent. \n", + "Currently we are using a high level interface to construct the agent, but the nice thing about LangGraph is that this high-level interface is backed by a low-level, highly controllable API in case you want to modify the agent logic." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "1726d151-4653-4c72-a187-a14840add526", + "metadata": {}, + "outputs": [], + "source": [ + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "\n", + "const agentExecutor = createReactAgent({ llm, tools });" + ] + }, + { + "cell_type": "markdown", + "id": "6d5152ca-1c3b-4f58-bb28-f31c0be7ba66", + "metadata": {}, + "source": [ + "We can now try it out. 
Note that so far it is not stateful (we still need to add in memory)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "170403a2-c914-41db-85d8-a2c381da112d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABABtUmgD1ZlOHZd0nD9TR8yb3mMe\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_dWxEY41mg9VSLamVYHltsUxL\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 66,\n", + " \"totalTokens\": 85\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_3537616b13\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"Task Decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_dWxEY41mg9VSLamVYHltsUxL\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 66,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 85\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. 
Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_dWxEY41mg9VSLamVYHltsUxL\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABABuSj5FHmHFdeR2Pv7Cxcmq5aQz\",\n", + " \"content\": \"Task Decomposition is a technique that allows an agent to break down a complex task into smaller, more manageable subtasks or steps. The primary goal is to simplify the task to ensure efficient execution and better understanding. \\n\\n### Methods in Task Decomposition:\\n1. **Chain of Thought (CoT)**:\\n - **Description**: This technique involves instructing the model to “think step by step” to decompose hard tasks into smaller ones. It transforms large tasks into multiple manageable tasks, enhancing the model's performance and providing insight into its thinking process. \\n - **Example**: When given a complex problem, the model outlines sequential steps to reach a solution.\\n\\n2. **Tree of Thoughts**:\\n - **Description**: This extends CoT by exploring multiple reasoning possibilities at each step. The problem is decomposed into multiple thought steps, with several thoughts generated per step, forming a sort of decision tree.\\n - **Example**: For a given task, the model might consider various alternative actions at each stage, evaluating each before proceeding.\\n\\n3. 
**LLM with Prompts**:\\n - **Description**: Basic task decomposition can be done via simple prompts like \\\"Steps for XYZ\\\" or \\\"What are the subgoals for achieving XYZ?\\\" This can also be guided by task-specific instructions or human inputs when necessary.\\n - **Example**: Asking the model to list the subgoals for writing a novel might produce an outline broken down into chapters, character development, and plot points.\\n\\n4. **LLM+P**:\\n - **Description**: This approach involves outsourcing long-term planning to an external classical planner using Planning Domain Definition Language (PDDL). The task is translated into a PDDL problem by the model, planned using classical planning tools, and then translated back into natural language.\\n - **Example**: In robotics, translating a task into PDDL and then using a domain-specific planner to generate a sequence of actions.\\n\\n### Applications:\\n- **Planning**: Helps an agent plan tasks by breaking them into clear, manageable steps.\\n- **Self-Reflection**: Allows agents to reflect and refine their actions, learning from past mistakes to improve future performance.\\n- **Memory**: Utilizes short-term memory for immediate context and long-term memory for retaining and recalling information over extended periods.\\n- **Tool Use**: Enables the agent to call external APIs for additional information or capabilities not inherent in the model.\\n\\nIn essence, task decomposition leverages various methodologies to simplify complex tasks, ensuring better performance, improved reasoning, and effective task execution.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 522,\n", + " \"promptTokens\": 821,\n", + " \"totalTokens\": 1343\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 821,\n", + " \"output_tokens\": 522,\n", + " \"total_tokens\": 1343\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const query = \"What is Task Decomposition?\";\n", + "\n", + "for await (const s of await agentExecutor.stream(\n", + " { messages: [new HumanMessage(query)] }\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "1df703b1-aad6-48fb-b6fa-703e32ea88b9", + "metadata": {}, + "source": [ + "LangGraph comes with built in persistence, so we don't need to use ChatMessageHistory! Rather, we can pass in a checkpointer to our LangGraph agent directly" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "04a3a664-3c3f-4cd1-9995-26662a52da7c", + "metadata": {}, + "outputs": [], + "source": [ + "import { MemorySaver } from \"@langchain/langgraph\";\n", + "\n", + "const memory = new MemorySaver();\n", + "\n", + "const agentExecutorWithMemory = createReactAgent({ llm, tools, checkpointSaver: memory });" + ] + }, + { + "cell_type": "markdown", + "id": "02026f78-338e-4d18-9f05-131e1dd59197", + "metadata": {}, + "source": [ + "This is all we need to construct a conversational RAG agent.\n", + "\n", + "Let's observe its behavior. 
Note that if we input a query that does not require a retrieval step, the agent does not execute one:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACGc1vDPUSHYN7YVkuUMwpKR20P\",\n", + " \"content\": \"Hello, Bob! How can I assist you today?\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 64,\n", + " \"totalTokens\": 76\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_e375328146\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 64,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 76\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const config = { configurable: { thread_id: \"abc123\" } };\n", + "\n", + "for await (const s of await agentExecutorWithMemory.stream(\n", + " { messages: [new HumanMessage(\"Hi! I'm bob\")] },\n", + " config\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "a7928865-3dd6-4d36-abc6-2a30de770d09", + "metadata": {}, + "source": [ + "Further, if we input a query that does require a retrieval step, the agent generates the input to the tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "e2c570ae-dd91-402c-8693-ae746de63b16", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACI6WN7hkfJjFhIUBGt3TswtPOv\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 19,\n", + " \"promptTokens\": 89,\n", + " \"totalTokens\": 108\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_f82f5b050c\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"Task Decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 89,\n", + " \"output_tokens\": 19,\n", + " \"total_tokens\": 108\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. 
If inference results contain a file path, must tell the user the complete file path.\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_Lys2G4TbOMJ6RBuVvKnFSK4V\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACJu56eYSAyyMNaV9UEUwHS8vRu\",\n", + " \"content\": \"Task Decomposition is a method used to break down complicated tasks into smaller, more manageable steps. This approach leverages the \\\"Chain of Thought\\\" (CoT) technique, which prompts models to \\\"think step by step\\\" to enhance performance on complex tasks. Here’s a summary of the key concepts related to Task Decomposition:\\n\\n1. **Chain of Thought (CoT):**\\n - A prompting technique that encourages models to decompose hard tasks into simpler steps, transforming big tasks into multiple manageable sub-tasks.\\n - CoT helps to provide insights into the model’s thinking process.\\n\\n2. **Tree of Thoughts:**\\n - An extension of CoT, this approach explores multiple reasoning paths at each step.\\n - It creates a tree structure by generating multiple thoughts per step, and uses search methods like breadth-first search (BFS) or depth-first search (DFS) to explore these thoughts.\\n - Each state is evaluated by a classifier or majority vote.\\n\\n3. **Methods for Task Decomposition:**\\n - Simple prompting such as instructing with phrases like \\\"Steps for XYZ: 1., 2., 3.\\\" or \\\"What are the subgoals for achieving XYZ?\\\".\\n - Using task-specific instructions like \\\"Write a story outline\\\" for specific tasks such as writing a novel.\\n - Incorporating human inputs for better granularity.\\n\\n4. **LLM+P (Long-horizon Planning):**\\n - A method that involves using an external classical planner for long-horizon planning.\\n - The process involves translating the problem into a Planning Domain Definition Language (PDDL) problem, using a classical planner to generate a PDDL plan, and then translating it back into natural language.\\n\\nTask Decomposition is essential in planning complex tasks, allowing for efficient handling by breaking them into sub-tasks and sub-goals. 
This process is integral to the functioning of autonomous agent systems and enhances their capability to execute intricate tasks effectively.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 396,\n", + " \"promptTokens\": 844,\n", + " \"totalTokens\": 1240\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 844,\n", + " \"output_tokens\": 396,\n", + " \"total_tokens\": 1240\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "for await (const s of await agentExecutorWithMemory.stream(\n", + " { messages: [new HumanMessage(query)] },\n", + " config\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "26eaae33-3c4e-49fc-9fc6-db8967e25579", + "metadata": {}, + "source": [ + "Above, instead of inserting our query verbatim into the tool, the agent stripped unnecessary words like \"what\" and \"is\".\n", + "\n", + "This same principle allows the agent to use the context of the conversation when necessary:" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "570d8c68-136e-4ba5-969a-03ba195f6118", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACPZzSugzrREQRO4mVQfI3cQOeL\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 22,\n", + " \"promptTokens\": 1263,\n", + " \"totalTokens\": 1285\n", + " },\n", + " \"finish_reason\": \"tool_calls\",\n", + " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"blog_post_retriever\",\n", + " \"args\": {\n", + " \"query\": \"common ways of doing task decomposition\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 1263,\n", + " \"output_tokens\": 22,\n", + " \"total_tokens\": 1285\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " tools: {\n", + " messages: [\n", + " ToolMessage {\n", + " \"content\": \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\n\\nTask decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\\n\\nPlanning\\n\\nSubgoal and decomposition: The agent breaks down large tasks into smaller, manageable subgoals, enabling efficient handling of complex tasks.\\nReflection and refinement: The agent can do self-criticism and self-reflection over past actions, learn from mistakes and refine them for future steps, thereby improving the quality of final results.\\n\\n\\nMemory\\n\\nShort-term memory: I would consider all the in-context learning (See Prompt Engineering) as utilizing short-term memory of the model to learn.\\nLong-term memory: This provides the agent with the capability to retain and recall (infinite) information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n\\nTool use\\n\\nThe agent learns to call external APIs for extra information that is missing from the model weights (often hard to change after pre-training), including current information, code execution capability, access to proprietary information sources and more.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\",\n", + " \"name\": \"blog_post_retriever\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_call_id\": \"call_5nSZb396Tcg73Pok6Bx1XV8b\"\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n", + "{\n", + " agent: {\n", + " messages: [\n", + " AIMessage {\n", + " \"id\": \"chatcmpl-ABACQt9pT5dKCTaGQpVawcmCCWdET\",\n", + " \"content\": \"According to the blog post, common ways of performing Task Decomposition include:\\n\\n1. 
**Using Large Language Models (LLMs) with Simple Prompting:**\\n - Providing clear and structured prompts such as \\\"Steps for XYZ: 1., 2., 3.\\\" or asking \\\"What are the subgoals for achieving XYZ?\\\"\\n - This allows the model to break down the tasks step-by-step.\\n\\n2. **Task-Specific Instructions:**\\n - Employing specific instructions tailored to the task at hand, for example, \\\"Write a story outline\\\" for writing a novel.\\n - These instructions guide the model in decomposing the task appropriately.\\n\\n3. **Involving Human Inputs:**\\n - Integrating insights and directives from humans to aid in the decomposition process.\\n - This can ensure that the breakdown is comprehensive and accurately reflects the nuances of the task.\\n\\n4. **LLM+P Approach for Long-Horizon Planning:**\\n - Utilizing an external classical planner by translating the problem into Planning Domain Definition Language (PDDL).\\n - The process involves:\\n 1. Translating the problem into “Problem PDDL”.\\n 2. Requesting a classical planner to generate a PDDL plan based on an existing “Domain PDDL”.\\n 3. Translating the PDDL plan back into natural language.\\n\\nThese methods enable effective management and execution of complex tasks by transforming them into simpler, more manageable components.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 292,\n", + " \"promptTokens\": 2010,\n", + " \"totalTokens\": 2302\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_9f2bfdaa89\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 2010,\n", + " \"output_tokens\": 292,\n", + " \"total_tokens\": 2302\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}\n", + "----\n" + ] + } + ], + "source": [ + "const query3 = \"What according to the blog post are common ways of doing it? redo the search\";\n", + "\n", + "for await (const s of await agentExecutorWithMemory.stream(\n", + " { messages: [new HumanMessage(query3)] },\n", + " config\n", + ")) {\n", + " console.log(s);\n", + " console.log(\"----\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "f2724616-c106-4e15-a61a-3077c535f692", + "metadata": {}, + "source": [ + "Note that the agent was able to infer that \"it\" in our query refers to \"task decomposition\", and generated a reasonable search query as a result-- in this case, \"common ways of task decomposition\"." 
+ ] + }, + { + "cell_type": "markdown", + "id": "1cf87847-23bb-4672-b41c-12ad9cf81ed4", + "metadata": {}, + "source": [ + "### Tying it together\n", + "\n", + "For convenience, we tie together all of the necessary steps in a single code cell:" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "b1d2b4d4-e604-497d-873d-d345b808578e", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { MemorySaver } from \"@langchain/langgraph\";\n", + "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", + "\n", + "const memory3 = new MemorySaver();\n", + "const llm3 = new ChatOpenAI({ model: \"gpt-4o\", temperature: 0 });\n", + "\n", + "// Construct retriever\n", + "const loader3 = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " {\n", + " selector: \".post-content, .post-title, .post-header\"\n", + " }\n", + ");\n", + "\n", + "const docs3 = await loader3.load();\n", + "\n", + "const textSplitter3 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits3 = await textSplitter3.splitDocuments(docs3);\n", + "const vectorstore3 = await MemoryVectorStore.fromDocuments(splits3, new OpenAIEmbeddings());\n", + "const retriever3 = vectorstore3.asRetriever();\n", + "\n", + "// Build retriever tool\n", + "const tool3 = createRetrieverTool(\n", + " retriever3,\n", + " {\n", + " name: \"blog_post_retriever\",\n", + " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", + " }\n", + ");\n", + "const tools3 = [tool3];\n", + "\n", + "const agentExecutor3 = createReactAgent({ llm: llm3, tools: tools3, checkpointSaver: memory3 });" + ] + }, + { + "cell_type": "markdown", + "id": "cd6bf4f4-74f4-419d-9e26-f0ed83cf05fa", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "We've covered the steps to build a basic conversational Q&A application:\n", + "\n", + "- We used chains to build a predictable application that generates search queries for each user input;\n", + "- We used agents to build an application that \"decides\" when and how to generate search queries.\n", + "\n", + "To explore different types of retrievers and retrieval strategies, visit the [retrievers](/docs/how_to/#retrievers) section of the how-to guides.\n", + "\n", + "For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) LCEL page." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "const query3 = \"What according to the blog post are common ways of doing it? 
redo the search\";\n", - "\n", - "for await (const s of await agentExecutorWithMemory.stream(\n", - " { messages: [new HumanMessage(query3)] },\n", - " config\n", - ")) {\n", - " console.log(s);\n", - " console.log(\"----\");\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "f2724616-c106-4e15-a61a-3077c535f692", - "metadata": {}, - "source": [ - "Note that the agent was able to infer that \"it\" in our query refers to \"task decomposition\", and generated a reasonable search query as a result-- in this case, \"common ways of task decomposition\"." - ] - }, - { - "cell_type": "markdown", - "id": "1cf87847-23bb-4672-b41c-12ad9cf81ed4", - "metadata": {}, - "source": [ - "### Tying it together\n", - "\n", - "For convenience, we tie together all of the necessary steps in a single code cell:" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "b1d2b4d4-e604-497d-873d-d345b808578e", - "metadata": {}, - "outputs": [], - "source": [ - "import { ChatOpenAI } from \"@langchain/openai\";\n", - "import { MemorySaver } from \"@langchain/langgraph\";\n", - "import { createReactAgent } from \"@langchain/langgraph/prebuilt\";\n", - "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\";\n", - "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", - "\n", - "const memory3 = new MemorySaver();\n", - "const llm3 = new ChatOpenAI({ model: \"gpt-4o\", temperature: 0 });\n", - "\n", - "// Construct retriever\n", - "const loader3 = new CheerioWebBaseLoader(\n", - " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", - " {\n", - " selector: \".post-content, .post-title, .post-header\"\n", - " }\n", - ");\n", - "\n", - "const docs3 = await loader3.load();\n", - "\n", - "const textSplitter3 = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", - "const splits3 = await textSplitter3.splitDocuments(docs3);\n", - "const vectorstore3 = await MemoryVectorStore.fromDocuments(splits3, new OpenAIEmbeddings());\n", - "const retriever3 = vectorstore3.asRetriever();\n", - "\n", - "// Build retriever tool\n", - "const tool3 = createRetrieverTool(\n", - " retriever3,\n", - " {\n", - " name: \"blog_post_retriever\",\n", - " description: \"Searches and returns excerpts from the Autonomous Agents blog post.\",\n", - " }\n", - ");\n", - "const tools3 = [tool3];\n", - "\n", - "const agentExecutor3 = createReactAgent({ llm: llm3, tools: tools3, checkpointSaver: memory3 });" - ] - }, - { - "cell_type": "markdown", - "id": "cd6bf4f4-74f4-419d-9e26-f0ed83cf05fa", - "metadata": {}, - "source": [ - "## Next steps\n", - "\n", - "We've covered the steps to build a basic conversational Q&A application:\n", - "\n", - "- We used chains to build a predictable application that generates search queries for each user input;\n", - "- We used agents to build an application that \"decides\" when and how to generate search queries.\n", - "\n", - "To explore different types of retrievers and retrieval strategies, visit the [retrievers](/docs/how_to/#retrievers) section of the how-to guides.\n", - "\n", - "For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) LCEL page." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/tutorials/query_analysis.ipynb b/docs/core_docs/docs/tutorials/query_analysis.ipynb index 9a33120b8363..8e253710a54b 100644 --- a/docs/core_docs/docs/tutorials/query_analysis.ipynb +++ b/docs/core_docs/docs/tutorials/query_analysis.ipynb @@ -21,11 +21,11 @@ "\n", "This guide assumes familiarity with the following concepts:\n", "\n", - "- [Document loaders](/docs/concepts/#document-loaders)\n", - "- [Chat models](/docs/concepts/#chat-models)\n", - "- [Embeddings](/docs/concepts/#embedding-models)\n", + "- [Document loaders](/docs/concepts/document_loaders)\n", + "- [Chat models](/docs/concepts/chat_models)\n", + "- [Embeddings](/docs/concepts/embedding_models)\n", "- [Vector stores](/docs/concepts/#vector-stores)\n", - "- [Retrieval](/docs/concepts/#retrieval)\n", + "- [Retrieval](/docs/concepts/retrieval)\n", "\n", ":::\n", "\n", @@ -495,4 +495,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/tutorials/rag.ipynb b/docs/core_docs/docs/tutorials/rag.ipynb index 5f19f579fb51..43dfe6e66a2b 100644 --- a/docs/core_docs/docs/tutorials/rag.ipynb +++ b/docs/core_docs/docs/tutorials/rag.ipynb @@ -16,7 +16,7 @@ "complexity.\n", "\n", "If you're already familiar with basic retrieval, you might also be interested in\n", - "this [high-level overview of different retrieval techinques](/docs/concepts/#retrieval).\n", + "this [high-level overview of different retrieval techinques](/docs/concepts/retrieval).\n", "\n", "## What is RAG?\n", "\n", @@ -38,15 +38,15 @@ "The most common full sequence from raw data to answer looks like:\n", "\n", "### Indexing\n", - "1. **Load**: First we need to load our data. This is done with [Document Loaders](/docs/concepts/#document-loaders).\n", - "2. **Split**: [Text splitters](/docs/concepts/#text-splitters) break large `Documents` into smaller chunks. This is useful both for indexing data and for passing it in to a model, since large chunks are harder to search over and won't fit in a model's finite context window.\n", - "3. **Store**: We need somewhere to store and index our splits, so that they can later be searched over. This is often done using a [VectorStore](/docs/concepts/#vectorstores) and [Embeddings](/docs/concepts/#embedding-models) model.\n", + "1. **Load**: First we need to load our data. This is done with [Document Loaders](/docs/concepts/document_loaders).\n", + "2. **Split**: [Text splitters](/docs/concepts/text_splitters) break large `Documents` into smaller chunks. This is useful both for indexing data and for passing it in to a model, since large chunks are harder to search over and won't fit in a model's finite context window.\n", + "3. **Store**: We need somewhere to store and index our splits, so that they can later be searched over. This is often done using a [VectorStore](/docs/concepts/#vectorstores) and [Embeddings](/docs/concepts/embedding_models) model.\n", "\n", "![index_diagram](../../static/img/rag_indexing.png)\n", "\n", "### Retrieval and generation\n", - "4. 
**Retrieve**: Given a user input, relevant splits are retrieved from storage using a [Retriever](/docs/concepts/#retrievers).\n", - "5. **Generate**: A [ChatModel](/docs/concepts/#chat-models) / [LLM](/docs/concepts/#llms) produces an answer using a prompt that includes the question and the retrieved data\n", + "4. **Retrieve**: Given a user input, relevant splits are retrieved from storage using a [Retriever](/docs/concepts/retrievers).\n", + "5. **Generate**: A [ChatModel](/docs/concepts/chat_models) / [LLM](/docs/concepts/text_llms) produces an answer using a prompt that includes the question and the retrieved data\n", "\n", "![retrieval_diagram](../../static/img/rag_retrieval_generation.png)\n", "\n", @@ -238,7 +238,7 @@ "Let’s go through the above code step-by-step to really understand what’s going on.\n", "\n", "## 1. Indexing: Load\n", - "We need to first load the blog post contents. We can use [DocumentLoaders](/docs/concepts#document-loaders) for this, which are objects that load in data from a source and return a list of [Documents](https://api.js.langchain.com/classes/langchain_core.documents.Document.html). A Document is an object with some pageContent (`string`) and metadata (`Record`).\n", + "We need to first load the blog post contents. We can use [DocumentLoaders](/docs/concepts/document_loaders) for this, which are objects that load in data from a source and return a list of [Documents](https://api.js.langchain.com/classes/langchain_core.documents.Document.html). A Document is an object with some pageContent (`string`) and metadata (`Record`).\n", "\n", "In this case we’ll use the [CheerioWebBaseLoader](https://api.js.langchain.com/classes/langchain.document_loaders_web_cheerio.CheerioWebBaseLoader.html), which uses cheerio to load HTML form web URLs and parse it to text. We can pass custom selectors to the constructor to only parse specific elements:" ] @@ -309,7 +309,7 @@ "metadata": {}, "source": [ "### Go deeper\n", - "`DocumentLoader`: Class that loads data from a source as list of Documents. - [Docs](/docs/concepts#document-loaders): Detailed documentation on how to use\n", + "`DocumentLoader`: Class that loads data from a source as list of Documents. - [Docs](/docs/concepts/document_loaders): Detailed documentation on how to use\n", "\n", "`DocumentLoaders`. - [Integrations](/docs/integrations/document_loaders/) - [Interface](https:/api.js.langchain.com/classes/langchain.document_loaders_base.BaseDocumentLoader.html): API reference for the base interface." ] @@ -431,9 +431,9 @@ "source": [ "### Go deeper\n", "\n", - "`Embeddings`: Wrapper around a text embedding model, used for converting text to embeddings. - [Docs](/docs/concepts#embedding-models): Detailed documentation on how to use embeddings. - [Integrations](/docs/integrations/text_embedding): 30+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.embeddings.Embeddings.html): API reference for the base interface.\n", + "`Embeddings`: Wrapper around a text embedding model, used for converting text to embeddings. - [Docs](/docs/concepts/embedding_models): Detailed documentation on how to use embeddings. - [Integrations](/docs/integrations/text_embedding): 30+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.embeddings.Embeddings.html): API reference for the base interface.\n", "\n", - "`VectorStore`: Wrapper around a vector database, used for storing and querying embeddings. 
- [Docs](/docs/concepts#vectorstores): Detailed documentation on how to use vector stores. - [Integrations](/docs/integrations/vectorstores): 40+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.vectorstores.VectorStore.html): API reference for the base interface.\n", + "`VectorStore`: Wrapper around a vector database, used for storing and querying embeddings. - [Docs](/docs/concepts/vectorstores): Detailed documentation on how to use vector stores. - [Integrations](/docs/integrations/vectorstores): 40+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.vectorstores.VectorStore.html): API reference for the base interface.\n", "\n", "This completes the **Indexing** portion of the pipeline. At this point we have a query-able vector store containing the chunked contents of our blog post. Given a user question, we should ideally be able to return the snippets of the blog post that answer the question." ] @@ -446,7 +446,7 @@ "\n", "Now let’s write the actual application logic. We want to create a simple application that takes a user question, searches for documents relevant to that question, passes the retrieved documents and initial question to a model, and returns an answer.\n", "\n", - "First we need to define our logic for searching over documents. LangChain defines a [Retriever](/docs/concepts#retrievers) interface which wraps an index that can return relevant `Document`s given a string query.\n", + "First we need to define our logic for searching over documents. LangChain defines a [Retriever](/docs/concepts/retrievers) interface which wraps an index that can return relevant `Document`s given a string query.\n", "\n", "The most common type of Retriever is the [VectorStoreRetriever](https://api.js.langchain.com/classes/langchain_core.vectorstores.VectorStoreRetriever.html), which uses the similarity search capabilities of a vector store to facilitate retrieval. Any `VectorStore` can easily be turned into a `Retriever` with `VectorStore.asRetriever()`:" ] @@ -511,7 +511,7 @@ "\n", "Vector stores are commonly used for retrieval, but there are other ways to do retrieval, too.\n", "\n", - "`Retriever`: An object that returns `Document`s given a text query - [Docs](/docs/concepts#retrievers): Further documentation on the interface and built-in retrieval techniques. Some of which include: - `MultiQueryRetriever` [generates variants of the input question](/docs/how_to/multiple_queries/) to improve retrieval hit rate. - `MultiVectorRetriever` (diagram below) instead generates variants of the embeddings, also in order to improve retrieval hit rate. - Max marginal relevance selects for relevance and diversity among the retrieved documents to avoid passing in duplicate context. - Documents can be filtered during vector store retrieval using metadata filters. - Integrations: Integrations with retrieval services. - Interface: API reference for the base interface." + "`Retriever`: An object that returns `Document`s given a text query - [Docs](/docs/concepts/retrievers): Further documentation on the interface and built-in retrieval techniques. Some of which include: - `MultiQueryRetriever` [generates variants of the input question](/docs/how_to/multiple_queries/) to improve retrieval hit rate. - `MultiVectorRetriever` (diagram below) instead generates variants of the embeddings, also in order to improve retrieval hit rate. 
- Max marginal relevance selects for relevance and diversity among the retrieved documents to avoid passing in duplicate context. - Documents can be filtered during vector store retrieval using metadata filters. - Integrations: Integrations with retrieval services. - Interface: API reference for the base interface." ] }, { @@ -732,9 +732,9 @@ "### Go deeper\n", "\n", "#### Choosing a model\n", - "`ChatModel`: An LLM-backed chat model. Takes in a sequence of messages and returns a message. - [Docs](/docs/concepts/#chat-models): Detailed documentation on - [Integrations](/docs/integrations/chat/): 25+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html): API reference for the base interface.\n", + "`ChatModel`: An LLM-backed chat model. Takes in a sequence of messages and returns a message. - [Docs](/docs/concepts/chat_models): Detailed documentation on - [Integrations](/docs/integrations/chat/): 25+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.language_models_chat_models.BaseChatModel.html): API reference for the base interface.\n", "\n", - "`LLM`: A text-in-text-out LLM. Takes in a string and returns a string. - [Docs](/docs/concepts#llms) - [Integrations](/docs/integrations/llms/): 75+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html): API reference for the base interface.\n", + "`LLM`: A text-in-text-out LLM. Takes in a string and returns a string. - [Docs](/docs/concepts/text_llms) - [Integrations](/docs/integrations/llms/): 75+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core.language_models_llms.BaseLLM.html): API reference for the base interface.\n", "\n", "See a guide on RAG with locally-running models [here](/docs/tutorials/local_rag/).\n", "\n", @@ -804,7 +804,7 @@ "- [Return sources](/docs/how_to/qa_sources/): Learn how to return source documents\n", "- [Streaming](/docs/how_to/qa_streaming/): Learn how to stream outputs and intermediate steps\n", "- [Add chat history](/docs/how_to/qa_chat_history_how_to/): Learn how to add chat history to your app\n", - "- [Retrieval conceptual guide](/docs/concepts/#retrieval): A high-level overview of specific retrieval techniques" + "- [Retrieval conceptual guide](/docs/concepts/retrieval): A high-level overview of specific retrieval techniques" ] } ], @@ -828,4 +828,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/docs/core_docs/docs/tutorials/sql_qa.mdx b/docs/core_docs/docs/tutorials/sql_qa.mdx index 60559c31695a..c276c1d1af87 100644 --- a/docs/core_docs/docs/tutorials/sql_qa.mdx +++ b/docs/core_docs/docs/tutorials/sql_qa.mdx @@ -5,9 +5,9 @@ This guide assumes familiarity with the following concepts: - [Chaining runnables](/docs/how_to/sequence/) -- [Chat models](/docs/concepts/#chat-models) -- [Tools](/docs/concepts/#tools) -- [Agents](/docs/concepts/#agents) +- [Chat models](/docs/concepts/chat_models) +- [Tools](/docs/concepts/tools) +- [Agents](/docs/concepts/agents) ::: diff --git a/docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb b/docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb index 927aec36ad0c..816a5b8300df 100644 --- a/docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb +++ b/docs/core_docs/docs/versions/migrating_memory/chat_history.ipynb @@ -1,268 +1,268 @@ { - "cells": [ - { - 
"cell_type": "markdown", - "id": "c298a5c9-b9af-481d-9eba-cbd65f987a8a", - "metadata": {}, - "source": [ - "# How to use BaseChatMessageHistory with LangGraph\n", - "\n", - ":::info Prerequisites\n", - "\n", - "This guide assumes familiarity with the following concepts:\n", - "\n", - "- [Chat History](/docs/concepts/#chat-history)\n", - "- [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html)\n", - "- [LangGraph](https://langchain-ai.github.io/langgraphjs/concepts/high_level/)\n", - "- [Memory](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#memory)\n", - "\n", - ":::\n", - "\n", - "We recommend that new LangChain applications take advantage of the [built-in LangGraph peristence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to implement memory.\n", - "\n", - "In some situations, users may need to keep using an existing persistence solution for chat message history.\n", - "\n", - "Here, we will show how to use [LangChain chat message histories](/docs/integrations/memory/) (implementations of [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html)) with LangGraph." - ] - }, - { - "cell_type": "markdown", - "id": "548bc988-167b-43f1-860a-d247e28b2b42", - "metadata": {}, - "source": [ - "## Set up\n", - "\n", - "```typescript\n", - "process.env.ANTHROPIC_API_KEY = 'YOUR_API_KEY'\n", - "```\n", - "\n", - "```{=mdx}\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", - "\n", - "\n", - " @langchain/core @langchain/langgraph @langchain/anthropic\n", - "\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "c5e08659-b68c-48f2-8b33-e79b0c6999e1", - "metadata": {}, - "source": [ - "## ChatMessageHistory\n", - "\n", - "A message history needs to be parameterized by a conversation ID or maybe by the 2-tuple of (user ID, conversation ID).\n", - "\n", - "Many of the [LangChain chat message histories](/docs/integrations/memory/) will have either a `sessionId` or some `namespace` to allow keeping track of different conversations. Please refer to the specific implementations to check how it is parameterized.\n", - "\n", - "The built-in `InMemoryChatMessageHistory` does not contains such a parameterization, so we'll create a dictionary to keep track of the message histories." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "28049308-2543-48e6-90d0-37a88951a637", - "metadata": {}, - "outputs": [], - "source": [ - "import { InMemoryChatMessageHistory } from \"@langchain/core/chat_history\";\n", - "\n", - "const chatsBySessionId: Record = {}\n", - "\n", - "const getChatHistory = (sessionId: string) => {\n", - " let chatHistory: InMemoryChatMessageHistory | undefined = chatsBySessionId[sessionId]\n", - " if (!chatHistory) {\n", - " chatHistory = new InMemoryChatMessageHistory()\n", - " chatsBySessionId[sessionId] = chatHistory\n", - " }\n", - " return chatHistory\n", - "}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "94c53ce3-4212-41e6-8ad3-f0ab5df6130f", - "metadata": {}, - "source": [ - "## Use with LangGraph\n", - "\n", - "Next, we'll set up a basic chat bot using LangGraph. 
If you're not familiar with LangGraph, you should look at the following [Quick Start Tutorial](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/).\n", - "\n", - "We'll create a [LangGraph node](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#nodes) for the chat model, and manually manage the conversation history, taking into account the conversation ID passed as part of the RunnableConfig.\n", - "\n", - "The conversation ID can be passed as either part of the RunnableConfig (as we'll do here), or as part of the [graph state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#state)." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "d818e23f", - "metadata": {}, - "outputs": [ +  "cells": [ +   { +    "cell_type": "markdown", +    "id": "c298a5c9-b9af-481d-9eba-cbd65f987a8a", +    "metadata": {}, +    "source": [ +    "# How to use BaseChatMessageHistory with LangGraph\n", +    "\n", +    ":::info Prerequisites\n", +    "\n", +    "This guide assumes familiarity with the following concepts:\n", +    "\n", +    "- [Chat History](/docs/concepts/chat_history)\n", +    "- [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html)\n", +    "- [LangGraph](https://langchain-ai.github.io/langgraphjs/concepts/high_level/)\n", +    "- [Memory](https://langchain-ai.github.io/langgraphjs/concepts/agentic_concepts/#memory)\n", +    "\n", +    ":::\n", +    "\n", +    "We recommend that new LangChain applications take advantage of the [built-in LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to implement memory.\n", +    "\n", +    "In some situations, users may need to keep using an existing persistence solution for chat message history.\n", +    "\n", +    "Here, we will show how to use [LangChain chat message histories](/docs/integrations/memory/) (implementations of [BaseChatMessageHistory](https://api.js.langchain.com/classes/_langchain_core.chat_history.BaseChatMessageHistory.html)) with LangGraph." +    ] +   }, +   { +    "cell_type": "markdown", +    "id": "548bc988-167b-43f1-860a-d247e28b2b42", +    "metadata": {}, +    "source": [ +    "## Set up\n", +    "\n", +    "```typescript\n", +    "process.env.ANTHROPIC_API_KEY = 'YOUR_API_KEY'\n", +    "```\n", +    "\n", +    "```{=mdx}\n", +    "import Npm2Yarn from \"@theme/Npm2Yarn\"\n", +    "\n", +    "\n", +    "  @langchain/core @langchain/langgraph @langchain/anthropic\n", +    "\n", +    "```" +    ] +   }, +   { +    "attachments": {}, +    "cell_type": "markdown", +    "id": "c5e08659-b68c-48f2-8b33-e79b0c6999e1", +    "metadata": {}, +    "source": [ +    "## ChatMessageHistory\n", +    "\n", +    "A message history needs to be parameterized by a conversation ID or maybe by the 2-tuple of (user ID, conversation ID).\n", +    "\n", +    "Many of the [LangChain chat message histories](/docs/integrations/memory/) will have either a `sessionId` or some `namespace` to allow keeping track of different conversations. Please refer to the specific implementations to check how it is parameterized.\n", +    "\n", +    "The built-in `InMemoryChatMessageHistory` does not contain such a parameterization, so we'll create a dictionary to keep track of the message histories."
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "28049308-2543-48e6-90d0-37a88951a637", + "metadata": {}, + "outputs": [], + "source": [ + "import { InMemoryChatMessageHistory } from \"@langchain/core/chat_history\";\n", + "\n", + "const chatsBySessionId: Record = {}\n", + "\n", + "const getChatHistory = (sessionId: string) => {\n", + " let chatHistory: InMemoryChatMessageHistory | undefined = chatsBySessionId[sessionId]\n", + " if (!chatHistory) {\n", + " chatHistory = new InMemoryChatMessageHistory()\n", + " chatsBySessionId[sessionId] = chatHistory\n", + " }\n", + " return chatHistory\n", + "}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "94c53ce3-4212-41e6-8ad3-f0ab5df6130f", + "metadata": {}, + "source": [ + "## Use with LangGraph\n", + "\n", + "Next, we'll set up a basic chat bot using LangGraph. If you're not familiar with LangGraph, you should look at the following [Quick Start Tutorial](https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/).\n", + "\n", + "We'll create a [LangGraph node](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#nodes) for the chat model, and manually manage the conversation history, taking into account the conversation ID passed as part of the RunnableConfig.\n", + "\n", + "The conversation ID can be passed as either part of the RunnableConfig (as we'll do here), or as part of the [graph state](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#state)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d818e23f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hi! I'm bob\n", + "Hello Bob! It's nice to meet you. How can I assist you today?\n", + "what was my name?\n", + "You said your name is Bob.\n" + ] + } + ], + "source": [ + "import { v4 as uuidv4 } from \"uuid\";\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { StateGraph, MessagesAnnotation, END, START } from \"@langchain/langgraph\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { RunnableConfig } from \"@langchain/core/runnables\";\n", + "\n", + "// Define a chat model\n", + "const model = new ChatAnthropic({ modelName: \"claude-3-haiku-20240307\" });\n", + "\n", + "// Define the function that calls the model\n", + "const callModel = async (\n", + " state: typeof MessagesAnnotation.State,\n", + " config: RunnableConfig\n", + "): Promise> => {\n", + " if (!config.configurable?.sessionId) {\n", + " throw new Error(\n", + " \"Make sure that the config includes the following information: {'configurable': {'sessionId': 'some_value'}}\"\n", + " );\n", + " }\n", + "\n", + " const chatHistory = getChatHistory(config.configurable.sessionId as string);\n", + "\n", + " let messages = [...(await chatHistory.getMessages()), ...state.messages];\n", + "\n", + " if (state.messages.length === 1) {\n", + " // First message, ensure it's in the chat history\n", + " await chatHistory.addMessage(state.messages[0]);\n", + " }\n", + "\n", + " const aiMessage = await model.invoke(messages);\n", + "\n", + " // Update the chat history\n", + " await chatHistory.addMessage(aiMessage);\n", + "\n", + " return { messages: [aiMessage] };\n", + "};\n", + "\n", + "// Define a new graph\n", + "const workflow = new StateGraph(MessagesAnnotation)\n", + " .addNode(\"model\", callModel)\n", + " .addEdge(START, \"model\")\n", + " .addEdge(\"model\", END);\n", + "\n", + "const app = workflow.compile();\n", + "\n", + "// Create 
a unique session ID to identify the conversation\n", + "const sessionId = uuidv4();\n", + "const config = { configurable: { sessionId }, streamMode: \"values\" as const };\n", + "\n", + "const inputMessage = new HumanMessage(\"hi! I'm bob\");\n", + "\n", + "for await (const event of await app.stream({ messages: [inputMessage] }, config)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}\n", + "\n", + "// Here, let's confirm that the AI remembers our name!\n", + "const followUpMessage = new HumanMessage(\"what was my name?\");\n", + "\n", + "for await (const event of await app.stream({ messages: [followUpMessage] }, config)) {\n", + " const lastMessage = event.messages[event.messages.length - 1];\n", + " console.log(lastMessage.content);\n", + "}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "da0536dd-9a0b-49e3-b0b6-e8c7abf3b1f9", + "metadata": {}, + "source": [ + "## Using With RunnableWithMessageHistory\n", + "\n", + "This how-to guide used the `messages` and `addMessages` interface of `BaseChatMessageHistory` directly. \n", + "\n", + "Alternatively, you can use [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html), as [LCEL](/docs/concepts/lcel/) can be used inside any [LangGraph node](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#nodes).\n", + "\n", + "To do that replace the following code:\n", + "\n", + "```typescript\n", + "const callModel = async (\n", + " state: typeof MessagesAnnotation.State,\n", + " config: RunnableConfig\n", + "): Promise> => {\n", + " // highlight-start\n", + " if (!config.configurable?.sessionId) {\n", + " throw new Error(\n", + " \"Make sure that the config includes the following information: {'configurable': {'sessionId': 'some_value'}}\"\n", + " );\n", + " }\n", + "\n", + " const chatHistory = getChatHistory(config.configurable.sessionId as string);\n", + "\n", + " let messages = [...(await chatHistory.getMessages()), ...state.messages];\n", + "\n", + " if (state.messages.length === 1) {\n", + " // First message, ensure it's in the chat history\n", + " await chatHistory.addMessage(state.messages[0]);\n", + " }\n", + "\n", + " const aiMessage = await model.invoke(messages);\n", + "\n", + " // Update the chat history\n", + " await chatHistory.addMessage(aiMessage);\n", + " // highlight-end\n", + " return { messages: [aiMessage] };\n", + "};\n", + "```\n", + "\n", + "With the corresponding instance of `RunnableWithMessageHistory` defined in your current application.\n", + "\n", + "```typescript\n", + "const runnable = new RunnableWithMessageHistory({\n", + " // ... 
configuration from existing code\n", + "});\n", + "\n", + "const callModel = async (\n", + " state: typeof MessagesAnnotation.State,\n", + " config: RunnableConfig\n", + "): Promise> => {\n", + " // RunnableWithMessageHistory takes care of reading the message history\n", + " // and updating it with the new human message and AI response.\n", + " const aiMessage = await runnable.invoke(state.messages, config);\n", + " return {\n", + " messages: [aiMessage]\n", + " };\n", + "};\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" } - ], - "source": [ - "import { v4 as uuidv4 } from \"uuid\";\n", - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", - "import { StateGraph, MessagesAnnotation, END, START } from \"@langchain/langgraph\";\n", - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "import { RunnableConfig } from \"@langchain/core/runnables\";\n", - "\n", - "// Define a chat model\n", - "const model = new ChatAnthropic({ modelName: \"claude-3-haiku-20240307\" });\n", - "\n", - "// Define the function that calls the model\n", - "const callModel = async (\n", - " state: typeof MessagesAnnotation.State,\n", - " config: RunnableConfig\n", - "): Promise> => {\n", - " if (!config.configurable?.sessionId) {\n", - " throw new Error(\n", - " \"Make sure that the config includes the following information: {'configurable': {'sessionId': 'some_value'}}\"\n", - " );\n", - " }\n", - "\n", - " const chatHistory = getChatHistory(config.configurable.sessionId as string);\n", - "\n", - " let messages = [...(await chatHistory.getMessages()), ...state.messages];\n", - "\n", - " if (state.messages.length === 1) {\n", - " // First message, ensure it's in the chat history\n", - " await chatHistory.addMessage(state.messages[0]);\n", - " }\n", - "\n", - " const aiMessage = await model.invoke(messages);\n", - "\n", - " // Update the chat history\n", - " await chatHistory.addMessage(aiMessage);\n", - "\n", - " return { messages: [aiMessage] };\n", - "};\n", - "\n", - "// Define a new graph\n", - "const workflow = new StateGraph(MessagesAnnotation)\n", - " .addNode(\"model\", callModel)\n", - " .addEdge(START, \"model\")\n", - " .addEdge(\"model\", END);\n", - "\n", - "const app = workflow.compile();\n", - "\n", - "// Create a unique session ID to identify the conversation\n", - "const sessionId = uuidv4();\n", - "const config = { configurable: { sessionId }, streamMode: \"values\" as const };\n", - "\n", - "const inputMessage = new HumanMessage(\"hi! 
I'm bob\");\n", - "\n", - "for await (const event of await app.stream({ messages: [inputMessage] }, config)) {\n", - " const lastMessage = event.messages[event.messages.length - 1];\n", - " console.log(lastMessage.content);\n", - "}\n", - "\n", - "// Here, let's confirm that the AI remembers our name!\n", - "const followUpMessage = new HumanMessage(\"what was my name?\");\n", - "\n", - "for await (const event of await app.stream({ messages: [followUpMessage] }, config)) {\n", - " const lastMessage = event.messages[event.messages.length - 1];\n", - " console.log(lastMessage.content);\n", - "}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "da0536dd-9a0b-49e3-b0b6-e8c7abf3b1f9", - "metadata": {}, - "source": [ - "## Using With RunnableWithMessageHistory\n", - "\n", - "This how-to guide used the `messages` and `addMessages` interface of `BaseChatMessageHistory` directly. \n", - "\n", - "Alternatively, you can use [RunnableWithMessageHistory](https://api.js.langchain.com/classes/_langchain_core.runnables.RunnableWithMessageHistory.html), as [LCEL](/docs/concepts/#langchain-expression-language-lcel/) can be used inside any [LangGraph node](https://langchain-ai.github.io/langgraphjs/concepts/low_level/#nodes).\n", - "\n", - "To do that replace the following code:\n", - "\n", - "```typescript\n", - "const callModel = async (\n", - " state: typeof MessagesAnnotation.State,\n", - " config: RunnableConfig\n", - "): Promise> => {\n", - " // highlight-start\n", - " if (!config.configurable?.sessionId) {\n", - " throw new Error(\n", - " \"Make sure that the config includes the following information: {'configurable': {'sessionId': 'some_value'}}\"\n", - " );\n", - " }\n", - "\n", - " const chatHistory = getChatHistory(config.configurable.sessionId as string);\n", - "\n", - " let messages = [...(await chatHistory.getMessages()), ...state.messages];\n", - "\n", - " if (state.messages.length === 1) {\n", - " // First message, ensure it's in the chat history\n", - " await chatHistory.addMessage(state.messages[0]);\n", - " }\n", - "\n", - " const aiMessage = await model.invoke(messages);\n", - "\n", - " // Update the chat history\n", - " await chatHistory.addMessage(aiMessage);\n", - " // highlight-end\n", - " return { messages: [aiMessage] };\n", - "};\n", - "```\n", - "\n", - "With the corresponding instance of `RunnableWithMessageHistory` defined in your current application.\n", - "\n", - "```typescript\n", - "const runnable = new RunnableWithMessageHistory({\n", - " // ... 
configuration from existing code\n", - "});\n", - "\n", - "const callModel = async (\n", - " state: typeof MessagesAnnotation.State,\n", - " config: RunnableConfig\n", - "): Promise> => {\n", - " // RunnableWithMessageHistory takes care of reading the message history\n", - " // and updating it with the new human message and AI response.\n", - " const aiMessage = await runnable.invoke(state.messages, config);\n", - " return {\n", - " messages: [aiMessage]\n", - " };\n", - "};\n", - "```" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "mode": "typescript", - "name": "javascript", - "typescript": true - }, - "file_extension": ".ts", - "mimetype": "text/typescript", - "name": "typescript", - "version": "3.7.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/docs/core_docs/docs/versions/migrating_memory/index.mdx b/docs/core_docs/docs/versions/migrating_memory/index.mdx index 837e7f03c544..fcf9bd1a5712 100644 --- a/docs/core_docs/docs/versions/migrating_memory/index.mdx +++ b/docs/core_docs/docs/versions/migrating_memory/index.mdx @@ -18,7 +18,7 @@ The main advantages of persistence in LangGraph are: - Error recovery - Allowing human intervention in AI workflows - Exploring different conversation paths ("time travel") -- Full compatibility with both traditional [language models](/docs/concepts/#llms) and modern [chat models](/docs/concepts/#chat-models). Early memory implementations in LangChain weren't designed for newer chat model APIs, causing issues with features like tool-calling. LangGraph memory can persist any custom state. +- Full compatibility with both traditional [language models](/docs/concepts/text_llms) and modern [chat models](/docs/concepts/chat_models). Early memory implementations in LangChain weren't designed for newer chat model APIs, causing issues with features like tool-calling. LangGraph memory can persist any custom state. - Highly customizable, allowing you to fully control how memory works and use different storage backends. ## Evolution of memory in LangChain @@ -68,7 +68,8 @@ These guides assume some familiarity with the following concepts: - [LangGraph](https://langchain-ai.github.io/langgraphjs/) - [v0.0.x Memory](https://js.langchain.com/v0.1/docs/modules/memory/) - [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraphjs/how-tos/persistence/) - ::: + +::: ### 1. 
Managing conversation history diff --git a/docs/core_docs/sidebars.js b/docs/core_docs/sidebars.js index 50336c478fd8..851912174a54 100644 --- a/docs/core_docs/sidebars.js +++ b/docs/core_docs/sidebars.js @@ -47,7 +47,19 @@ module.exports = { }, ], }, - "concepts", + { + type: "category", + link: { type: "doc", id: "concepts/index" }, + label: "Conceptual Guide", + collapsible: false, + items: [ + { + type: "autogenerated", + dirName: "concepts", + className: "hidden", + }, + ], + }, { type: "category", label: "Ecosystem", diff --git a/docs/core_docs/src/css/custom.css b/docs/core_docs/src/css/custom.css index e97a71b9d4a2..a59f2492203c 100644 --- a/docs/core_docs/src/css/custom.css +++ b/docs/core_docs/src/css/custom.css @@ -37,6 +37,9 @@ --ifm-menu-link-padding-horizontal: 0.5rem; --ifm-menu-link-padding-vertical: 0.5rem; --doc-sidebar-width: 275px !important; + /* Code highlighting background color */ + --docusaurus-highlighted-code-line-bg: rgb(202, 203, 205); + } /* For readability concerns, you should choose a lighter palette in dark mode. */ @@ -48,6 +51,8 @@ --ifm-color-primary-light: #29d5b0; --ifm-color-primary-lighter: #32d8b4; --ifm-color-primary-lightest: #4fddbf; + /* Code highlighting background color */ + --docusaurus-highlighted-code-line-bg: rgb(73, 73, 73); } nav, h1, h2, h3, h4 { @@ -305,4 +310,4 @@ nav, h1, h2, h3, h4 { [data-theme='dark'] .announcementBar_node_modules-\@docusaurus-theme-classic-lib-theme-AnnouncementBar-styles-module button { color: #fff; -} \ No newline at end of file +} diff --git a/docs/core_docs/src/theme/RedirectAnchors.js b/docs/core_docs/src/theme/RedirectAnchors.js new file mode 100644 index 000000000000..0de000e37977 --- /dev/null +++ b/docs/core_docs/src/theme/RedirectAnchors.js @@ -0,0 +1,111 @@ +// eslint-disable-next-line no-unused-vars +import React from "react"; + +function RedirectAnchors() { + if (typeof window === "undefined") return null; + + // get # anchor from url + const lookup = { + "#conceptual-guide": "/docs/concepts", + "#architecture": "/docs/concepts/architecture", + "#langchaincore": "/docs/concepts/architecture/#langchaincore", + "#langchain": "/docs/concepts/architecture/#langchain", + "#langchaincommunity": "/docs/concepts/architecture/#langchaincommunity", + "#partner-packages": "/docs/concepts/architecture/#integration-packages", + "#langgraph": "/docs/concepts/architecture/#langchianlanggraph", + "#langsmith": "/docs/concepts/architecture/#langsmith", + "#langchain-expression-language-lcel": "/docs/concepts/lcel", + "#langchain-expression-language": "/docs/concepts/lcel", + "#runnable-interface": "/docs/concepts/runnables", + "#components": "/docs/concepts/", + "#chat-models": "/docs/concepts/chat_models", + "#multimodality": "/docs/concepts/multimodality", + "#llms": "/docs/concepts/chat_models", + "#messages": "/docs/concepts/messages", + "#message-types": "/docs/concepts/messages", + "#humanmessage": "/docs/concepts/messages/#humanmessage", + "#aimessage": "/docs/concepts/messages/#aimessage", + "#systemmessage": "/docs/concepts/messages/#systemmessage", + "#toolmessage": "/docs/concepts/messages/#toolmessage", + "#legacy-functionmessage": + "/docs/concepts/messages/#legacy-functionmessage", + "#prompt-templates": "/docs/concepts/prompt_templates", + "#string-prompttemplates": "/docs/concepts/prompt_templates", + "#chatprompttemplates": "/docs/concepts/prompt_templates", + "#messagesplaceholder": "/docs/concepts/prompt_templates", + "#example-selectors": "/docs/concepts/example_selectors", + "#output-parsers": 
"/docs/concepts/output_parsers", + "#chat-history": "/docs/concepts/chat_history", + "#documents": + "https://api.js.langchain.com/classes/_langchain_core.documents.Document.html", + "#document": + "https://api.js.langchain.com/classes/_langchain_core.documents.Document.html", + "#document-loaders": "/docs/concepts/document_loaders", + "#text-splitters": "/docs/concepts/text_splitters", + "#embedding-models": "/docs/concepts/embedding_models", + "#vector-stores": "/docs/concepts/vectorstores", + "#vectorstore": "/docs/concepts/vectorstores", + "#retrievers": "/docs/concepts/retrievers", + "#keyvalue-stores": "/docs/concepts/key_value_stores", + "#interface": "/docs/concepts/runnables", + "#tools": "/docs/concepts/tools", + "#invoke-with-just-the-arguments": "/docs/concepts/tools", + "#invoke-with-toolcall": "/docs/concepts/tools", + "#best-practices": "/docs/concepts/tools/#best-practices", + "#related": "/docs/concepts/tools", + "#toolkits": "/docs/concepts/toosl/#toolkits", + "#initialize-a-toolkit": "/docs/concepts/toosl/#toolkits", + "#get-list-of-tools": "/docs/concepts/toosl/#toolkits", + "#agents": "/docs/concepts/agents", + "#react-agents": "/docs/concepts/agents", + "#callbacks": "/docs/concepts/callbacks", + "#callback-events": "/docs/concepts/callbacks/#callback-events", + "#callback-handlers": "/docs/concepts/callbacks/#callback-handlers", + "#passing-callbacks": "/docs/concepts/callbacks/#passing-callbacks", + "#techniques": "/docs/concepts/", + "#streaming": "/docs/concepts/streaming", + "#stream": "/docs/concepts/streaming#stream", + "#streamevents": "/docs/concepts/streaming#streamevents", + "#tokens": "/docs/concepts/tokens", + "#functiontool-calling": "/docs/concepts/tool_calling", + "#tool-usage": "/docs/concepts/tool_calling", + "#structured-output": "/docs/concepts/structured_outputs", + "#withstructuredoutput": "/docs/concepts/structured_outputs", + "#raw-prompting": "/docs/concepts/structured_outputs", + "#json-mode": "/docs/concepts/structured_outputs/#json-mode", + "#tool-calling-structuredoutputtoolcalling": + "/docs/concepts/structured_outputs", + "#fewshot-prompting": "/docs/concepts/few_shot_prompting", + "#1-generating-examples": + "/docs/concepts/few_shot_prompting/#1-generating-examples", + "#2-number-of-examples": + "/docs/concepts/few_shot_prompting/#2-number-of-examples", + "#3-selecting-examples": + "/docs/concepts/few_shot_prompting/#3-selecting-examples", + "#4-formatting-examples": + "/docs/concepts/few_shot_prompting/#4-formatting-examples", + "#retrieval": "/docs/concepts/retrieval", + "#query-translation": "/docs/concepts/retrieval/#query-re-writing", + "#routing": "/docs/concepts/", + "#query-construction": "/docs/concepts/retrieval/#query-construction", + "#indexing": "/docs/concepts/retrieval/", + "#postprocessing": "/docs/concepts/retrieval/", + "#generation": "/docs/concepts/rag", + "#text-splitting": "/docs/concepts/text_splitting", + "#evaluation": "/docs/concepts/evaluation", + "#tracing": "/docs/concepts/tracing", + "#few-shot-prompting": "/docs/concepts/few_shot_prompting", + }; + + const hash = window?.location?.hash; + if (hash) { + if (lookup[hash]) { + window.location.href = lookup[hash]; + return null; + } + } + + return null; +} + +export default RedirectAnchors; diff --git a/docs/core_docs/static/img/conversation_patterns.png b/docs/core_docs/static/img/conversation_patterns.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf45cc987d43476bb30e56f026e72ef9ae9666c GIT binary patch literal 106974 
[binary image data for conversation_patterns.png omitted]
zr~QejPwOGoUY`hQ9&c|AYjLN004%vyfS8BYj8ZvVx`hZ5$U9;g5mc|#WYX@^&no$} zjNAULAyOW~;=*I7OEx4;FIy`_f!H?T=g$(v^xb<&aT!S4r)|Hk6An57!0D4%7js=4BZP+kBj|wnGhg{zm z6h1#}$qx=k_~^>|ij$SYf%jpmt0u|JGO#ujQlDh{iMh12*dF$JKa<9vDVEMd)4`A+ zfJ@egc>i86=Go!-cBWmP(Vs+)zAmz740RB$Q|e2Y9w`RgW+6TRgP0ldS{v$t#pC)_ zYlMFtWb1OgGl?z@6RTGAlRk_T(ufhU`z!gKo&bmvqR#Q6RM){C6WZXWvFSdbW;3pE(N7h9rI4(%#CiFiMc8T7E1@LFhfGSMNcd+YEVE4|9w zvT5uNwxx|XeaZQ@#i}AX6oa)VRre`QuU{&AXICp>W=et1%)y@O(EZ) zhZ)QyfBG+Sk`tEQaQ}zFPY*Z4xA)>vMX^315FT3nkM1?&nJ%03WloMJVo|IWjhapH zewTyvIn>n=mWwU-8D@V->f~BZna4QK`}x9a+3qw8G!Z-?Y?6Ns2%oEpWSK|fQ1S4; z%t95h{D_PIsK!x;m)F;$^R9Qduax*Z zL8G9ZJ~;PCnTABf#1v7S=gijMGicr+5it;vkA`^|IFssXGlZ=U2SOvdWhAhlC3`9n zB5E26;(%Il@h@)DAMR=IE~mJ=(0akam%?E^8@7wV-7O;zK@kPNIJGH# z`LS?|p#hIt3P&}GD=Bk@po4PnD^3fLqP{j9omReK9>`Tg;%qc!oT^PV%5<`pu$8x8Q~hDUHF1%ThJPXTLzz41d|uX76`KJ zi5)K`)XS^VL9!tp&!5&(##8Nobg~LAkFsB47%KTBOaRWSX^g#*#zjDwqWPT|Z}olD z-%jZoF$V{=b5IkEDF($je>yQR-|Vo|fXcZwlL1F+Ur$qtWs26=i0z_bW!9L9Ww|c` z#e`p?#1uSblmS}L|B(ps@$VL4H>3Kz?n>C_a>g^c32bU{IuGZYb^hUg$br?yK3yu@ z@23ORjn~{Km(bYiv;E2Sa;@>6pRSAHl#_Pn3 z;NLS4yqF~XBL8yFu}DB)bfRQkcn))+03lf>EQ*2vugA~YiUovdp0i9m_lux(omN|dJkzY8TCDr| z^qcPw37M6zy6{>`Q+#R4^@F0YP+c-m7)CkY7VCoh=i}1RL_z+2gCf6D?N;z-0Kf+d z1%_=)OW9owcHb3tO2gw3oRPbGEx^LKhc_rT0VLe&u!+S>8owbR2KN{MD;gd&0Y_*J zCd|$l4*bWPry{b2F}00KLCVp5Uf5naqr3op4E9fkRmg8x*~=E(M{DDOA%3Tp$+G@*{MXI|7Q~}G z%*P-v?)B(HgeAL&&7oT7U~A28poL*%fe?n>zb0RGiLP@~2JWs5_ahLpe-2^`J`;`C z6cPrRmfo`^bQy+leu_8D@ql;3`!zt?f>!(j1aVrsC2Y%qHI5&#B*uxrsS*#1s8*!B z`-;`UHGgEgz`OMgtP8q@6w)C-bd5fs2LE*3kr>KsJ~6W4GL!vM$eEjX=MV8D{EX&g z)-s1~G@#R1Ia{5`<4IYkpz^i3QJi_!acsH*a41%(U`u+$gkmP4^v|neU{H(14dmN? zAl(*+z+zN+z-!zA@Y#SyUIfq<&dP?YI!9|XU-ogG^!217r7QIx$=F_JMF#)caW#ChR? z(ILDw5e{n|@<$V*or~f}dQUr)m@j;YJn}k^@JLapH#fVyQ|YMC-)$kGp6bK(x#&~< z$v%BYWLw^DI4DE@$3G-j7C#9B-M;0j3g zRYDH)h3hs3(yfOwp~bP#j43IErGei!!XDfLNAsl2D?)OtS1H4af~I@Y$rC>No8rCh z7AAy2%&-uAr?NTtY<4U`!?q)+Mk55GZ<+bz)L6Y;DMy>NAgG)jp|csbOWTiVQWFLU zu7Bm5D>^auphYf=#Rm+jC1eseRx?~=9CcH7CIE7MI#g^sf&dApvI1Nn9?h1+Hil}S z2Mef33%L%`sMWRzxjIU=11D2!|1Ij9{{^d2ftaCZ@*yj!jiyqo)eKOFhSl}VCo+Uj zA4gkE`07di#5p0MO8jrgb~`NH+&kbN31#$xdAqv`qn*t#h3Pv4^4=uxTlmvCuDBU9 zX5ppUd+B`Wo%jKrLB?sPAq9b!c1@WVHr9BrNKF~qz_jQ#Cvb?(J9e}OYfh8o;Q9(t zqQ>x{Z1SsPWl)xG3h8=!lF9LPbejgk=^?dju)npqvHGHwjel$D%ue#6DA}-67jqU0 zD;*V>s+ulrRyfqNWYjS&kx2@>DjAUyL3#V>(EMvJ$uftLF@@MWBApa7sC=(3r68qq z8kU;ns8xDKs#ef^SGu`Fd@rCq*y zs91(tZrT4RWk;$;P2GS)Sr$Qg=>;JG_96>svQ08qw+bJm!s2gBUh=>8E)s}v(X8t? 
z(T=ka=eX{+Foe!y!uhpN6qMeBSgVc|lczx*d@b?OYk^ilUj+j(-D{*Unesr#Fm zPPavo1G*+tfdzr7pl-7gx>ezj7?Uy4SXiv`-vt7sOQ@k??UJ9f0hVzl|7*RCBIRJW~-$GAp8A0r%}$6gK+67ur)%oMPS2>`QL>B8tNQPbbab?VQH>B)PD ztkTwF*+qjrqQY`m_~;L4qNpk49Z{}m(0%vXO8<}$79>#-U} zxFGxfc+xM9>wUDc2%ms&U0$8C;qm^85Bo?zjmj()`W6YRQnGF&&Po5yo18y`g`(zV zbmK~^HlXG9NWGV3wsy=&wM`9aS+9ny)nIYek`{inmU&QA)J7;~FD{$fS7W1Ca`2B` zUo-O;`BkAepPT3=RE4kZbZiZ7>y*}gT6QutpV)1Xqu4r}rjs+jlzCZdepfDdJNHAG z)1^=rI+m1F7MX%}n4L^?9b_W^1V-wqLRk#ohYJ3GeO6nBC)%{3NBir4a^oNcgXw`|IT2CH+r#2=DupQilUk$ zlAZ0U5JtaaF5cb}CkT_mGZFuyIg8sW))Q=^+urFhW3{b`$x>B?#%3dMq`)Rbk>80} zm50Ey)xS^u17C8vl3rEXEM%h+v4(o5|7WE5=c@N!a&DJbA#C|mfq~QWG4Tdo+o@V$ zw$S~)R%A*d&>G$6au4!YkC@LHnlDPS>nPKJyj|3xU(&PQY8C5_gCRUcDdn>UeQlx4 zQZ|OECp?_Xb0fPg70p*l$`9!+{g4r*hV1zg*$(Zv-xzWv@8muxFR4H6;8#!!u!{#( zZ6G69{WL_bdeh-`b#1()S-0oAA0{1gWyO%I`lWY78k%xADZvQmrt&qKSq?PZq)}}9 z;>&i`gf-*N8s+?`<;fF?pq3>!;!#ObOnOX|$ck;_hw_yK9H(5utl#3zp|$7dQPVgw!jbSs?nAx%Q4m$48gB9Q*HvZ@FtNj>3EHXhs;jez3%ryJ!* z92@YQ%0E~Fo)!Fo`wN#xL6$!QLs{?uV)-G2-<)y9e&3%#o}3hINDykQ+GQ;v+VjsY z5p}*(|Dp4m6`M_v)6r2^$Drw)RE_VynnBipXJ6%;FdT#Kj+@pnn;5*EI8p!ST-wJh^`Qt8D(Dv`>VI7+!08CKX7=Yw@|i>(t@jc@u;t{K+SK1Y}0*eCfwtMBcg)8>vVOgBR73cez zPg_%r9gFGaVc53xN6Ybwb1aXrGRXy}hP`4OZw9cNtfs_bJ%DrRHLf=ppFfN+qeaBQ z*^-_X)_lVx7DD_3vG!RzDl3s+uMrbWEH9}=S?xPoabrMR zdVWfqwQ8yR&~NE_se_cVAg`6;x3;eT7HV!m`l$pT`p-Ixad<6hY}!^>R((d*$_s;B zO=&qfMBshImF+qxT$gUOafPra9=*g)=39`kHI<8EyluM&gCKQunv_jDzECq2PyZP3vToJ5hq<9 z!o4!SR8T>viImc$p?~iZ$yqJNaL=f)@bKtN;zCv`%n(UT@6;YjS8S}5uXYmSmAR?W zXro~rccb`~%FcfNn<9uPu#3|@`?aGOR?T8(JI&VIW-xd8SrYBD(uDPR5|XE9zl%=Y zc=>XqtVirbBSFbDT@O05K|Ae;a^}`X1PSuhq?u0`Y!DWMhS5T`vDI`j5^yb2wfkHj zuMo^2u0EO47r*TPGI3vXdtxoFnel>ym(XrrAZILRuViTv11-SSXcP zPsSVQJ-Xg^rn5TgHqkdroym3|3Ak-V);j$f*f002KvFoe_ZRhOc&JwvnN%z;nQQrN zHW2UU(*e~Ffy+#d$LL#ntz{5XbWocmk*h@^GBvdr;1?mQTSBptoedN7MMogM z(k%o7nt3kJTmHG1$uKb3t!%}h1BSZMi%d6C?SvbfUbQ%O(x_Ue0(}m=%v-;}tS<~q za*X#?B&Vsgl0!0v(v##GnskLD+_`kHP1Uw%sGxpQtUJCx-H+E!Z#BhEXZ|)&f0E;o z4soz7Gs*|`gq<*8xO?rzS$%%&piEts;vU;K5Qc!$w|iLCMGSQQBVG};7B%7`$%OsK z$%fBxH+xg!g}n^Q`OxX~31nfUbIa?vy^8*Nq2PswRS0>@E0P5DSFdz8#im z7Dw8F1oWKUb>k#cGh0+5eB!w;H>TAO|Ack6Ex!uFq7yxXcunHpCb7LeJ)Gw}mK~!y zk}!}1-!4{Pq0;`3XVQg5MaE#6!lF!kye)lL zb0mG=(w=o-{ve$;6Fv{_V_5><_fFZCW{81!M_T#r)B>7MTk7=~wt;TAIQ3U=E>EN;^B(q8`M@$8!sGPq_QWB{cefPfb8cz{>k3 zhsN|jpH%@`XhjI)bEEtOd+3q)7azhcosESW(@iZv_F@Cw+)+N;MGvyqbrkk57%O!n zIZpkGch&boe%bG4CA?(r;4r&6B|A6O62A(|W0Xax7h)tB?6D$4Xt(|^&H_!>Ocg7z zA9?+cx=?+u%=P-5*~;;!!{TA61EWZ%-W!k-@Os=B9I^51NAT}4vwWKyKB`+ zgH^NiorOC7*7#C33>?aTc5aDvXR=9m1;Z?PzLP0ImJ~ z`2wEXuXXXp7~fA2#8N~J3YJZN-Wtx79!8xo3QRGlf z#M7Ljzyj+k(yD3M8KLv}{xC64oLlV@++?Jux&~m}!(F?MCiC88DbV0^T9KMe$C+W} z;&Z5HTyfV0@}gzh96uTa6*lt z=IPw+)!x?J$Z3rY% z@QWS;zxfDeJ%DYRFrrPO-X4@!y~eW}gCw+`(WHBIIVRhgC1sTDOMREK;~mAPd|k5Q zEYhv_bM_&BsBDJHNzrIoVOh7miUl$7;^ihZkJ6F2OXLgPu>usH`RZ@D=h=#b)YsZZ z->01C$G%aRb9uiWsA|@R{=%1Hs!%Q8`CgL9er*4cHs{Dl7`bHn>Y(iK(>zWo_Gj(F zf^C=bS+lUzs?7=bnBl8W8Y5czIlW^Vq1rvD)=?sbsR2pkl*uOr^*2e8)!`H)`RxN( zxu^9v&$o!v&tv!o3W~PgWq)EHpsmdl#_EtE2W6+?JD!S=TSo41$F29jh=2gVAQ$J z?jHQ|EM64Xj=%7UU-KvfBw)k|;gfyHWRk5kKTKO_z=a6MG4O=ES@o$pJjL8UjU0#M5 z*DFGtoe>}Nq{`V1lWe!XO+CZ#7zfR^_voaDY-uhSFz|dvE=h>oX76vmL?QYV^ZvTY zaKB-@5{QD^?vPK!@HPMD>f`oe(I1-Nuc zr|`Rz65b=$|rar`XQpW!S%<5AOASdo^l|t^9+|R#u9IU1{0^+mu11Y_^Z#$gK z_m^WfqSv5~ctr1xak?0m32wafo>n#-BHLn1OP<_$APZAkVK>7DqV%bKU;Skg6Ya@Q^E%2)FeUZ zcl}X`RIy>1WD+W6?y?k8u@BOzn0-TMg{r*YoUsSd6*sju;rs&HwcX&6**o~t4JQtUgyz{3vYu*6N zAmuuXvuPAF&tspH4jRV`ZdvEh$Fx)=Y)B~34(m{Gr|$di zl=X2gBMJhR>B!cvuN9N+2l9&p1k+e5`n2WzG-c)aJ~$FxZ}R^+M)r3#GHkxzalH+l 
z>DV&H;3uIZm?1*F@!2LM3H=@FV-dcxI)H{#w*(esWo8OOJZ=!RZ*va*6OL4uS9 zwl7qY0XlL~GJbC;(eY!mRAJ;zupF6FK4Hlcvwq;LGki;a{g%_1WP9lqma~*~#BrA_ z-Y_`pBST{)vK06tDrh`RE{2H~T->YH=;Ew6AquKU{yAl5u)RgXhxVVCx~&$kia*~T z-G#>2E{UW#-7m@6Y?wFb1@1?fVbKjiA;R#@ja#`9KE^u_80?c5ymu_b%$qVJ`>zK9 z3>IYga9pcDYgwQACBMXh<`b|7Z~B6%ga{OG6%!j98jw}u1OWW{0Dwe_D8?x*s45DE zO%(`o!H7@Ui`s?cr2)y9Yno#Uz~5|ah=ILRw8Pt3k4%#E8Rl^1f3*O7e0acv-vgxi z5^b1x)8wqj@vL@(T=hU8);0wkDXkV~Du6h_ zP#Kb$^J{vcc|1L#r?d#&ArmTmlM?s(`^crUidOpq0>?mK6vkAs zJVDnH>o>;fkq`epOTHjL8h#a7cH4{>=$Z%r*pqqy_QD?_cWc=pd+2zC^}XGQ*yK6= z`Hloymc3P4HFFCb%)jb>K6#tH;U-+8H`y(Ukk0?ChPkZp;W163Im1iS{&=<5?6@*S`#zCJ0h5_geuIaBm_o_gRn`6{j|F zE1VNh9{xbDy$4H z@6OhK&}TzU@HWBJ(KpZf)E2fa#_sDgvFHcsW^q4z7+&&K2z92u8CAoL5OncEWj|BH zsISQwpTt`J5TA&}X41(8_ux&B%pCQH=ZBLp59%ZE%HWvQ*Z%-}p2c#r(<`0O^9bAr zi;_f>|0a6GN|aeWAk0T8V$zkes66SfK4sk=)D`e6MZs6NVO!}9_coxba;*?ZZzhJ&yPCaj zpTMKrWiDvO@NS|Cud?81UW`uzHs?GdxAHq>nOHuw9lV$HaymR$zZ_j! zR<%;P`-e{RH&XAWS+{Ufbve=4W@fo&Q9Ve+U!xYbvQR8{*MJ>rzSqe6RzROaF?&?PQI~la%Y^R!&>eYkf-M>$5fn?0t+;U2NW%>wa zZfDH?3^QR(HZ_cW<;K`uZPDxXK4(?%1~F@7D|}6)jfXrVRDn$X(|RV0kK@WRFraRO z30h0A`#Gl?u&FkJmoyxe39(%;y7{%%ZK~KCLCKOqtAGYV5XuVBsuT9PBM9oISBrAN zXPvBd8T$UvtRha&ZXm_)WmR913^y*D=bbT4T~rr~E45|$3!4b@2zDn{(*Z=3Gm?P@ zHV1C)IdEoqUbb29f=>E`)W4-JlEHXF$<5)9&5oOdzp~#GoSzN9eJ$wn;~-3J?*`xH zh9eQcPt*cfASpAmLgBU9JqNQCo<{B;B%WnbVu{MGXAr9vpQS&9W)w+1{oUnD;tf{V z2`&TkNl~IW6Txc-1fCU&LMj$D&zsYNX6wep05@Qe4sjI_kR_y~ES6vfd!43mH5LFW zD>uBaI=#A@H3vHU&m>^HhP{RBxaqvxAxLmc2Rrts%_6z)qcg2zm*=OwC^q+0pZMkW zPQeqtB_ugy(Q8I6QOv>4WcOld7I4#4UgPQTFd_L7^)jqOuC$t^iYo={|!5L3lGOy`HU)1INW;gehi#8usMN2fr7%Og2axxQ>3 zjr;F(bE%sWVpF`%UC){G1Ga|Bdu_q0#)6ZOt#hH16`E1cgdn?8t$jgyHARH0^yfNa zo2%?80VBP4YG<@hdcMYao4JF%Bo?cKPx?59o`+SYtmN8WcMkc@4dXttPaXxoz5y*L z+yQ81IE9G+;*o%Df4W{W8BG50=z6%)>_h?LWFj){Tj?YOhb;bE|NoQ@%#dj=E!n0T zMngmgeQNBr@p>cKcnazGT)=9Qk&%HAygqI{WX{nC0WyQ+e^aIGSS-?%sz&5Y)^wz2YHWl0Hur9kdG=~Y}@q)}%F=MH?mquV;QT(^uf-!dZYaW=m z@ydR+G=N$=6c2j3ByYex&m;_jqnx!DozFTg|o^csn4wV3oc^ zA{@6HAii^fo55WBfx$SON5hit6)F+I;O4q<9Zm$TIm**E7i(E3HS5TQP z#{}wna0tJi8DvKsvqhOlNEf79sgh`4sWM$ghC0&s=Oa34@LYG6EJ6&!mAz` z%5IuIc$fCQ6D*OD*c(Poru3yO9DTg4tjKL%>b{nvHpJjW8Q6jYS$jl;Nl95w>qAmNt z0`Hezp_7(j=KpZiBoLx=rD66CQP=BllJt2Yi>&isxF5AsN)+8%eBSn~&vFj2$`*)3 zFEf|kkMBlTwBS!CxngVVe2aGbw;P8dVa+|hw#{4Ac{Z@uhXo*7Z?GNvzNu#Y7tFt2 zsaaNA+c(`zGGxL#R3V+voaeEYAgbn1U-gAdSsRK+!x&r_mV zGZ7di*X7yJmQC1OB-T>cneCJkH02Rz)D?%m@O`Ux6v`ra2_?nvBN0Xj;?%^u>PYV@ zj~(>V7tC(@Dp%e5=P!eB%vP(*K@F>>@Z(r#zhy)G^!Hiy!O}*I%CcN%X3P-sj171i zm6UZ%Cmyn_GM}o{GohU;Z45IcrFjYGup1ezbav!&_4cwHc@2hcm!u>kZKo{zh);O) zmdf+9Zx18c4_c~O3t1^g%z{_Upg0DW#w-$8>B1$x7v_IR-s>%$xd1y8;^Ar5bHeUz z1|@>w?0ZUlzf^O+Xa{}d0!`eGUYW%6jbH=)=P{pmJI=A1*V?zJO0&-(piWZM`81)= zA84xqROWBw>)Amb705w>cC8xFo#9FtpQ7rE8rCwC$~rh7*kVSM?o7&gfP^V7tlsg~ zVmH@dYP)eck??Xg_X};DG4HB(u)r^Q?C{)Dvfp3_VFqN;Jo(H+vSb490x2@5b7{wF zMnRAdFGex^^I4rG1E1?tBr2gdO4$VGXc`m7W|5ciqum2Aph-@9-DOdNoxj<;c07?x zDk?@*FhL~p5+a$zzt$aIq0yt^<_9xvrUhdang#m98#A9Lrw zUA3)?(u^4oXKf=Yjzv6M&B~xNs9?;@W**UIXHS{%ZCEt=pdb}$?_}%x<8@?`OP-5P z?=9AsNjf!zlT7Um)&DLdKZ%_lZ;QSliKBHQU-8Lu{sA+Y`cfs&0O`tT%mT$Zv@bfF z^-QuXQBL`aYcEdJLovKd$x+SkglV8Z0@b23w`~tpUcSeXj!mH`bch8>)M%{1-3To0 zq_BPiid6svx2)?+YCByVmvkVS>O@H#X7@tAfc%7L`JM*~^BXl)MRM{ti5KmHF8y7k zfVomo&GZ^c<}80i+;*=n}s7i^VyK3DZV@7YV2>ACN^x4Fa7*$We~ZQZew(LhofYUVdU zIlPel;Aq#IvQbte+%bUD6_B1JF>7L~i>RsGtMa%}fRef~z#5=wMahecEIXn4w2TX1 z5Z)h(e?gPut8$rvff#}pL&+B``Y~->6Z%$3nhhVFs2*6WO@|W6$Y(KdNIJ(dvg0p> zUnK4u?PS(!-ozJD@=pI=m-&cM^=n3rx7}po+)bsH<8`2Ch=xkcv+(g1hW@X2HdM^*1AN|KNQv z7yOw^HM)ZU^$10pAvz_DzbH%3$RRcUrO0k28jv;;TaDkayV4O}m{-?gfbS%Chyr!` 
zJI3bywPdJf*{1*u%Re?k=~HjyS>(ouyg#{tUaVO}_q3ug6X$=XCN<*H$szbe9?MSB zm4S6c24S&BpA%aqK8KE~7u42*M<+#t4HWs8s9&A(orF_?(z|7-#0jbUq;J&jevFio zSH=~WD>aK&vy$-nb_*rp6Jl?X=N*`~3B*s^6dHE<6Az}_Ny@hyai$~{!oWzYn_3|p zu6iF<5ZiII!3(ZkpgTmzm1R?|ZtzOgaH=RI6G|y1o1Y^=9*6!`4dKhQHhZ8I_-nVn z)I{O5Do_YwLr;Gt{ zlg*Qwy%MoA2M0FFo0qw`%ey4L#$ivms&mS(v*s znVm%N(a8dEW`u_TJ5+CCIuJX>`To)^!!E2fdz`sVE?{)G#~ki1;(xM51(oOV`l z7{Zj*juVoJIB|K~zN$Uy6EX2;2T7MEzKQo-?#mc7$~C9rm~#yFHA|B1e zWuQ`+ES7@V_P6}7E|SO14|`BQCycb1KK=V#k!P`Y*6{;0w~~GXi=Te9o_AAKytK!% zc=~6*eHecIo^PAD<5YWeJ6l_dg!2e)Wj#6f?lYbm9Wtew!uRfxZmc_FLrRzt9 ze&GN0%)wrpHs4emz`z=lCe^{-ug;8sh|^l@?O$nm+=*p)*6Toyv%OsvNEyZt-~F&YwkUu~O+ zaHymx-AIE*OAia%(<|8rt1Mzjcz z1M%sB%4>85_jqi&bn=()HMl9R!k$8UfMn9(JZWe$2A#)1-$r} z2fsNs?E+MHbrP+^YYDBR$>V%JyYI~8lrYubLt$kTDRKBVLuiM^vI}WLqnWVs1v@Q@ zyE7`STxCDQV&Xu{uy^BEm@znYM4Nh_lL=^0kjuyf=BU^99CFBLg_2%G5eEzwy^+yg z=Fp8JLI2JsxDxzTsxn9DGn~hZEWDmQ#hDz73w~Y2zNFZjxHxT+{f6#IO#CS5@HnO0 zEC6%k5VZ;o3kx!)7~34U0bpwM*3h#3x_VY5rs=7->=p$AU@%IX_H6)zt6z;N-YCEW zDo#{TQAJ!{{{hfmVh}U_f5{_C)_u#ddip3m5fExdwLdIcFgTDgNGuKp!HbP}E3I56 zf10E{JG`qkDgb-Bw)#IC@f;&1i~a#l?!vEc$~#405Fv-&;T?A?EywWF zy82uxz*v@&X_v}SoPzFM)QrFDRuT6gkSFzbh?)CSq_sGZsRR&0TNa~K!P`)h#ro09k*;FX6Cxnq)IW#K4 zp$JB^P%eDfm~h$lfxy4{**{RXWo5JqYSHn2Y>g+0W>&IzI$ z@hq+Z>{pu&QULMz4K4g8j)G-Ebw2nWn-d)sl>1T=s067q+W2fH(CORqT;3R;`|x<| zvxZ1zr(;}xkBYO%l%WW+(GQJa*3J05f^O3m`*0LMFg?1#B_?-~5P7eanB8lPvC3@n z%rJwpJD54#j|^W2iv_+6@w**B*JfTWuQQjdavh9qB~X~p(&yMot#~t-ymuA3v}+2& z!vw0**HLV{$oldq3xzDr!uE=EkwYiis;SgJK-J&mPpFcUAMLjVD>qIp)jS#w3Ps>F zaMH>6y|6de?+Oo~-)Y`2&V(X2;%~(scByS;b}>eCy3h*c!G%{X^pR#Y3aJeY4Ev3W9NwvDgB{Q5BI$#Og(X-qu4wq5u=5Z*pXKhiv`6UN1V_x4kb7NvAwKfTr8#yJ z9-kQyx)5ppIvhDjI9!Y2)6zS-V{tGs42$(0ng|%M?h**TtT=h_w|97e>mi8e6d*i4 z=U{-1^Y%ON=|%c0Dte%kXqKDAdJCpMA;)2P)R!}+_}a4> z(iiy&(U_9B9+duuq0P8#-Y^Dkv+A?}Jkr>|FMHdEp$MBd53-K&t>2fjQ`)A>hBx}& zIqL!StLQ9`jutqMlaja3p&_}7&{i5cU!Hx)Tnq5~B^B?7B)VPWw^#kSF=CFv?K_1VOR%SB#$Y7zbfP;NUKm~zWOu$v#j$r z(zS6Dlk;do^~im`%8)xBDJPx8_5Eh~)q(m0sd8Si;kYb+(81)x=gcmUek)~b`$O8v zTf{Nsc2?66i;N|Y`}*i(9R=o7VzV)=99WkiKxW=uIYq$8n{e9*NVnMOh zhqRQ%(g9EYGgumZ+E%N}cfPI=zbnSNfmdIOGAHuEA}sF}HhFx+E;-S?~@qPMYO>_(byA z0{k{yNuVTpUF2fX-KEL}eZMeC{@kAK*j>BqI@W*NJ;qS&)zEF@F5F7|&oB%<$O!ed zD4~f!9#<*qJKdn=`=rJyzX4{5Ln_@fb3lyhty_4n$J)B z*Pm3k)s+q)E19C%Xz=X0CufxhM%!@~;Jor((Xw6E*k3mUyx zzBdd-=rrBZ>@#&K7*mQ)Jb~7_7^pTS1wT0}Wr7OjcQgx>9MTYG5QoJWD%;*W6yiSc zBhets^Z|y-=@Sw!I+H1{mdG%xof#%0c#V(!-E_eSQ}ut9sGy)Ai|Z{u z*>v zO9C&ITX}G>KK5JSz3?LV%K!zk)q6qfKM(*IKr|SPlarIsLlcp3yr}`XKf9Z4rvd$9 z26%`>B&fmn?x9D2Wh*3RDw^^iA5L~*IXWP$-Tyko<9|toCxQY@tL{GyHm|8TF=J4J z2_%DH)S&cTh3bWJFs00ql4zQVe246Z{e&3L%_g^z`{mA zu-`zV3s)fwN_s?EN^A|HdM)(@a;yOak@w9p(+FF`HOGViMs++RDVox&guWf;91Ai! 
zPQ|qzM8Oz6+XR5Bq@T9E#}8E02<3B{}JcwsVP<4{6*!#3fi1;KnadpqVTPUB;YX!#PZ$I@CjACmEKf7 z@|9cTss`*OHB!3T3&-MY@t(q36Mtj3RtC33iiL zMHG6_GHD`jfY7h5>#uVXYBAWfCn0J_AOBYiP&92^{}hA6l5gA_p6>}xDv8qI`pp=K z>2i+odmxvSuz>UyIXCVCv4Z}h!?mh^F8L%@&z;X;Y7)nmlUxa-R8v?f53!mkif^LD z*q!<0q5_?uYm*6yP^Y&d0+IgxY50ix!YS(v9r_XbR1EBc=V?Ql&}xFnLhW2v=QF3o zLrHTL8_lk={Ce9FBt{ilY=(mj{Qn)mccM14I+3GWjQ>`U}0rTVK7V-iJ*=jIR%a`D$4EU`XXCz!Fp2;i8X!_tlzbt|o zGBq8R1C2GVvW?xh`lQR2-jHSDBu?tK{bp$4_*cAD#Vj1#$`|O+p#8d&<}GC9c!PCQ z0Bis<`$X7MU|bMiMYszOY}FmMrGMN!PgR!`#&t+8IB!}b|Do{Bm%zcE-?_MGW0NL_ z^oP4$pZk9D(vF$m>k5J(b3Aw0%KtT{^$LzIhGw{J;L^ITUk* z$VY#Hxp!3 zFvJ5r(|iHudFx9KT}TU6Jj2RPe>f%dJoLN0z4S@l#_`RF`NNy-SB#?Lua{QBZdS#6 zG!3IJwuK*2m76?w;3p29My_9v>2~pTnQ{$BO6pdUKc8kq%)d!VR|~$}ZYAh6e*Yhd zokO+!(77aZ5TI@tU+j!)RU1Kq!aVZtU$YuER6M+Wk8o{SWYX4Y9@w_~WAfnW9BW=% z$HJ^|jw{4chmA2gD0Wqd&u|L5_;S=7W9A3FDxzfQ@Lw0>D@D(T+IBGO z$wtm(pq=AHHh10Dn{TTgZhq%JBT9bB~Sb)?My?Et{uI-1?|-^) zhVmL3pPNa1xVad0TQuteN?>v$VGm^KrG^WuX+oTcPc5j3@*h5kfm2B`6O4ho0k%l& zy~U!|hNuV?9}sgud%G>zauW)8qFU4~o=_%RaECw5Xe3;OIZgfJZF<-EInzkElep3J zaGm$&3mH1a`fP(lZXbm;WX!fl&{&5{(zzeWl$|nZ(K&(eqTx^JZl`@xbm8)Cw#n-9 zd-D6rMaXvn8$B?bpJrrIe1LFA@`D|i)E9tPyO&m_R<9BbxGh2=8Fr3mka(coiMGju zW8h{-x?Y6x?K`;Uw}#~3fF|Kq;b@$|PCUQTbsWzlcL@z&8M7hs#|-wCeVw`23I!e( zD&yrTVoT85Gk(BC^lCiQg{ub~ZZR-S{nR;r*Nb`$R17!<&|sVq1Og!fDI>lLoq-$n zq5`e;y<2t8)3ghjlR1loN4D;IP<77z8@#p>3YQaK-p{FVkOda-pz~CV2+DAy=n6zt zEk>ipXh*ET0EKSj2)Z)FeEXLLtGYW4=8GzAnLFdaJ(lyvs7Ncqg;B1xw>138u%QsI zM?8hs^SA6Z`M2VAxMd#WT=S8w$$w#g_S0I(1AA7w{^=;6K01M1*x-z3DuX66FDBUL zmiwY2-QiGSeq>3*fIHscnb1h(N7puY+MAtBf1merd2eh>6n+w7gnY7@-LXkCV+0+qw3&kJ5)<*j6YC(E_&)0^ zQ5M(^?eYdGxg2#E{Js=P{lvSI8~;I>^Wd-eJA2N-^{D3h_9s&-o#I+YJxmARS}fnH z_!Cm@khtyQz&fzjix{%-@kWVMgM#E18nhvN)Cx3Exk8}u#IFwK)N4^>lPOSj&f9r_ zWIpjmqLZ|HMfOjgNA(%v_z9NMyXDA_WWnUAOSvP9tUI# zEx4}xPipt~hXO=jY}*|{weN-AUc!z-jCs#7$54s-hpE`s4tr+KN^#fg3a^RCEReVGHc;3Ze(j+ijGshvcEN`#O=97>|=g za1k54O$gU60Y5Xmv3FG5&mD z#aRupE0xVF78o(#>cY({mdSII5q@2|hR*(g`?&u*{A)dtQ0!mx(ZpJ3Ds~X~u0rEK zgqq^LmuI`={Kr;CzeA@!>3+$J$yh76=555?J@o1!TlAlpoLso zr87aAJE%xrqakoPtx#WmxsU15{|X?$-y<9o8*r_aBN|nj_{Zq3wd&68=P{F-YWB`<7{ z);5@I{pp=HMiqZ*Ns*^_)6>TX{Ox%_zb1W@TS+VoF@38?$n5zEiT!3g;%V#`BNw^7 zKYUWtJqipq7GcKF^2;}?kj;u;h#tiZ+1#L8>hYqSP9FEfOn~eDj8jwe)8Czjw$p}A zAEkKj{7yI=^)kKjK0s#eJYM>v@jRl0>pQ9kBhqs9a_oQZZIpaakW>?vuczR7@et^s z+A4`ufNP*uJFla?E%JfY!*z9zP(@r7X(HiwoB7SlUX&QNRNqt46x@TQ2DPdcWs?Qg z6yJ47l&o(V8SCWCOZtWpZ)n~k25$Ahu+|}Z+{Hvstq7Zsb2bN@=&+i zf9br1imJ2hO%gE==qIo2A}#_&%?2p236o2C-00DZ72nCi2Ve_3l70}pFI~IXZ*$*x zWT?8Ol&SxKjZjvh2j}AJ>)%Upzx62d{I{>1tmJlQS7Msm7S#3G$4&pbMv|$j@nQQf zc;-MNLigwpf`^_pb#*UxOqVYU-1#bIc zR~M1FtO+b2SE)pjm0;{NmsP8^!_Db@>-X8(!8|fM)`hj)GDDPX6>$iz zcblN_3lXy#K>kBq-G1h@tuHJ6+o|yB9+UkggJS5dF*Qd|6<+9&u3-dSaq`J8J8PMx zQ~2LJT}PF>1!jG5FXCq9aQ-w#!Yy#$g;8!~6RlFmH2M@~R}LB-joE7znblSB~DeutRXd^xKrquZn~ifPKM{vooIT`ywm z#&THrDYP&QA&ds0!$9q0I@=qZRUbtGt>2sXm!kVMrY;;6%d>90p_Y=jT=S$qK-}0; z{=mroI}%JX>drAt;&}KRzwpSn*(itceG*ytLVBU8d*fi7OJ3Q5ofeJt^B>{PkW8QJ z5&TEGH53ck80?MZ@+v6gh&H@B?aRTMi`Mk~d{m9LM&qZi%X+2`h${0xe)iL;VnPkr zM>MEVCH{@M!rs3OaaK4RUqM3kJ*pqibUJu`s8jnP;6U8-$*XMW-SqvdY8sd*2teB9 z;H!O6PF=eZmTa=XC|JCs-*h z?Of*Y`ZmhJ^+uJfVg!xq0qdv=?jrGKMetr z;n?G~U3#hrj7yQEH_-|i_fjM!w90McBy4kLtJOZ4ly(Fw(QZdP4?T`@m}UOqn8{@` zLBi)0F)<-GgV(088fGV6V^JuvDI#-9g8fSd_1eN&gu3nK28}UsL31*`Rj+qbO)D0q z_6At3yw>QDhoecPOp>aXlxIX7*{BX5+rB0SEsPB^uYUSpC?oC zK0ID*V(#i66DfKKGrQWADE8`19sT(j7ZF3^A9-oF6zVTar{BsHKAFgwXa5FgiZH+( zf!(_x%WD%Z0g-2Qt1v>gkRhVwhVK)$Qd?jihUO1l8ZmnnjUU~34l09YV*L}s(xD9U zTJ}}nj&7Z5nl)ywAic*~uULkO3?j~fiy{(YyAb35(e#yJRdnCmbRN39yAIvmozmT% 
z(hZ015~NF7kOt{)Ndf7Sl5P>?9iR92|HK!rYi7>gGi&X8-C;MHEDO}q!MyS_mIlg- z;J_kj^NOO3NCXFKjCD(URaspAM}rI;%6(N)N+(cwGgD4re&MJjOCGi& zhePw`01$5C2dh2+9>Ul~D<+m!OYe0+AB*a-w4hk`<(;<4bjIJ3i=goj3G6RqS2JMd zl^sUI82ac*d}6d6v%diCA<{JRE7ty|MgG2}Rh=d{Ll`b9Sf(`ybvc&|LJ*6EL(HDR z0>kIayc0`8AwQgy^81k-dG5>Wst_&51+rayp!7@^%19v7US2_?SnE5kM~)i<*01Z9 zknb0)7vheSG8-a%Q{4p<3~6`X+DZ1DWACJgRn_BDNgUhlElhYXBvjOY{%!*6Px@}k z*r#m)??7^uPvI8X-@{URHb5h3(Xj^al(>*lW^R%~Eu8DHSP|z8jjI_r7T9=Ux9sSH zeg9rkfhy2vLxA?}sf`f#CuOc5i6lXIfDT;J6JgirRtQq;Tw&| z-=KWo&Qd!DrGS$~bq|jlYt5u;FTv2BH0REz2?j=P*Zel^kdxclkqFb)c;JGZo_eo9 zRI9?GD5LgH6KfAl5Fx>*>TS=WtKKtbp5|_bdJs0*k!b+3;x?1G z^Y`UCEbEDhaM5UO0d&{ znB3n5`k2LEim_B zMx4q|vc?m_*PV}tc)@Q#Z5lV%upa_LAG6Hi_>SLhl^h4AwQeely4PO=S1(O>8G#*D}Jpe^8V zfHP7jtwtzSWGx;^Q{T9o{*X1Ft&X1J#!^X*VJ}He`P%^&xX`hTL9&8XO zc!Q*lZCm_IjH(v3X?8PE!bD+*2Zg;oPA3uJ+KBP*pJ|@I%|?aQY0@C4fEUn5vf_ieEkToe`Vya!8h36I+u{{5wD?S%{)iUtGC6Qy)g*`G@ISN=f{ zKkTFwE&*-8aAgs(m0NQvDMcu&&jd%dptiP0x1IW&k z2AbtVp*=6D2*9Nja&pJStvk$B&O5IqolxReE)1Y3xD&mvJl(qR<{32phx34 zNs7k9J>U)ypi~ZPc(kL=+%jD+FI zkbJS7BF-)%{0(rW`cT0fa{umwdqz1tY9<)V(G5^ccbogYzN2{nD6)Q>&gD%jFyBLV zBM9XW``#SA^^;eM5<<)5N+ByI+|5KDJ%I^MxQXNTRrQ=hMO7qL!T~xY*rJ}tSXQ64 zWMBRiuSn|yGe&z3(MbCxwvv@d$Ig)z#*h-`?7v4v5lc61|C^?HZJ>DzJiXe}icpOe zjK{;1hyQbS05~d708+*Yz?VAJTJXPCSQ04A4yRS51`!Rc-XbT=HFhLD&@@s7L9ns4 z(H&SZXlUyp0mA9F9TW1}Iz+?ojX=sj55-P}(>i_0wX)Ih$Z1+~F1CjZf3bgNzA}?$!T7&ueOZXeh50CD9`a6i?%Mgrs^YXDYI$R5*(u(RAiRe}t! zC(Ss~(T?@~E5NfJ0CW*_6COVrtanMmXa!NWd08@P4kLvrc7SLdCS&&`Lf_zbQsFoK#4=0R814c{4tC! zR0Gky_#M9g5E>(=CoQ#GH|N~luzx!D9bBnhj3cd=!Uqr6rHAovQvOM^Jy$U4y}sg4 znxRpTi4C|NOQ#(N3;Pxd8V1>iGF4Z+aHg>+nnph%7^1l-8T3?5)i=dOIR9w-{fWda zn_a#%e9QW65&3s?zi^G=01HjVdU1Zli>^yCV~-I${Vt!++tA#&G!|OAXpym_DbzQ7 z4dAf|VHatduDg&-Bt0bNu6R+{>@Ba11pZLe{%SJZR`wZ6<-^?-aWO$0M*>*~+J~E@7VRxsOZT#hkh<)vv_-+QR{dX$6{Fxir}x}q2K{DtCLr5zeYicY$R{@L_uzLr&# zOr^2Wot^=e42jrxRXOnrH@_4SG8a}HGvvEHVE8##UU})8z%sy8L8>-1?h9c&UxU|6 zsisBL&i)X*kEQ`T6d1G=Y%YLsPg-Oa*Q}|?5`t7kizxf`qizlpZs>KHuQV{X zlTUl9KDtCo;YO=mEnt`WaAkokz>N*%M5`on#?*4^mU}l|`~flP8Q~hE19|VPSn;Jh zxlNiv{JHLsDU}Bp8aea$T&d|#nv!7dE)`wE%5smmsb%{k)vjxI>ot!+(Pustsi|#90bqbu$8+7&HPNVlsa(12}`6c|ExDoyj-?Z;aDw>E;mYU zbs`q*@8Oq{`4w;an-m#l&pa%3=}GerwRkEg2tq3}YQ`-ao9IYOHP}D_iyZ`r6>I=H z6E%;FeF%5w>H=kbA3GbjB62!T4d<}pOP5z%F&7+T+bl6%NUuM*B5`RCd|zmybkyB zG(XM7EUpApg8pZo4q7#Z;wqmyBmPx%3jAI8tix~jNSc~c$G!*Ot$L+&yS1Ub60>Y) zA227TpZ>V*YD$)gA+<7lS_Py_k3i1TzF6oIxTz?(XOf7Me{10(^@&Q7X?I2Y4jcXR zT?%e=W$N^9>!;!x-yCdd_sQP7^u{k(l{gM0{L~f?&OZwmqC_}JD*%;IH2zfg;k3E* zEJc8Qm7K-9fVa$IuwxuHd1~}2v9`=zqS|WP$2gWg|ZW#sUd= zprczWfE=3kx%7`?z@VzAu`{GxO-nU-@;7x*G(UXlRm5d^)O$W+4&AgLQ)D)8E`i-Bw{<){{B(KB+w+Y18A1CtLUHa`XN6VjVD`bw{DRk^FPHJ z9)q;TEFmE6shaJWg^Ybqvx&i#zW-|z{sOW|PK!Y}H2?F6|KkDx2RvA(Dp-sDZpGvc zTXyiAO`GiTZoi3pE+(izaIrFa_*1oZ6eVL4IrJT9j>LBdr&r5Ima$E7UiJGOV0A32 z(Zj4zKF2NTwq^N2M81^rb{vb1Rf28S8J~_qXQ6mLa>wuSVo$V{Pf7G3Ajd<f zDSFyT`5uA6Vj$hOyh?j10a6%PM-?P>m;_ntI8B0j;}+Cef-V!1rUnz&fUK$mm?5?* z%`}qRtRA|yKwm6RX;n~X6ho! 
z7pbAK{mJ&EvQWy{j4s5$jn*>9MFH);)ShL;@fOvrY=x@I^X}{h%QVx-Gpa~_`tYSBe-O= zz$>6WcAFzQc$o1(*5|O}y~eTDTw-+Ru{2&shqVvrJ9ecmqO^v+%Di$<)LG&I0=J6$ z*fyyVQKR%*G?Othi^_kzMPH^{B5y6DCxqOLzS|mxn7={q$3A{BTdj;zsyfQqrMc0i zTPN#b?m((U)|@xOJ7@Toar^1Sce{GPLcB-SFB0rLV%Yg{yQLxgaImH6q_6J`+uS>Y z>@K%>EHeIG+xhB3YU8bTxm$>K?0C&+BIi6=1&e_(M9|};08E9TG@F*w)f^pebC$z8 z=;+r2Ff29dF?wVP9pArco)dF&D=#>=SA3YW?KG_Ti^W$0hvaP_~_H5HQY**VJZEYTxwqDRXn{|vS3@DU3ShM z8RprKWssqHEzICq@O?evUQA%7oxIkV-G3R5D~_d0vMi*vwKslQ%#dz9=xukxV?HD#=BH{wP8;U0zCzOb|+r6LF^Bp#foZC?vDxp6t0ssLGZ^{mxqv zGZ(kjvuVLkpFw%2V5va>Igoq)z(!pj{g~_ctbs?4iq|XHN#9vb^B2wE;cHd3r`MWm z2ED5L0-f_TV%K927xT{+6N3hKs$yHcj{+Xle;@naN&qi~r-4uV2Tt_3UggRi?IUPE z&iTpgccq3!*M?^uoeWt2qj2V@rLr>sbNPKWk2S}LE1@f!WQer6Kx8gTX4kn#q_1Bp z28d0ghkEeTNB+)aI%g06P)BP@3wk_lo(F7L;Yz2yK|lFz@&4fuybwC0~nb7*Dh zysWlpW0dCn0_imnBdaw}-ce4UgkkE;DBv2%304I(ab+GO zH&w+E#+PM{!gr*|Z&bmXsD}3F6L_Z?8pT<}Y%SxM7bYEH$dU7X0!h5$Qi z2-iT#-gz~-4vUI;_#JO>RXkyeS~IolF?o*i3O9^h2~$Q>3kBC(;W_7)xriV@SxZvQ z<5Ns8n=`(S;MM?4^YWcE%jHWQJ%@*f&1cEgp{YZ09+wYxEP3lbG?91-CjQ{ffsgn= zHX%tRhr+cG6lFelyX%)ievz3+pP@g@6^NRI>UNsqPE|{AcxNhcqKM8uP4sN}+1S7j zzN*?zHWrEC_NR#K$r5}IrFzKgJLQk3XO(KSSCw0p(#%?UaN_wEE@!a}Rv|67#!&n&%a6ym=*o`s2 z|NbkihjN(T#_$VI4Nl1SA`#$`#V0|doNMMq5wrcB4tY?gRqbP+I(b6mu)$Jd&5!Mn z|BQhbckxvLVnNt=Cr(f8GP7@}X-m+!e1hWj9$vtO$vuA?b$$N3QcjrmwFJ+YuH^e! zn=hI7w5ey6f1$hub*w59ubdDDjGOTD(S`HlVl0FKmd|T~Y)%YVS)f!nDC$eCNYCb- z1uLiM2ZSt#;gxPt1$3u!sr=y;%tGv%Lxdsy4!=shxUF=8E%^2DPlH|&kLcb~_O6?l zvG2bw-Hpd_vJL@KBnWQ@LE7i7hgufNg&1P`2X0|meyo{7f*0>LoXJ#9LEI1H5Qw`(VFK;|`?`Z-(k@7}b2L4^V)d&p~_@FS7*c(?J;2 z6sTH2SP@?UDt*_a=;KBt;A&$5^d5x5KF(sv#y;;>|2YF%0D(T%FSr_kulFk!t%kh5 zXA;tnrZPGP-GqRVBJ8`V=G$j54vNeNQr<~BvuVabc(xg%TdB|en@<@-(f+Z%H>BLo zU`UiTJR5*^rzu7ro}-n_x@-NLS--!Z_yT1cLFFKkrXKy(FHa1fH7@RpP&^6kkbLsv z`7e6&Zf34?XTY9v#I=WUcJrk-IL-g6_^Gb*moTZopS^^^+aa?ytX2oC2e5OFeqweW z7<%4iqsw@}bb87Am-`ythcb-Lk}Kv*h5+1uk~F%D8Ri?=OPqK`Pqms5{XvVOG86v5~NJRV$O)W=)d7Mg5>9&BA&Wj zX)mfaOWHMdNzQuG*KfxD+|6^HR=ND-GmgrYWn!8f;`(QA5Z$sDBLN6%z5wQt*LlCE zxo3BPD6z3p*@PFEfe}iA(tv$e{}<_ccpt2Y)HRwV%90a;KIlq&b@`iaVRPYUr~Xn_ zOdlIJnYNG>E6$pRoHifU1^!O%Vw)94#Z-V6AMr_Rd0N@n*=wHdunZN4lShFfit+ek zuK^Vmb#9`xH7ebG=lK=0EMy5gImcA|&op~Rr!Tu!uO6KIeMC)Lq&UD^XZXzT=m((W ziwugAZb+xftLm4SKXeJxiyakcgS0fKC@tCF5u~;EB0J4I7_56(Q+waPwM6xabtNIu z8SDl;aD9|rg_V_?p>wj6jQmQX$W7v8iaT$DA6CduTd07tKr@`pnZ(XL2dYn zhZ`ES2PmQahEqu~=f}k5tS3h$$a|O1^RQ8CWC2!wstYa8N)Ke%WZzw-bz&MIM~Us6 z(n;rYo@r?@7DoBPJtinkCzp!e`Aa@=Ux&Ei*0)I!Fvt@Ds*DvNPVqBjKru@_Vja^3 z|1)=^pz~X4w9~WL`r-AWBfBD2ef5!|&Hz*^d57JvBF)v{@BNDe>4CJTEW* z%f~qr|0A-f{Dl)`FzpeKET0K6&0YTPJ*D7qtxey1MNAT0t?zanOo9PCN*y8!Iw^R; z+e@$(3TUEi`YEPjq?M=NhnfPKqOGx`3{R>{__q>1G}+*l7<8Fna&?>CkABHkI4!>l z=hseTsP;pab+8?@n+yhp*=UvkN0K47P@4XZoqv$u@Etb9Q6iZhJuy4;r7v<)mY#l#IL zuY>~DNm{xp7+_sR!bQ4yQUCnYJLn}@p2l|cfPO>oozn@~0v|rHKnNh**GPbL!VxZW zO^lc0xIjYLgbx>B3;O%GphpVU*Ic!jk0&k`Qmt9ak$%5hBcdfvx8?cfd9I>qW1d`( zq|aF67{mPU)E3cI(Hg;jI({aXwlTC-|7PFsBJi9Rfv{pN3p}d z82Szvw?V7W zPd@%Qm#3 zb~A(4>Voc1Kd}@>2Pp5jcEYXdf0}<5Ef1gJ2^R9lXRE;tCzz|f-ew9PIN(trm%*Runp?uWL2!X&?CZNMYS*YKzszekuqnLY83B9cF@AfEQ#VK#-Wpt z&o(Mg7v&*N;B*AoE8K9gu|KML)bRXC+K~)VdM69i_bf6xqBnBhMY5WZ7Kub02Z(gN zDP3$-4Kx|vYy`Zl9PW07YE*0`9pyCQzJ!pGca2SER3&n^U$^oHTnjZqy`X{6Z=_Vw zsMgz!(th>&k=##7wf=)U@PEILHwAUZJ*J9UA)}y_Ms!?=iNUEx0|jCc7;aVrnXV5! 
zLpB*uu%3L-*4R$~W>^g*$FuKZqVY?uDkdfsRpcppOJCJOeHJt7qoa@{H(Vjlcx*(Bg`juCZ`_g?EYIKS zC|&L9Tsk=~sR3zXsv!$@pOwg?gwp~aQubp@sy5*$C}c51;I;00IYjj8{vt#p#DdAD zQHk7^!l=-1-DeF_?hIIv+0urgtScq)serWU@Bw*Lv9}km62+xjJq}_%R*S&T_8&2_ z_GA(~KTTEh6)q`5^7zC9EF_b3W+bBR^S5GbAK|Uxp%s_};2Ci%C{t<>G;CDccCc#=@x% z*iPODEKnT`({#R?43r%XSy4kOJO0G-v;a3{NFvjL^fwNS)vyGIf$2mgWw`!cx3m8> zlJQ`JC*qpkM%<;a_?U!BV|)(gINiowK(?XzaMxWp8tLY3MqX*i2_nCnoLM)|rK*Zf zE|RnS*+?GAhzLkXsSq-bCBh8lM~I~2V&e501Px=u0=_remM7+EQHI@Y8?}btW%72) zUjKYPHD7g1nB|6@mUsbQwdip~2r$7J$ujTL!o0zw9?!g2#4?j>J71p=^|FnzeR}!r z#jIDKuh9HD!@nIHs*yz@9?~s+?(u1O3iveu0wCfmun!@bO$tM^F`4SG zzYh#pYx+I+Y@+19foRnb#Oi7kF&UzYs~Wcd4hzN+CO^NXbEOx?VqreWRwdz64&e^o z2RxirJAZJj9<#}T=JUJ~#bf~088k=tM!VBseKzh)XVz=`j1b(QgqN|Dslb84DUHD0 z529s7NX3#voW(-jqIq;$!X&0;dU;`V!B%>!_!dNKmPE>+7OO!ee+9%SmGq?YI_X+q zk^`iqR_7n-A-om5KIN+Q83IK8-r~L&q=4_98tcXD%ZvTqaNNAwXlPTlT$tN0lY_!q z(>>ZS`56!`9|E-$9T^TPOtYI^ITkT(;iu;eb+TWhlf!&AfukfHwTMy1$#-(M^FBt#($_f@e|ucwsBqZmAMnr;mum%Eb%WDje3 zAh(ec2hZm-;F&I=&^P1GhEP1@`EX`XG6Uq!;7g&%8g#4^;~dH*A8fLxiS^v8Xw{D} z&VC?@6-9_pPp&N9FM}Y&xpl$PSDTgK-MALg5~lDB7Jfn)-jcv}pF&!ZTulZ&il~ma ze{vA=cN&T*q!_&a>U*hH#)?$Mu;Xw$xAzfPvf*2J_pA%w^EJA?Y3LvWs?k;ai+8nh z(P2=Z^pnfvRTs+FYG#i9G^GY+y&fNUy~zX zUUJ!3`bv1L3USp^4#k7KYs}=MuMZ}0YV;dRuMYa_g8@|?gp*LtBHD>_4sdG~3V!AR z3`!FoF1OVINlDpd*>waL+@HmDqH0wICBdtt%nup=e%HoPC?4*uZ^%f~Md z>g3mUJrFiG^f7ypRytA43;w2BI0i&PuGzngo-R=9W~=n$k62n>U=WDg^bdsctV{66 z1SWn}A}oSFp=B!+QHAYHuB=Iy51pSNR}S=N(3do67Hwm8IS{3bGhiGfL*m{SP143D z@^&GNqR|;zdHN$m3&UMiPu$4(+TWJ4mk@Ie7jrBz_KOgN$1GHpN!TZ`fofE#&n=t% z(~5RQP74{%?mQqp(uzXCgD{K%F45Hh)m*3B&tnY`r`07Ek*Icn0E_!VK#HXXWO`Hp zuKIC_FypM|XcT-rC12LpGZ$SQP&DGn`LQqH{H>=Yuq`PM_DE4S*3GPa8WApYwI^!N zvK8%@*DA4tUKmh>@ZTxJezZ;X7{#YoFH`}N9js_#*h-&*aDn18z%QE*c0ZgZas=W$ zj#2S{jE)Mt9t(x`g^2c$a?3I8y=$ViNv2OS%~yd4TOj0wGlC$-eQFI1s-?wVN2U2* zZ^tx~Uwzn4L`2eYy}<7Mb0u;g=#t zX?&FnQncRpNtOGmWD2EeT5uUl&p8b~W>6iNWI9On4T}b%1mh{MTj$+ky?c~DG!m(%{T1MpmzB2c<<9&e=sc5LB!8QuHMgWX@M7Vd}F+U z56-#&d=ILDjJ*7r8{;sdR3dX4#BEfM!G7Ses_Qk2F`2N^sMS#~R_ax;+^F{-xYP=i z#CV;JLRwp9fbwG7{~HAP`-h|F?>Rq8PiWBsh*)OA@w;aT19I`m*!jBl_)H!>&4j~1 zhU_nm?{djx!af=k>2v_E_dRtM<4`N11YZ`A@RHiz&JPa{|6{2!K$wMw2&PCr{os&& zZ_}GR9SR&tWR9?x9DYTR&2&81j(dG*cJuei?qn} zU!;@fcddds;(wo{?LGSQ&`e!Gwh}Brt(cqCf#G<(+LgxY3O-q&iwN5UeCZhdZw@kr zeO;-{V@w8~h9eO%svO^)Oo6VwHBwe!yiAgDKO3QH{#PKuu-79%yjV`^p5bTd^g3JB zN%M+%i`l0G_bgU}rdY%cpc~d|vYvp@!vfEhnnZe22V{UreXli|oxJj~HH zG)XW3Rtwl4$gmcYcdnY8@qSjDWvu>>Q|0P*!reGTqai>#%+2jB&c`vr=`da3#VX0F zhBLg_4t>MJ{|4M)`hdBKDp>$w-^&eZXV+Lt)9q9?M|C1+FzqPxy&oHR<>#E;q|6WB zvpKD-DKP9b6gMUd2(Ca>Sv~Nc%RzYR=I4PyRqD{|L?J2Ild@UvTfU&I5nvzh-Jx2a zBR~Z7A~A9JsDs%+TB4IT7sc}Or!QTBs^7Yf93^@`|8D2<*n$>uw59am#(Y{K!1W9X zXvt8RKcecSn|1UmovV5V(xiqD8Tk~6{@p!xT^fymK#pm*pQN8a$n7B-JOTnryDB9u zEu_1dJzEN3}>*w*LQ!PbgHyw+G*hD1&eCMfM>`SDghA+vbUatbj z+{qA96>UHq54w1hDyW`GM5yz`~y0-4q* zal~tum20)9cKocX6EXHeRVcuX9ApvgEwtc4%S5FUK-ilZ-@Ux#&>6bE1f0Vyk5EeR z3kwTziHVs9*_@CO$wQc?Jc9cjreG$)}Jrjp)`JD*{Tu zWP+~;1&V%!l-sSkIb*$*XUnR?&)mSP6-E@Rj4)Uv&JaWq)yId1HV*dpOl+0E%F{2E zK~~D3M>7Gkc9pW(*<={rfLu1E0wHf>wTwUUb_l?zL$yn%wC!qX8n{49S%9!GHxW#s z98=k>&WG;WH>nFj$FbYGjzHiV3a225sfiS{u*%A#&@8S@fE&&x<>je(hTd;C-g#*C zwuatkC0bUAJs1S;;;e81PS1XOkKZ>ojrv3Acnfh-{DVY*{RRS?Y4<9TDqRg2j+pahWgXAK`3_SswPuh!!itU+9On>T4$`eZ3WhErSRzE-9-+uzWh$vP( zpe3*_4J2w;2n1x&q_#Q#C{-bJssIsj$tX)4_mMD}XLyH8muKCxz6$!|o+=2XSl3I_ z&t?K&6w3l==^bU1jl~$LHv@uC90gq%M1wG@$(3hs`R~O}c#}{AA^`^lbD9|XwvN=? 
zaD8r1NKDM)-`775fa~nuy9B%F0CRwWYr4?oxA_d>FPFu^)?`t$4!Vdb8` z|J?tQU!NyJmwO|?Q)8F3UKk4jO1hQF%k&+jz*hqdi_VSddCx3L1`eZsq1kl#IjVJkrdW*# za_?~Qmi6n}ZAafqYIXcx3Zxn&=#(}QYl`It{3KoFp6^`gA>RrICX5T02Jj z_?fs}QT+5;-(K6rJz+wX&gX4&L*eL<-c)sqBxe{iO+F9VrObW+3e zmXVM;IeBTlA{hq8BA@1{oOr2AB2mKx_T{(6H;T}?JcDLdQ`_Lt#7yr^AF9F9-3 z0^Tr9DTbXa=mu2QjZ_piz&L7>kP6Jq5<@d{3^Y-#$=jc%B;D@-y&fQk658FhL_7Tz zF3aPCun+5fx&{nCKV1pJO zo+cOe1&l)zW{KG^9cs^r>#mxJKtBqVAEPN@LkX^St7D?pTRdXIdL>SW$3gRh6YV|^+^@_g!iUdGFUByBRlCZe2-u}2Sk;~Emq1G`Has`!IwdeS|vYTZYPjVBYg^E z$uyiGcOCKrBVux(U1khN|L@c``~?V;RvZ^3qp%0;;xMyEV9+_o9`=65V%?eIRXUq) z*Xna7cwGBE=>>vwNLY4u`++YB^oj*kA;F;i-oCUexMQa%FAt0xEgD29tG|TJ{SRa~ zf7}+%Y604(cvj1&&4FmBg-NL~6e2ys`XMMj7~ML3DtcDb`8DA`#WX{VPY3#ewI2RJ zJ7_FKu3A^&Y8xWvw-O0|z0f&UIs3>BX$fLInszs9^BYq%zwW}fhj5U1d}Rx z?>P67Y-ONR0oBM>D~6tO%Af;})|# zZ+|Yur9R;Ho6?K{y3D9YMEvN!*9VdTqkimErTc8N5UfhfMfm*$P*u)UAw0l`p@zI` z0bnICyB98-p$^tdcgU*{6&bV7cF5B%;)pv;Fcy;2q-AXtED#f2$ok-Jd%kY_l??M^Uy)oJ(fc0$A0F#w-V%Jkf^VG zX8krnW$ecrvb>b?Lm*S=l#i?j2eTKB4D+3Sx}V#5&U1&cnixX6K15(5fT6}mWw2mc zx=3SIgjUG}A%N~GBz>qUv4J{Y>7KmAq#mvvmubA+LRx9QSJZ!1;pl{s!)7uQi#&Bs zQ>p`{Tj)v{_A@h3FT!S!ez1|0^Z>`-g!u#M2DY*}l3K>Izmi;#?j%hk#S%}-6DIx2 zN=$x!6l#8y`I8ZrWOx$seO@G+_X{i_Q~}!wC@K*4UVif6c#>37!(Q`1(%?q99w~-i z0Y)qzEhQ7U;LG8TO)RA-(fB5&B)QN8c+Vo1(l;GMv)AKHD=Fxq!hBhuu~j=Y{=qre9JdWJ{2O*BmPt&r`iJugT8e5)@KGetJ z7=Fl=Ksg7(8MQh+e*&?eRX>dKm$CRW4+s@7<mE}EK9$uQCI=6&zXK>aoA7HFkPfXMqtiJEeMiN&XdzeOQh}xy6|eiQE3niBRvH; z1FA&-bG}*xNXF!q{FsR(V9#+V{w64geDvW}B7yBL--e-G=9?TW(;Lq-{pT-M@^vb( zoNq$*(BqZ~HF_m9<_`u)yfuH3U7*YS$^;OTphPIA#%9*|95AFZV{dthv4?A4w(Mgu z_c9%&BU$wwd_bdU2;7(iAi{`<@>HjE+6^V@)0!m7vYg3^LB zYzD``yg?=ooBqVwN=VDkTc*2*fr~0ObT`dl%5tr3<{7I*4h{BLZPx-LVhJ;Gv=8YS zlUxZJ%i@koBoT&skZBxKPI4Cu7Z*!yAQ}mUZzm@_g63+AGI`evX)7Z<0tA-X;l{#6 zi$D{)CXUb}n@8+m2IqwP+4d_}GPv~=c5iNc3JnmY^SY(PxHeE13&7YH#JE}R%E;c* z?tbprZN8hGnu!b&?PmPAl>z*j(7TkK0RAXnep<+g>xnz<)4IM$b zc8~pPL{bJ&XXQZiq}Cl1hzp`#)|n{@LC1S<4megx0aQCmKsJ@Y1xXQmR)Aa%!&464 z0U3LXXFr;=E=u?5*+(H_sYk@W>&dXlTX{z3gnb#W=BQ9af^s4fS3o~SQ1P%0^tz|A zXxf*m>B6OzQKVE4oWCt2v|9#!Oe z$tM`2>^!gILl-#BlrBz`Cz8grc6ur~=!X0?NA)tJEJnyT{JazX`Ul5E3`SH*3wq8S4cI3XUe_TOvpR11XOao=94ggJ3Qxg=SR z#LHF({#s_kilAl!XV_D`j)w(;6GV3izz6*-f`DXju{O{}$;4@BrQ z>>U1De}6jfAF;bjP8Wuwep|Ojjho-yO&Jif`Vnbu>4Xqlp+PFdl(`+FrctjEh z6EBCT2S;9~+!LV*nattRK(um0vDe?nhGH(dJLTVcd!i8E{{-Ai}ktNX*N;g+@w<2@?Ak`mn`$>P}_n8D3B#{P*$r zZQ z&E%FG=`I8h@{^MT*`k(Jnv-r1sw&f$l4Ko>>a!g}SLd-DOLbG)cu!C9(Wp-wML}wY z)=Cb~XZ{`Gzv@zDIN5tYMYUy?YKEkEX=G*)>^y}C5lCi_U6#e{oQsVJf;IG8{Ru*^a^3>Z=rhbEoT`yT!m}bEB#bEK+5>fojrD zJwkOVkkSZ+vTXv&X}PtkC_Fh>2f76njqx zdp9#7fW-mG9|9~LH~yz<-e`^GgO6vfrXh0EdHfC3bmqP^#shRj|0*Hl#FQLjpmzSG zlnavZL2g_)bKd|m$g3y6T zR@}=pO}Ns$M(7}9%8_&t#Jg|}a{F*v-2T-YVACuBrl+!usm)(5FR+AuABz-(cuMMj zup;^5ICM<4N4rT!6PwbN?Pos-i|Zrs1(v$Um!ONB+_p$lPRI^-Ht}3?O!JSQ1O`8Z z1>-FpOM(k-{L+FkH4z33Cfr7ajnu)w*jUZNvN?E`lZKjzE~}@F=*H<+D~D_^_ulSbmm+%|k@` zfmSTLk2f0`Q!kS?iJ zODPGU65-J5NKktP?%%(o{16D|X?1td`iDn;;r99S;F7U_Rrk-GY=YC;sMG(pLZA%@ zAEsMZe!m|!FD0tmaa+RfDOejW&;%TeaNbK6XYD$hgOKu&!^aD;u5ynTD^r@g;xz-W zcJy29)3kX6cOp9OCIbwEgSmS^nS0nZc; ziigE|K%=wnD;ZM!pr0kG#-FIu22goP7O&8fw?#OF4m*JA>c$;m=hgFUmD56})0bl* z5W0kKZabA%Eq^|{c`a<`7m)VBcH^c_@&Bm$%dnuf?)x7GQ9`=AmG15a=|&I<>FzG+ zZfTHikZzD}1f;vWyUS;B?(grq{?D6pUL8HK*Is+hF+Ow5e&US$bIO9r)&}I`;yxpw z#`@se;z7}0aAr9Ng0KR|`}CQDr?fYWe1@kWJ~|cl{Pl6(X%hp7J=wAS`*Tm954q5< zC!b}}y{Pv3_``V>!))_;T1E3yRkB~Q??1+g4(F+#&ljDNv?n=|hzX_S-G4)nid+4Z zQA6NNaVQybw7e%iUHI!GsnJa$JEdb71(e9$aQ9V50-AZDsd&*4?qPDGsB0!yP5_KF z9{Pk`alPYck&qt3$N14N^d<88*BtBiZD!_C-^T^F%HZLd9E1iK%97*6Ne 
z_iwA6^?a8)*dFiS=Y;R^0^9Av_ou4w_v2fmjao6+W9e$umm+tsa(+I|qtAc4B8um9 zVi=0!f!UT!83{196!_j|LZ$6>uYmF3UCbSEuw9;gU;PaV4}TnpaA;`||5+b!T?3Hx zp&&gZoVidoMH%O9tw@;&lEE8CM$mUsOjNAv5$pDiJ^JX0elBgmKqGH~%U%5_lSn0z zwATm3TEtiuEO222OY$G?8K?{08RGMQPQBO#C_K+r?W>bIr%fFPy6DgDn0e2`ex)cs}=lRG^E~czh|3va{ zQB=|UyTHHH4PK@ACl((RCQ~CJ(QcJ!b!2<&2i&_OYLVVE7y7rC5q_>eT#2wOB!u~M zD>JwXLXHM^KM1!&r4Q9DqUEm&?640m6n{N(frElv=6)r|zna(^y41723J?G$j3hMreNc=QO7p*d zhOaS+R$U&K5dqRwXx0y((;DSB6H4lvHBzE*9d1XcKJ)-dS6A1NE71V~zuQV^usZwm zzDRohJ2TT*`)Mo`&C7#nS|tPtObtAGz2#=7Ff177?MmKVDyDMK74WL^i-%!|6chI0 zMyS#DBRLsoqy5&3iS3JZtp8K~t1z^XOl3n8PlaL?;mYnNO!ebe?vmuXTEzfojOrXd z1mZE*4huzBrgy=Cl9|(|-6B*U_{5GH=irZuvNC=tkivhJ&Lp+OCGBC7i~e+=EJ+q0 zTe4a0n*YHZ*Ns8hQF&n>wM8fl#mMkWRd+60i|l(|x*MIiy;@^`GcVP->b zQh!{u{w_r?jv#KzHPBI?S5phM972AW3yj6MIX@%17h;?`$ISIPXw=jrc4y{konu#{ zJ^BOfJ5Wy^33Ko=7F?{~o;QIE6=?YjrA0`eQj!N+6gmRorl2f*ur93pB!71{kZQAu zGO6bcVvyq3WMz*J)1zOeKUbOl?e>Wr*03EB=U^~7VD2&fro}Cf3j*Bz=aM1U#vxnj zJs_+~qP{|ld8~FB9sA$55tA-E-c(y?J4%xJY1WOER zT#*@(x3m+pk@&}(D+AQKz%JRIxvEQj#Fv+su0{wY#iJ?Gh(0ADnYVg0=6l!fvE#je z;8PGByJGy?zzb$lhp(%v3l=#9z`;aCNm=lRly|N9ze_s`E6rzc&TF>DH>@(A zh-4w-99gwX8yeYRz`89}8^NoA{dPDTv4QU&2oPrV zA@s_CGJ_pQ1em0=qw)7Rd%Tt>HtauZ^fEN3N)0K&jZ-*7(e(ceh_mfB;=tfMJcCki zg~-d9@u`BNV`OBffCJ!KmHk($5)FY`g!(t*M4l9Rm%v)0A##?apw$9ftOS-GF7T<) za;(fdk|(N-WV(nEB8ZtH2Doh3AcWW8g&sJ#^UKPZ2?RWo-(%59;<6e?)KfVduMI-1 zk0RCD{&fPy?dAwqyUp_pj(C>(n{y)ei9!Vh5Eos?E?$_P6sUQu z-Po`+HmN@oj}-WI%q0Pu%gS>J<-bqrI5X4s6@jLwTiG<3myA<(TYd|VY>p+YM9J^3 z4c5Kp5)8pGgO~>CkcLySt(-?+qQ8A({Y#(c=KX2fPWYhDEHi>MppfZ*E*&kj^D?*4 zDj|-=gm&p0#Qpw~g=)E>m6y57w(rmGT8L?nQg8nIwYZAmbOV{Q2TAaXTpyk(uY(Zh zT)XmpiEkxVKzjr(t3;zCFaF-*598q0=MD{j$ELAGAK_1j8mO~DirBxq4s!vf*hSA% zPI9VD=0!I#VBT4m^e;r4+5tmK*C&xPy*M9^q5HoZY7%F!<3~Ag@Wo4_4%u`n~l{s$yRL>@vrXq7b|(NU7d;S z=~DNTTTu<%cKzVBSW86^Iy~7lqK|yjiUuP-v0b=5Vr)kCwmonTHh8Mzb-{8m(tm!C zAG7`0HfSt6p0TVOZ*aJc({;rh80jKKqF6Up;{-POL|-cg4k#PHr{ewQldI7qVYKHw zg~Y_0`Wm8UQwut#3-X$vh){IO_u5E+OtO8rGBXD&*pk^;exwxUH}v&2<1dW-Gx0Z6kK1jij%yjk#_IM8x(k!mAMYDIC>z}_d|nv| zb{wG%2nV^+I{F>6_nH1H`fZ-~ZxmkXVhZ=Q1=-#|)eBlhY=H zg~vm@(SVD-JVOY_N5(mw=LZ#W^$`Yxmd5136UR+y<+Y{K&A00Uh(j_}|O3GcNbR zudCc7-wL^hPv5P!=8a!1DpnL-ga|&C6poh~R_R+9-tt0K9clu+nE*I+)0?9K)p=g$ zBNXdqaObQ6OE8QB_lkS#T^l_X4TCkA^3T%F_N01^&CAz6XAyy9xMt@)^;-q#hzh1I zbI6{??yjyqux(XDB?d6)dan$+#s-VL9)yT_brzm8omHn0G)+xbTc`%AmUXoAikLOe zE`0X_9&)D1tC}EWyf)X(I^}&ELltmj-QdSUA#;kN1mxzN{mDXFr)VO6>G>KHmm>OC zu*miP;$s4}iiGSMXqH6`wcw>0i%c#&;0zXhqYfoM@z75}4U^XIrfo?Gl&b8S4r78otNvSV* zVga2(Mp!ef+-e*SaH*@d_CF(2c=ccZk2@}IRo+f(jt8MM=k&cGSx`s1&;(10Hjh!O z6i_);Xf>gIgijVlbjTO$g}}Phn1Z^YI~d_7#@{LzM>l_oDcTwJHv(rRZxCW)fGqu2 zgnN_47$bYIS*m3y+Q)&_)l3H6Hd$(At{v*)VseJ~Je+UjpMEs|g!5nlH$)*kHiIHi z=**t(&QqbkdVP_Jb95zq-;i}mgE*#|1ke26(@sP1^!`S0{JUl~FFYAGdNi5YKSt@@ z*uLWxz*Ty8&YF(j4*L+~=tqy~93r;v&7=)@?3CKK7JaoF9gXv0WJQj|wZ(=0tg|SF zETT;Y3luD{9*gdeW>|P(XYDZKMDTWGd*!N zgNw`I#0A=T50!I5D&+~>;-YHqM+%n zR0{+&v)?N*9EoCr05;(IW(asm>wdrg=yg4uvskt z%Qck1_OE@gnlH8g>e!dL_Hk4=|7kC}J-gsu)~Sq*GqDFg=u?2ow9yDPmF$qjrP}?< zRGG%&2LX!=B32Y{gs11D_fCs9`GXKqF7qOiR49ZetL+NYB~r!4oK_2B`O*nt?NYXX zL&Lz2^P%KVLLvLlcXf<^VOunR9V`-zi%B!YT2Q@hU~yy_67EqdQtHVgLVl4 zxDe@HHr=jPz)B7h9Jf zC>aO0pDTT$RJpj0Xh(B?PyviD5(TrR)v>>zs{akEUmqf z-nnSF;bOeoAhmD;?2Zvj0n)m19K`~E`45^TK1Qoo3>aTQizW!+?c2AC>s=EJ17H_g z@s}FoRLvf!w?rQd9jO`@PS4{b;;Nn#|{0behgi{>s0lRuoKSuTN_~ zyQJ9n6J=z-wB)z5;?ky#6o~9!q!85PH5o*H2q<=KY*M+2|5EH6RWem>%PPH|vSh@y z9m)b#kJEVfgy*#U5GYl}jrct~?2GiLub8X8V5vEV(b4>eI(8y~%qvjMe41$QgVPUU z)NRWH%c(-DNc@FB?+SP-WRwH#_?`R1$s+TD#BXwlAJ0X8o62lpCl@A>;GQ9Wmtjv# 
zGbbYHWmnebeRH(%X9XXifcapb5%&bbtkf35fCSQ>{V)*!FD-{738n>0#3luBU*9qe zwisU?D_lu+-6gh^lSGd2yF7Jpst!jXm5`e8Wm08x zpUf^|3Rk)@_cpm%095rYBN72|{Sj_eWgJp3?R96n_$%QIh$%Z)1g-Hp@NEpX&^~od zI|j}bMpc&4pG0cu!Fp&_2XXwk##v47l8GsvAL2=*%pKtzZRp9qcX}iwY%wKt?AOv3 z@eN3kr7x|bP@1jo>|OvQ%sBxgz(vM#SYMn&2E=sCz&e_#c5m3*giaw_K2t#&709SrWmxo)f^r(|Ey;!x`ggMWTB)ExIKkx82j z9)rlI($_*g99k2{Uj-VBCo2j8kt`;5S|1GOuu59rER$2nUSW!n`(q z&(xoWET_0mDajB*V()M(^Yqx8ZeUB|RHTtD5Seatz&87z#_Quf-o!oI2w(~-$+E>( zSG`;-TOtw<4*C^-2Wy<2;OoSqn_{8fh@;TxOCp@*&^w<5#GENM-ybB-xivoC_-Y`` z-W9Q6N~ZOfBe{(mV{3<&GhtnBW3#j2Hq=BA3{=K~)e>U;aL8Oc2y2|xU{7$@BPum| z-Nr6Dt66Vdq_kb0u~?WyP)~;oOdm})#n{cl#VHZ^^Xs`>N)DanNGV|*_8X0SiY6OW zu@Am#4n4#qpWi*JG~b?}=3mRpHK};z4bJ)BXLz`WIj^0hc93Yx=fJ`-F5s{>P zoAUoD-q8)90qiSU_Eo0p@a(|WW!5;QqV3nmRz$aoD_tOeq%MBbkLZt6#ak0k+os(g z$RVR6i?~_kl>mRENHFnFwsR5@Mjdy?g=OnNxg=wq+K&nC42zc`Ok#>Df!A{H`+5el zk<_bpJtl4>o5b|>*ZvRiqtR|=BdziIoLm?)vX$oRd`~~C$uE_(__(U~;QmiRKgu;m zcT&H6Y{y3)B5QiU?(RkE!Sj?!35MwForl6Np@sWmlbwlq$_WPUwZqtmX6)Ns>)xkd zcwQMWzdsmW%)8u5)~ZCy+mLec1?*^RNW(K$kUrgLiq7g=ncCjpp{IEhU-**Hl)6kV zdZiF<&z?k~DFw2?A1VcOW<|Ivml5yBX%T--?|i&HEjAs`8M+7cuL{7_(Z?g)ip~c? z)wvPpg?;b{WKb|bNGYAc$K6>fN;}NXz4pTO9LKC#O1W?U+cezWjZj-PYenHxq2`j* z506eV6Yt=<_=zxm0mg-2jA3yX#QMeW3dn|Dh4R?xm))^=Bl1rG8Q%k&Opxk26VL>q zW-D|HL2au7_+_xsOgokGYI2;q_GJetpU-*c-$A8>!*iY{N;Zae{dZD?Sy(TP3B%@r zya_u8@8*XKV&>z^%o7U2eM{xag9T$eZ~bPfhF|*#G^uS_BRqC$Ynihyd*%P2P$}M= zcJEbg4QZyjyO$3_6*m87f3C4${Fg1j+s;1kDbl)B?4X!k>qc!*eHA}Uj+=iaEq@Hp z*)XsVKIpHtF$N=ElP+uv+3?328wu(4eVLV$m4MSpwv}QwrI33Ao!v~Wf+XRJL6uYL z-zqHk1%)TQPeuBlrJh!>)<}6DpUBp)H;JF*UF$u6wvu_Nn?Znb_giT**C|zdDxN}5 zAjwpuEi*2<1!7@yQ_0u#aZq3t+ims@-M4z&DubD0bY>{Nt42f61KJJN!{64K{3VJ- zVg|3g8Cpyx4EUVy?%&a=jdQ_~$Ii-thSz|})QDLY*-_4ovee%!GbLel{m_ZXr+2d{G z$gq&wRz5bHjRYPasGj!$GXH;%4`B59V6&ad>qN@gdUq-%+tZmCxCN~9`2yHTi}r_n z!a;EA%Vj&Gsn8yj2#L?=S3ydg##r2U83dY4=TnxN&f*(al7*4+OOxs{>@9yfNYL?! 
zq>I&%?_iNr9KORXdp#-+=K@_$x+N(?w{V5GJ-eflgysi>Aie2RshVj2Q#h^Z!@Oh} zwrj4C?$*SmMeW4`MQ=E2b|)K-Me3^7y74=GqM28GBVEe>ho=c^20u8;BWJ%;pth93F# zU)s(Op#H_~P-_kjS?TbFDh7i9wbsi#Ba`I~CH_*sOg4Wznf~%mfoF3`?SQ|?p+9(M z@2CXX4T)fO))g~ert$Saxi(S~y9jesr&jn-@2F}OHU;8Wwtao|6*w*&n*(m+1=ln@ zpUaPOpfQM0%T*;J1oW>7eSeD7gA`pbQbX}6b&W^L#}~+jO7|DrgOihSFZZ7=*v9-w zVfx{`pl}EIJohwqM>I<)=2=9Mq9{FK-l$b42a%7v@* zj$l!TIb8gP8@dRe|%20F`{nlhU$xgIQT0+`=A``%)p!NGzGGqo*H#^8yeNj2UIfQGH=gfxr9D@ z29G@VUdKTGJ=%L~zWcw1S!IYkp3zG4*rKLq6gyhb7?iQ(#!q%ukq84{pkC)C?ai_sTCkB&WUvG&(4-?JJ&9SAYcv!4VAgflVdh1y$c`yxETLPh*s4h>+jO5q_u^ zIh(+DQXT3GLJ3J2F;3xI8(ODv)d=hqf(ODFs$OJO`hMAUkdEWwi1*vvNA+!w$$3g_ z@sMD=U|uT}ouEwXf6_P>Q$A~y0fN&$S+@ z*sREPg)XWy&pL%nM|L2&X5kYyT(j9deE(b|*V(thc{domyTOUQFTZGc^s%}F!h9Lk zwqH%jsh1e2gjTOs8TZJ9*v;vv!E0Kgmw(P9R zZ~4o_rQgNN-b<$B9Ra6=b`N-NlmG!r<%)fmc)ZO>olv1sMaJ)T5s=8JE3a0ni4@10 zd8+;>G4bM2peGL$*SL7oaX#!lR>zeKZqgA}A`NVghlH>^#&62HD`LKO9$MF=svo?* z>mh_9Rm5v=v1dF48rYB3pIuI z!MM3nT`|muns5J-#`T;~64D?c-P#1N30$t=!KrD>ls*?y1bR2}vpx&alQr2g?ZOjP z{_&9;N<6W*XKLs!YTx@!rS^&CA}!;^jUS(gGYTMEyirsFd<}#*i{m-#_H90ncK+EO z3dhgUI!6c$pa{ns$bxR$R>a1MGM>g)$Y;e2@1pG~_`_(O=Xz>o-s)ptFw8K-9lko( zT!<#pqOtnZ-BTD>+Hs?nO0~t=D)tuyl_J=Ac$NPDpT7ajB{t(>oP&ir+DIaPO^2^L z=N`*pkD+sN>Tekr^liqS*H)PEypGh}aql#n>ei5!@-^_QXrtd{(sLP%-zK`@i09kY&)Bm6u!XAu``r8|8;Zv8xoB=+8ie}vjWP7tJQ7b(al7~Rs~u%G&) zEy~v@3Oh+~X5M>><+B>PDsUgVqoYy6Cnn;ErN<6|fGs!c#6Hhf&7lKZ+0|G+^Tfm$ zj?x;Kq&~Uv$3pIk*0zFuKqXv&c3e2l1SZ11J>x|*&s-e}I!NauZzu}6#o6N2CsfAg z$Nv%l7N2)CNcAtEMB@WgFz9qLewtQ8ZXBf!MN>b;~{6 z0o_MtlxE>vX}TsWGH}IZ<%`hcy(a1jq@dm7C)FB6^Yim7`qn9_r|ZGri9Ye$jqber zwVDieNjk)YccEK5X1A}^TJ=A{dDktZnW>x}n6Nqx8py#5;Ip zF}}S9!CfP8em~4nxh!t8a>F@rH1Nvz#WW-h)Xkm3xv^X@)wLES1v8V29iz2B98nZ^ zhSxVf1{Kx+3JjS|WNuk`>!%Y&ezy_M7=Q(X&rP7mXf_>t8c4+NJ{E#X&L%hNnS_y` zH4+Dfhj?b{yS_j~f)5vgtpHnKs! zFW)+tkyWZVKJR@wZuas?)Yh)Z+Z6wT1Ankn&M;$vau$WuXiQ0*o{Oo@Ig$NHDHrgQ z6ha`&@s?_}SkmCDi<-x{<>k}0nQC1{uA{HB$LUj|d?_D{;sBrFI|&pVQJ;eQg9#ug z*70aLaYZkrQP$r%%{<2|E||495o>$jSFrpfrtv1_Gognh%O&J-RFoHMG>zEJc2-y_2dDlS(A5QN;LRT;ylo6oLJ}mm#bsWoO6={e1 zJdq=Xf#s6R53FfK377@}d9q+Ps#6|Zd7K!IP~!x!1dG53d9A6^l(whL&Pmb1MW{*CWttV0JUtxGe$9{u}&yp)|AfUc~GU}8ISOo`9E zb@Tg5r?#M%Ae;G(7ZNTLg=__{#CWxai$U7PGNc@msRP8;u{| zIFj-L0*oTWPCT5k)Om(|T>)(bkasB#dLY*AgK`D=qx9{@_f+Mp6;G$Hwdb?TxfFd! 
zt_cXG7y0@=Jdayw*GtSA2o22IjTH4Sh zupl;aWYR_}lWD#`mU*#l416#`H`LqIEmrNzvFocKpZ$a5`r&Vq zyC3M#GR5(oiz>f~-N8CX8VcnhNx+u9G1J7ha$ulS)B3T?b{9xvOl@VK**$5{kwF}< zjnaelhu3{alO6oGEfpaL;ARv_FtoJ1rW6+Yi~4^Sh48C^y6%NAYXqT5AR2^_4VuKs zRc{%xo_dl(v4lUJ@W-Ymvx5@``weL8=z#r0Mb3RUv4e?x(WGod=_)LgDKb-oqw>sa z{^K^zl=?Hg>@DN691bg~eSCevNTL4RD5iG0!yQsU=wO_Ee@7w-ucry2g9rP^4kOv2&v5@&kTB`v@X=+sTm&Iu zcx|2y?TvbKKsJ#k)f`{tTi4G*HioOqH(J$Wq|mQOz!HD) zcHn)svuoOzUoV&(|H~e%w5&CqUT>-rK162m-g&n&7n*bXxZ!p@qQ*Py9Xo`J!4qM} zHM$VaNSkvhY>P1j+EOWI0_ z$(sO($fIbMi!VigRi(sX(YkemJc=z0!OR!_uK$o zp`Q-c{<^Xeh-b3JZNC#>vqjD2^Y-4ZCCr&xnV=%Uog%{uNF+5+F07&OEV^2$Pr#8& ztCu!^I1>ug`}TCczeP-k>+{M-Zr8-4oj!tD=(9dkr7QtNhSkLbcE4|XMdT`C68PuK zMen~ze8$CLgLwM9BNwM)T0YPo#}#!|?#-6`p+}MOXRYHZ|bRK zdPtgQc!sh9?%kviaiv#peZ{$WV+L;$)T4fpGmf8?FG)FgJ`MeHoc-q;S_pwvZ!0Q+ z(8QCFuE`XQZDn^mtMgq+6df@sPP>6;>%?ob53xkOA_IpDfq==8Nh`@0c(l|liD0BR zSEtny-ulnJoYBmgPs1M@w;vR!t4^!LIFv+1kgld{EtT@4<{Z0aGYoz!??e=*OdEDOI+PSXTc+yhshayWan2o4-|C-7d{=i;3?9CZ1YYfg;WsVVPr?gMIRB=7 zvh!C@PBD>T>o`B9CwvbZ;}j8VqkC=E8L_up&`H_j6h=r*l_1A(d}m;NnDxnJ%d+!N zh>4sem1sfqV;iZb}&WAtz>lLZ?ilSYEZiVL&gi=4hzSpC0u{up4DC?oA-3|xIM`S z_Gei4U7ID)%#Rd&JS4{uz%X9qtNjPYp3yR5>c`BxO}grIpq(okYG(>|^-7Ne+`sRp zKLZT1W}eG)RQg8yfdtxohSrLpAHXF-PE6-RerX^T~J!0w7$P61kaS(={#-h)yHb4M|oHF zqm?iJLROd*HJgll1fXfMZ>83S{pk21B9P8lnEwX(Rifj7BCqos%5VOMGgYa&ux9h) z2;ykj2z^B#Uz#`@cj=RL+6CB1Sj?6ea|Lb(1B~C6eMFBVVp&9e=^XFULV`^$ZE;*> z<@wmt-iZ2}+QpfX<^RH)yJ~#efabqHi+CgAFXk1{jUk&B97Omce!jD`-#{0})W~}0 zw51~@>oi;wdriNb@R0a}Z)Pf4&|Bd7N+pjIBiNRrp!BD0-w^nkI?(^PlKi&T(Y3sS zp{NLz)SDv|v#`uujMf)kU!=~#fo+ANZPDH&*r_6l=&B5`EEf~kI3btyVrDHdhQgUq z+nF4z>+2D=%O$e7TF<$CzcsV)H;6eQ4bP5_G*%BT*!RtkA?egdia=SjZX>i+5Q-Cp zZWLU$3KmWO6ZB2B4~m}161RM$12|}~c6+wtZLa5JO)Y6JR%tbp`d-K%t!PHK|1%nr zlk%i`pbo2P4~vy&s@bX+zJ=o!|3Zam%^Up+6NVCXL|r{L!x1hQCSr<~D!cEQ7rca_ zL%r9|dHc5F7`_2z0i`MwbgLKkw^JYH%mk2d%pd{^ihMax>#NQ)CCmKxc6_+MtCcsv z?#BAY&$Iqz#F$q!T>t!)56EA-u4-#8m>qW{XU~Tzf_v%GlT^*lD2d!PCmMRPEHDwQ zry>HdqS&=;!>!U&+oaMD0&E=xK8<_i15+g*lV4rN}pQ z9p0T~EP)w@C4!LOU6mU_;q2ksV23av*=@6m_Z5R6*o?H8AME(a?O?jpXctfZ35mz^ zu5xE#hgZeCL2e}SLp3OOYCJpj9mb*Mka~=Xy<9afmnYbC`SG6Z40%{h7#1pF{m(QT zNwtm;hED2dpo0T39~vk&jLV1^7ua;I!{6z7mNw-?+9N#?pH}x~O8|UTHi=Q@d^?TP zrT~z`Jg;se4+5Re@8lrlBTqZKky$VE>TVqh6}yL;@%TK^kie90{m(<; zXTdKCD0WITY05qU9Hg;!FPpWgEsI%irk*nO*$8nulSDrmuP{{n8^B0wT> zKx58aT06ws`gcQLbQNl(=LmdbweC=FHY zwZ22q3jOY#QY@J`6(64#!R)@{TmC4+!5_UBdyV&bufOyk3z%|QmtR`l>wc_00F{1g zb2Ha0544};JxW8#+BPEbp8KhI(qe(G<2!S`_I)`UT{X1&<3Y9V(W`>&xUeE(Yf4Hq z2F&L14IJx`U1+Z3#$HNCa|v>dbwC)*QTSZ10v+=1Ze1S#h^SK28!|!fNHUh}fvwls zr3(}E*uW4#Ux^k1s1od(mGRD{IP!uoY-ogfeBWAdeT1I@goJ^HDw1KH5zp<`N44Uk zi6<^OSlYo!w$(e^{RxK><-0yIQ{pbjfOHLOSc#i(EsS!q!7bZV@i=}~yobM_ol;d% zGdV9Uu6e5io5l=u9A@17k5+M3i}gzXUSd;R0oy|a@^m_MK!2piXrQ%13JsKQkyoPA zN$;!(WQr9y4+AXZ+t!#E0(SL~PYo6(${R2IrzkQikt3q~8)?ik_+R;u;H5WPEdt(_ zVyjwgd3}^|jUv8pjnN+o0369I^qe2DF@rI6((00LCg>6s4!fhVg@uLF&GejC9|Bef zg@%;!hyXh`EHsScbJK6a+lCc63`ExBmia2;b(`W3pcOx|^{E8{LH}(TNM8CFYH2++hfu*C}JP4Q`_$jMvaBHT~h5Qcvyd z3KXSR8qS9U8ifJsT-8D7_FZ;Q2OzlMpsj+u5bj{w^YQ)wIN`PZ6G=5BN#Nn*1UOCw z!D5{SHYSZB4T^>CU#$hTS2nOz=bkIWMMDE;dn8T$Y5_#FrSiE|7ogr*9f+d9#4)$8 zV!>{%R+kjEM3Q{tbw9FT$PY|zRWU=s`9Z#_yhc7}IzU&6kII^_ zZmK(0%{+)r=d$x#s0-J+E7NXC97P4Y=cfxn<@RT(Um>PBGw?g3!wHNfNrcrfVBXaJ(Yv0;@q;f&71Fd z_O?Hj3;u61s!ZFS8-zmPu8Ds=(seI77VS`r1eA8c0kPV|RFiu!5hfO+j%9URelaRT z{1&cH0jB<>pywUQ&ck^>@#I8)!2;u}PTa1mq2DE5cZ6vwp>1a1`x*^3&I<8dk}%6L z{>vepii?j+dR=L{tzhS#JaS^V&>4D|lg=qG7m9FJiH4C{ji+rBOxk<`Sw5ff0tH~A zn3Ewty}rh|!*={FxUh(b`Cy%7f;df?IW z=3bm_*ROcJJNArv)-cIr1VM}i;(Rb?AKW4!zZ#-3*)U0?or9UTXfcMf>3&> 
zdbLjn5&&f4PZ(5<2vWPB_y8aBOy6uB8T*N1vd>#K4v{1RY26XxH&;yZ+WBVxdnwrQ z?gA=*+z>c$2Zg|SXw6zf*W0YhgG@`61G!`-UmnLj6f54`=x6CeYfF_Ok|5+N&)3L~ zu~G0Y*E;0qZ=H3wk0pBFCSwE!03YO7kgr4pcg&2dXzBeg@|ap1#0)~#7!ut7=&4nX5HbYlxO38Jp0+d} z5YRHIAo!g3Gguyz?FnaCNcI(yeaT`kAGY!9vI>h3^=PJ`!r4a0 zNdf|2(S~nACxTdi2SAv5q6&Y?&vbW-8q)cz_v^YuPpq|*v0@f2ntd&TLo6fkGNoeu z``;)5D=sV;q2|E%K%JN`jpcPhS{So$~<6z>9JqK z!5Q4wHysuvy*b@342@u4Dbp5}m6a8&U`#wziRr|8CR9HW^9t+UkEklI*|SVgc7q*2 zG}>!{D)5T}pwPmMGhF_yl!X%&(+S8;3&{|O)Vj)-OG^ZFpd_H5BmPj^>W0Sqg0#!= zA0K1i)@b{~ZgT)CkI}$kit&X$evU<2 z7DP1~IG_s!7YgTK)sD>QlrACD0XbG-$OD`i_-COzMOU>M7M z<`?ezpHaLvL8d6@mHCRFL7e@7H=`kU zqcA%U@;G@Mb|R!&!Fo{@P{(b$h%9;J=&gY>AYzNrp2=eSZ%qKu7!35yzxxif&RXW{ zfXA(o_b&fTHp6sh80wYe?&PTPq@Z{Y0a6Zqxs1o$nSZlxQEc026c9Ek+az! z4UcRC&ANsF3bb_utR}=3T%i*S9QOCv46=Ytz|^%LhdcdZ-&>ZP&n8#On`#hwY@7)Y z#$lga1k1)Ie+j}!)xb}@ZYsMuALR;^SdfWJN05jGwDLmKp!!xof;KM?M)__I0vK41 z2D_?8u6!DA?5BrR(&SqA#XCw(Q#!rE#!9Sv7nJc%RT+lJ6w+Q}dPX2bRI=2az&v!g z+^%FwBu9e|)o2R-^QTBWaWKi|aud_YkNujg$>4du>2VO!EgE45sLZ_{xWRm@`D!Dw zojC%@Sq%&EuObndc=r=qIn;HZ(Au~_1qMz}4+T_dg-wB7za0OlvPWSKfE6@Hiu|VL zK?Y>5)Pq6<>N+(bdkO($*}oIhbo^n!r}G<0qtl=EwB($M3P!LnGsHnYFJ z!~p@n=ZFPL{`K(7=f~mK1pB^{Dj>N;Ji&f@2#(X5ZYrz9a&V0KQst8h5jJ^Z#*ph>7IBJW=30YAW+ zAA*+q_w<_=mylP2?C+brfH&jo6-X9TdukBV#VViRR0dPoOC^IxnpNxPHZN~jUW;Vh z%?o*u9z_H4C9!p_zf>D$gC;QK#v8=&98C7JwGmp|0tv2up*?9Oe8`AFIH5WLaG`$` zec8@)M-gZ_CY*xA15{kaxB_vzU^j~I=m?`diLGOzr7Cu|^Dd3AZRz;pMG*UL=vL>e&N zSh6tGR2v&~BBF4^n9*^e5fE*}d!WDhA;x1rxi1;)oriurbb2oJd$uW*H77tz-sI^Q z4@J7lEOww6Kc@k{;V;HIqnRLkdA6Jbg_A`tq-pNjaPJeIRz=#`!!rd&LWek=Wk>hz zR(l;atp26trCz>sBBv{B_hS0ZQm%q-<|g9(-)4f!nK|?mq0K{ciZdfz@=f%cZsE_3 zO}$dCsorP2Jg%o)&rYXQ>EAs`rLi#YCGelmIj2Ay9)7!AtWsEHv)ZAal(A?!!<Br+l>%m#>jqD7tBte@=xD(MMHQcJI>aC1ElCW`8}TaUo7>d(1znDR^DlS(405GywRg&D zpUczu-!j=7{%sweOEq;$psn~laV?S%A3p~QMk^39TrP0A`?mq)Gps~joSi%zAO3pT zOTzuC`yL4r){w#XoyyKUf4feSt;Ro#iq@UC3Ad-nVzU69I7#gLz8T56bPg1WW03MT zx-_J6QA?hs_Sw35pJG;tg#44RurE71`xiB}1gqudX{3504!LVfQ=XsBMukEsK0?+g zG0r+N+HuQnK)heQnANq3=D*RfhR8~!CiRIW|2WrV8t;>s(7GALD=YeYd!}65qOPuP zW^QiJy(q8oSN3Y^%!TM5=Evi5k0TUfD?UjSpAJ~mu^(ypCElk2c-U((Uvlsh8((pz_|0siL+I8iMYGr8D7i>yq7PfLi8NRa9+#byvfPmK$v#UN(iEed$ z%%bQH`W&geseA&4N;u_o$5r_V-UfwtfSNizdlRfUcOZl^=cAHsPcxU*vX{ytywtu# zmN(g1{e*w?xRdLAHa9b@v;G~qqH*JWp~6R(d84Fx&;mI`;7rTCgNz_3&KhNZ{6K=5 z^AhBk>HTu=ydH?XaP3g(Bv&)_?xp5pBIq@-Q3uC7Hf zJ?YIJF$3m5fB!Z-`FYaP(bf8Oz1h8l_bcntzh`bBrX4tq_|u>vJWP1Vcdwz>CWB2I z@r6z>gprD-vOdZuE*1Y+KA>TlodJU(H!?ihX7Q(9(d?#kZv*d#s^6Yzid>@<5!7`< z6Yon2bWggjeJT8d5b$kUr_$^#V`E`uCGVG}%ridM z^IhG>q4|#gP?l$Tlw$ORo{XwWL_kDwXxFuCm*0ovI2}B@|3-zi3YvxzNqC&hwyu{^ zMxEoe9HUfcP48_s7ZADD%gy^Hf*o}FVpTGI4)tP^?U9fPf~}5N#9g+SZn36^DxpNs zmZwe7m7pcTi}N+d$98woj#$u99&}78E^F78I~-%>d9lZ6;(HW{h`o~|{l>SwIB9Ni zs3p*ZH=$*Jt7(4Ov?{laLaEMsqq*<-HdCqlkA_X$cdI*RNvdu>74hVYyZ$B=DR}GT zOlLsFK)gbT?8yr?-_3BGoj&w8q`u2#P&OcCSr?EC*%@NVm8OU`U~5-!df(=HD!a#s zI(Gw)&^Wk_Z`3&UiTV0yVVfi&$85L~cVsTI>SQgUmW!3hC;#Z4=yEU5^M{RD9~>;D zf{pl9vrNLYk^Fr_iP4>eVM8o3Lj~#i2y5D?!^Rg@GWnthd6@c5l16~ zL$!;4jVIrRbJj&}&+2JRQjBG}yh%|1AAav2Z>_!_$jDM@pz+&b`J~Iyx8?u>Df|5n zk%W}=@wafY+hfzhc>DG-sKX@pIyA?;)P|WU(0RAJM+1; zduIC(McbKYsOxx!e0THTKEQ}hN0Av%UX>2OvAj>|j;@31(#QwuFhw8lFVp{9gr#uI ztw?!n+GM^9q7|Q)|1O9rV>TJBfPN*^XDBHe=WH>>(wKopgcUR>51*sdB3i@~!Sg-< zv7?bFhWUC)KAA)wgAGe39|c)b0ab6XLlWpu>y{9p-iMhS+fNjwSdgoK1H0vj(iJjLN~-UCs^ z!t2CKXqpntbKW8Qwo9%KAd5eR9O+tu=JLb|eiejkb1N*KT3&985?jqd~Dj<+qWHm2$jgKMMhxXp>Va1g|kvgJ9Tv(bZ(Qj zsTlcC1_lOxQsb-b4=--e_} z%I*76Bnl*Gvi#Ud#kE-U+9_g0hsBbyMPumd+wGGpRyS}H=G$N~r7)wz1B+26Cj~+F zsYHg;W=D&b3m(o=mz5FyOVy1E=Hok?Z>zo0Qpq_K!7yWgQP7Lb@_pN8IZ_~ZOt|+D 
zTDNq-6fl}calC2hjd4rfgjBYMgjdhgk&sx6tfw(AlPs3cw+bdZC#(PD2zVVGJ{DgW zpLH%Q4Lu3xP`#<%@=~wgO2>25Q^n8sA3Xvc-*4Py!_N9Te?_)sR`VCnu!hkS8q_?e z+x|DB?iJyPA2G=e;FA8M|n!se)MRYR|)mym~Q9S|DK$ti5;DlCMtt7&VA zOU~E-5BJJp2A7PdY4`Lk1*h0}FNMWTZ%<)Az`MM)O8cv0+nMZC{)|HIXT2R{LOl0` z9p%c2v)IDty|e6Nb|Y*}IZ=an1TOJ>C?IO&|Vi1>C_^yyVFNg*TQ z81mmH0eqWnSSNq=(o0z3x&^8}5M4w5s>TRN?w7>Y$zQ!>Nx+k*y?c*}ulMYVadrhw hO8y@^B`b+8%Q)R$3ZEciPJDFks_%aX-&<{NCH=dEbBLap1n!b?)r*{C+-v8R%)y)3Vc&k&)4BK6+?GMh4O$ zBcqt1IS2g3b>F2H_=nubNJE9JdWdtAjEt8|^P#fIOYqhLU5=^g!s+kl>kC)gm%Zgdht&POlmvd7K+VgAl2R?tEopzsQ$`PH zCq+8V(pTPs?w%g)^h#Ad-U=R4Ky3w!9S2QfdgwA{9BLXCUGjqo+kP|XhAA*~Fp)`# zjGXF!eGHgU>B!Ljum1u57Jhn}*RJnf7#Rf(@BjE{7gOp=Q6>N14`u>R-u@ql{(jH} z=7Ja@4XXeBSl(Z)WaP#Sp#Lc^lTuiiQ1AamAYKWBFz!;$i~mzv-moqV1@L2;nUo5u zsUT3OuB~nUhfu}it-2TErMlgxpT`pURNs;=uuFcEl#&9SPUf$#ueW*^ef;xr$XGJ+ zafp)>`Y)wi+M7(R5S(+J>&(Yx?2Y7z(jtunhKMpMgXEOwwtpTp(F+T6THE@5m7L1x zl0l;H@1H%&i6#im1<9Ac?C$z*jPQ2B^4=c>Jbn6f1S)yU$rc@cl-_y5zYzsjM5nff z5Rb^SU3f%AMPJM}dv)_%`Y-(JA(7^`_(!62t2cwd;=HHGqeT z%$%iy|7zb7a*zkBqcK**S)w(?^9D! zTzJH0?WugqS%zlO_87NxR_=TAPoFek1k{HqOod&ikh)bs2BRpU%@B^`kQUp>&0KXp zSX&>JAm-)e@y?Z&3e@(x-$MMx_(v>-OM+-JN@4vSfCg@m(?8Fb^@DG2Z_`6k zg$*n>LAhG+#6A0o&H8b@yByN)7a~e%vxHy%4^8yZGD%cbDz;n?Q+gFf@y5-~t()oO z$LE;rezCS&KICc>y5#os6Xs|%8s6e`?ta+CKhnjwEWocS8*bv1c_}H}YbEJaf9*;A z#rn~*{pD@t@gRc}?mq>;L&GE?N_pooZ*Uv9@S3=}(435e^{dysy6gM)B zy!g*U1#sZUn)&S++^0fo7l0#68dJ|_O-}vRbNfRaxG@k$evNb`Dg`@KXn6~*k@_8m zK;gH4SfB*3Kt;X%O;!rrje?$p`Ys?kagrAl;c;zkZMypU2~MuAl5}ih1M^SJalWp` z=KoAM;j=*UvxK^-|8?uOI8B+3vIyQMkM<6$91i3%9k?Zgm$gGSZ+E z{&@R6a*ERDMFQ8Um}oB7B7`{D*$2i-wBsmV-FS+`_>UE9z91gqN?TfFHk|&4!S>JW z?xDj#!^^~1z;}3AK!+xzBCLQSviOwdrlahOfh%=^doS+#tn1u;-pBm%zw$TlbKvzt zlyy+x^=zIf(cA}tA2X#~SWWmS`qM=30{MGlc@h^{YyMk z68PFZphy-|ZP-}07SrD!&XF0{$-Gmt&-Ib|zl6NP3lvLA3UaDG0g2!v-@2E-f5O>~ zNHw9v{`SM4vFlN6U>efNUjC_M2ema2oQeeFbM^`EnEOzRt@ zj~4?Ox7Ww>N%b@CZQH`8tK-%agoYEB)9cnp$4meUc&W_ z=MUM#C4py_7WOGzRI;NBKYv*uV{s9DH(S|p-eB=IZfQwy|uMf2dHmq{VMB!R)uGpfX&k^ zlUsQWN+|sAlfL|Qc`!%Dr_WMr(ER8x{h&{P>&iNGU;~GtZ=)AZkC#sG>C59xBzm1p zj2Iu|QmUR=);qq-y#4gt=UtH*ni^EqV!yd0c}<<}>= zuXABv_J+ANyj*WvBJNn^>-!V|zjo+mW$hCeh>}~%Onx?7?^L*#XZG3P@b=TM89)Kg;@3{gC@n3G$if4<%nv-;Eb|Fb zSC8TJer?!gCI4vg@T)XyiUz{<)^R5ZS3?Ou?xJLLz4%IeGBV8RgLhE185MWXukPgs zFUJ28%yU$Kd&N+sr!0pC8P`QGJ#RN8hixav$*-jt514lzO}yb?hKLH-IS*ty8xT^m&Z>(eHz@zde0lmX718uB7Q3bS7D3u(aTrJ z4GR$Ew7&SfG97=W$HB9pQnEi-2$0S{o=3qOCxDvkHf|t6^^vb2uY|Ulo=0K+mQ8E@ zlhiR4O?KJRyiy<+c}l#3d`sjD?BCm}8;-_{jp&v}e=)x^m?tlHg>`Ci(Op(nmR49R zCk)o6s%u_wvN6-lU&mvz1Z1$mzEqLHNh=Q>9+`0U_H$?Axi{a>FMvjMGKzfI|H0uF~b=^ud7 z^KN5XG1a>&Y>6D?;}o^iKpmiE8;SD;T7k@e)km`*a*uvpwmi{9TxY&7`mFigx-@ay zBUG;72G7Yd_2B8r@aYYU)=vQqt}}OvsHqkzhq9$>3b>frB^uutl*WwYZn-C5mvx0-4sHaLI@4JRX1Z{u@S!2SqL$6n#Z%HjcE5P%{8sH} z29_qRpOz3burKuh<#aag9+nQaH zu%7p5vY?4TNeg+p*090Wuk>XXlQmg?Y@*_30ws9n5)jekl_JiiJY> z&_epsB`Q)xopo7_FsHzHL2qTUaCRmGCH-)D(Fo%T5s&D>r+`~ryj;?D@FN1OxnkAy{NyhG+zuVdRFw~RhHa#{^^N>#Bg=AESg{S zHu<>i$xk+Q#ydxQtI7IV2>fh+`EV^BfQ+^p-&akC&p1Fa)3;X978@PBApt zd_Q*}y`Hu<+DUj5TGVo$P6oWYZQFUesUOOI%fcafz12U4*?AFTSfsADUxCmT zOim#-g*54V6^ca!NaVimptjr+-;`Bj!LMg~+>=cZ+Pq>QV^(Q#k@GV(mv7=IPHpYz zGY^SrA3X)1I8xYT-y2N`LAo|fn8|2r6RZC&vg}W)uyw?`m3X;xy&wBZe5&FjRg&XF zbK61*{kG?p7>T2V72xMUM`GQkYy#-fFvYBc5f4VuAbYk~j*gBJK%oqByeMg#Urnzi z+!;T$-L%vZYGo5T)~*hF3onbmow`CmL}K`LMN1bK?m5lUq<$~;8J3rB z7NpgdJqQ7skImCmC<)1^i2sm)%a8SwHyVD*f7)z7^c$``+gcRGvl+sWE-7ucHX6Ps z-?jbBuKHF_+>P2%+#5uQh%{s-4T-lpAX8l6!yX}f*~_veE5^shG5IR9cir2L=lEyx z?4RFUV_#dU*i^7WVarz+6`iwzj*UG|2T6=m7i`_^whWd5%HL%8XTqelcq(X+@ zRS+{i*H0H^5A%g7-3qs4%;-2mS9GgZeuke-C|64(m`P_lh=SPb-$oy_*fZabKmd!YBjiK 

literal 0
HcmV?d00001

diff --git a/docs/core_docs/static/img/rag_concepts.png b/docs/core_docs/static/img/rag_concepts.png
new file mode 100644
index 0000000000000000000000000000000000000000..3093f925f0589a210fb8fef18f0ace05679ecf01
GIT binary patch
literal 72552
zM(ab|SvCmq7T6iR665Khv1d;SdD$G6A(h^n{_NMA8&A!av8H9V?!Fro< zX?me>>5>YeGvEDq(Bx4vT{l$fvEapsYkn6icU%=*uiLisu%+x@@z}IUC*x$^HCnNG z>yk6}WXY!xk$dT`N{<>R>;t`n;G3Ug7xG|hW=Y`W#^D^E1fB?gS?sSELBO1z`(C4l;0k7li-df8cYt~H;>xDG0TNlSo6z#x?{6W#MbkhXCa)^Wm(OuQ%+ zZ;7>oc42UTdoq~2QQpE8DiHEy`GOfAs{47r@niu0xBt3zT9JPqe{BX4a4ISz|th)_<>>7OfuWZU2}P~e{~0ien;^t zkt^7WO$RY<6b$kjcpOa+WlKdd5ic%7Mg)$Mt}I=rq*2PJ`v>=%Z}tsbp=fay(sXVe z@3tDnlUHrm#N5`=^P9^^8NG?F_tyO{6v7ZDWY$GXD-gBy@I^O{7_n-U%Zz6^E#<*5ynccu1UWL!)pQO9y&7 zTyDYt%U4n4;SNImA{1ixiaQv3!`;W-a#^-tKowfDq(Zm00d%O+JYwPpB$Tg%k_2fv z^$N-)qwS;Qm^WjkQFFim3;GR~0fC0K6+jN8bj=RvDvN%u2R{Rzif z$Cu=3ItDloDm?`)QXXnul>FK=&i-EUPn$$JLf8s6^}4!7r3zGr(Q@+Jgd4naeI5JR zDsR_FV!GEqvak5cw};_1_|QB*5Y3_WiHLaj*WfsS2oEd;KLwgN%Q3z*-}`l%4sRxg zjtxI1H(Ypv8`>~QHmtzrFvpzSImsNJ&=}>wJr%=tl>5@R9mD9OYdKaN=eIKK0&H1@DecC4pMl0#)bU%lcdI;3}6kdYzg-~AZ%Nmu)I zAMox|ZWDB2#9i}Ux_D4$hQA(IFm2zJoBfZWlgD>IP91G1hTj4?k8jk@wg~)=5Y}1t zxiq=**zrY+o=>sSkPo~n9jC0W^ zJfVX}GRrahJN-h6r+HGvL@2_Kt4PfYa;L)F_yLvvIEqNah}!2sN#uPi%f(V-*O~x3 zuhzvCiDkNp|IO^8OGx~}uf1I&TPd)$TC0!}G8*2HjQQ+7W^J%FB6*7r%$$}AQb(vC zMGbi_;*CDA;Y0ne`ix1X%Op?G-_TgZo}-{ofD{5mp^NimNVp?zM6kWyI489;_5BY!i zmSjI6WsD2LWMvK>wu z+L2x^jCc6OTPV`8(|NBb&U{zSnp0U0ud3b)%dV9E>ITenO?27-Zs1S4ugR4)%KF7k zP<`4swV?nME8^&oA%(hX?nd_3ABSRGt$qgP@k?zgjoU$GB1%1ZR*3(>RzFFJ3|$`7(BU(662%p%=yE?C#bc&`yObo?UviaWaQ|s zRt$&{Su1GDYAxF~*^Xh5k(!5l6?E{|sNS0SXGLZpR7X0;kwd{%WKKDnLwGZ6X!}(Q z&lOdVtq#+kJX2ioFr5R#ttI22BGCf*wj zmeTd=BaZ5PYWk-ih_mB}MA*XEf95ekKAh{)w$C(;!v3SnrX3Db4A2)Idv00bZ zpw&0Z>5E4-^KbQWwiuk5k^xiGaeNchaI~hvToYS>kkeCA`bM|HtWCGp@k0mahuS2$ zxStOPQMzyv(Yux>>cnaU97P4*;R0lqS$@~ntMt8DWxw8JH`DjDk#fARo;9}lc2*B+ z$D>{Z58~uCSl>~6;-`b-L@g_liiO0*F`qRE<3qE&)=72%@oi*1_v08=0Y}nK8d$-T zO9)+RhLt_8g5(b`+LtV>I5W_lc^g-i3?XzhU0IFpQo7+#jlGJCB0RwvT!1d9?BnYF zB93-a*&e?JP5|e*jV}BBTc+j&nb98K%dNcS{;X702(7toU|G!&e>ssCUZt~0A6K(p zGAWq&7*9un#+yO!LjyhvHUNR&?pMH5l5JgIw+@1sza7D!P{ylN=+4l&hp|LZbPNP{ z7F+x|d>D5(y5Y2VYkQGo#$52CII#81vOzuAIz_x-uAUQrhLU0bqUp6KbZxDZB(aS7 zCn2dc@@t{6!O3&jTj;7Ck z)%aC&R9>?-hV`q{!`;P(ri{m(`I`wnc&!f7m3%lN^d|JyTDKm|Y_h{%_02P8gc%d6?oqDnr()Fx7h0z)p&hsZvZ@>c-^A>U?nM43jAV2j9$+&yetXzih zEDqFkmNezPkw&DbAvY+(@#`_)eZvrIb8i4GLJv1q?ni?pa%xZjI`0qIg&9{n9lK?@ z{F^jcO0V|(psiVshFw$qC*7&#wo!lEO*=V3DqDU@xX}9hP#oIha6+1dax)4&VxfJ; z2gg2vDUwCNJ8)He6|%Zy8wDHRyW1`)w&oQ1ZU9d>xhpFh(*IWNE17UebmcGrikUNu z5q0SYBh-5Ms%jVgggM#JmDL*~S45&D9piu`)6qUN@p{M$Jk*VRyX;aeH;&ttBR znWF|8rXuZ3XlbICq8EW!VgOq^EFXKXByNW65%uLwr3x}{kWZvEhrHKE=g$+&lBM$f zaQd;0q@&r#d6CKz7e2=6(o^5zlAhESYu2{2LjKF6gm7KEuVVSoL{`NAdd94QSN)vhVX5Rae4KVf!x&5JDUN04UF4KbwG}amsDdFqv3N zA2Px-aR~Hi%88hNm;l+5B!k#Z0eOV(Z)@8T2W^%~aU_g|GM5cW?pYhY+lZt2(<=A&e3yP{E=z%WW%JwlDnMw5|PC$&*iYV zE922dDg%A%2!!>3Wu4K|9n#=PU4Q(mQtxy*Kpqoi=Uf0JP`xZMUl;ZEbeF--JpKH3 zk!$;FHl5^AtX)8Xj|}nw?=Z=Q;Ha)nY}h$qQ3&xkU(A5xT;Me&OZ0IJnJg43s*gIp z5Z87R2m$@&CAxg~#^{?KNyBZjv7eW2^574-UTTAGU*0C2xNyX3lD=cp*|AR7nmroP z(mry#&~Y?J{c4=5YGi54{S0<}kFYd|-)lIWB$L?TXd^oJK!bX3p{b!&CVtKR`Vz#1Ue|dx~>O^;A0rk(7OIBKJXm-4f8&t1Ab^8rT)9uIAXhHfqdYS%)D+ zl#j~XWDw{^9|mqQRQ+n^T>7NzEh9**W39UQGu=dzdy>N~3m)FC3y8DLWoQEx_|75u z%59a4l3q^{aVwHm15x?#0v07b6 z38B{F#aHhsg0MFhjf)0Di%jRa0Jl=g$>Q@IrwEj48KUN=WaX7Y8ZNkYPrDsfO>8SW zp-$y<_-0IOD^$ldp+e;}*Dy%7c7crF!Jx{|#9qIrwFZzG!XABY_Mba0tH%^Z{k zKA2>SZC{@KJp`1EcFc^$!TW2j+kIEuLAvHP=ZPdOe;eds6*~BvQmI^tZpTtlxHg=ZzUD>L~sKnT4K)gqW-|snZv^F&*4X8 zu1||tMP%4uG!n|)!qR5%{%^yjE-}AUOOul1NFClMF8}uJNhjil6l8cT2=?xzj7`Pn zZYDd9V?*OlIidM(g0|qyX+&>9kftP^qOBMSXJ&3_S6<{<%t5`899;&NnM@Vjhz&*C zv)J@gtv?&tUj#9;`3owaGQcLa&cn1l+kj9k^-!g<`VbI$xO6?b#qgC+jwUoEF2b;P z1D&4fiItsE5gLH5w{N$3S@eifRFkOsL#WM>{1!^|Hn0KFFgSuKgr+YOQGIC2EK6tp 
zUG((w1C8jtz046CVFTOC_@H+T1ixF;HDL~WDw{NFps+g@ zv78^&C9jm@uox77@LBEA>t%W_{ghBVXe2}ZE!|p*z`>`w9fHCS`ak>uZG<%H32a9$ zOLdQYLQFQfN^m+(>K9Rwg^OIJQHpX zc4s7s`iCjIUK*JW`%Y~=I_d5+bkq*F1dJ!wMA}0`;+cxFd@^2?$*(i>5LEg9nO5r; zN}kf!1sTf%hc|;Urp$D4E*u@JhpBJ%B8zqI-C8&y97|Vv@&StAoLcje6~4mO#ybJx zAlT_Laz9W7MwVCGdJZJY%_KEIHY@^x{)6q$r)z2?0=mBgWN*b`6(8K z=fZ^R1jT0`CcxQMcM=b(VGGAG(zgvIYYgZh;Y3|sUv1DV65`p2<79uihBXT*Th9|G z7e8seKqTP2?rcr+amp^o{{94|^-4*24}XV5L!QpOu`L9PU$PU8IZzNBlV1L^c z#|jOpWJ-U8=vw|w*vT!J=Q`bWpnM=hn5fZ?rZmST6D5&UogFMRLWvHZ1^NQl{u+0w zw8aCFm^i(-DB(%MrBM*oqe^cS%9~Z$6ZK9h-z{6D+OG8)X4!o*-QaP+q=?>{Q2A&u z_1gBF;FgM6zLJdnK&WM7`RvOpYnIRPNIi>(dCG_e6HyJ8rU2VT(b+B6x!@e(lDHhw z>@hOCvNsNY{79eqx25Ah^T8?ukS9LJm71ZZv5BkG11`RII*TDYGA?$wn941NKXC?F z8RCAr&(Gjs9hs(GtgW(}vv>RIRwJ zG`B5M6zjH)$}q(pFDx(i-Mig>{Cs9+i-yKx%sMfxRNC*nvK2&M+MLsLwuW zcnohtkMru&ldGUQjQ(-WbqPO}*u@#&hRs1e9-V<#i(>ItF$=r46Doc_`&CM`@h?U@KTaZ`Qb1& zn(A5}Hhjt=>h-1M7rU(N{PqNIhj4(g*4K728ZcfJ$0)tb>qB%~*lMOsRnsawk^0Mm zC+8vplqJ9g`!8^;oRD;nDOIMYm)oAU>*=;-N#gAs|C_GV>GpN>8-g7sFL%nzKp&2{u~{bi6IUx?LV|q6%06* zHAAEIV3ilh+tBtx;Ob#RK>|0LQ#*erREYj%2wC2KRfUM4UBrH)tpdXJObvj~psBOX z`4CEPrvA3dL06!&&*_;^dHnR(UZ4!uBB|qGL>gx(>tRTPn3U^+PFQ)*z44)jLeE@4 zS7BQUQJPu*l0iITPqkk$qkH|Hl}z*L0{RT!2!`&w`hLFIs=O_}tl4~P$C;Pk0?7b+ z=vRDU_9H(k5d~lb#Yf1S$6ys}q_flG$0pw6^9<2H923BH$V|&c6!yOC0+5#qLMLO>B7nDh2QC0}gHrSDJaCG3 zKcREv?{JGN?;E;#7f= zj~SyJRn-GRt%$^-O_WGx3^>X>X(e*{XPeL&rmJ<9*V#52kH}kUx7y{znWi2khwSpWa`zVb{GEx6Ijq3O0$u`04kriUtR-bnpFJO=PkHoEIa+Ry34J#< zKJM3B_IIs9MgVMU@i`Wy!O4C8{{>>t6aWk2ZXm;(1Bo7WEG>Xhc_dE#$FqM?w(U@f zrjK!i$6#}`&KN33z-lz^x0K36@Ma;o{g(qI4F(1l{6!uE0<_Xjg3y9<`SHo3G{{KN zVC*&HKVYQ_W}r6_)c>r|cp6~67EQbVo8s5L=1-O5LRo{gF#ZdmjRGq&^Fh7jDL>g$ z&Dzr4$jP)?2XpIuw|`l-ED6yF1kforQ25^-u`}Qw3_skoJXFGnj0EuF+2K5RqhFQ& zosm!pDQw_U*rWMxLjPg?SU-By5&eDaCqB72SA7XSO3V+4WT1W;hz32v?i{VDr| zSnK5sH~6OC|k8V`Kw^j%0}g1s23&BJCe^6AQbGnC_Ji4o3DR#c>0}JW=#XDYyIe2{;``JwQ#cd zT0r}Mh|qFieYZQY1D`XhD4STFF~$89EBxW`*&og$^3O&Bu89W)s%pbopvAJ=9xGs% zY54QH4{`-2CEix0n|El~%Ry@aNQ6TBx~1SR>xBdVlHeN25~5+AW?j+MzygX-d`=KS z0Pb>3an*?YIEv1)%^8EFC7KaTnET_oT?0^@?*@}OlmY%_ug;2fhKATF|Dw;ROcZIN zu=CK@HxocAJtnhRApyXL%10v0{bJr@79j9U@Er@K#ltaH0Mxje$jI~eg5Qn(BUxkD zo4^c&-H2(xBc!|&JZi@)2F7Afmp#g=7*J1Z8n}1>@E{*g4RS!`Z;^jU1NT`i2jmYG zTBNfC?n+8&HQ3eoX6JC&qx8xfu{#8pgn#p7dzeb4d}Dy$8-)}%p-XHcwo)JN+lCJj9dA6Y%t z54^mrNMMyxWP=&9_GsYHzqcXgw)j&9U?8KwBGyd#u>+>NJ1}<^YSejcXZY;#6YVQU zHvo6No;nMt1@?eprd)V6HuGkz4%jItGXN#mhen93u-A$)p#qpJKzJ&KC11G?idq6Z zOeymbQ2L7WX5Rw7IMLpVG?7P|!e(Yt=BL15K0o1K`0-0Xtmds!2I zzFnUwCW0s2ymTA^tTM1^cgwU+Qlcz1EBMhFulco-18u6zW{*M@VsI1jssVIx~SoFM1eaQt6_<;uIB|4ysF3X2}X+` z2wOgcA9U5J^Y{Juwjv!6qDq$v`joCm6gV?9i?+34z;hkN*gC-O!;t&sHAurV_Tx#% z#iBpe8lmV)B)4eF>H7{7!3HQ8|+w zRjc~vfx3jzC_@D}4#{MHuKxV@3o6LMnl4koDS*mO-vb+LeLdur{V{$Yi}KevFmqE^ zZbFq-llR$=+jk#Oa6pfbn6T3zDM|z!61TpuSnf~*d5MN0dSz#jjni?Z66Ah-1hN!0 zb08ap(%|D>wfptfccR_bDBpsys2o^}$|Ofj#!m&h61XTn487sqQ{d$^as*rRPV+2R zR{#uZ6zBN%-LfhGz%AMXw=`e4O z#~{TgXlZ3oO1Fv;IhHF?b&stD)R4GQ1!s`t%_|WtcR)(}WQ191IamX*^b1D7!yx&} zx7q&OmIIL@hMSX;=|1NV0a{u|e=nEW&n0~5ynzGMPU?|wHs0LZMW zQjzI70LIANeWE};B2fVJubfx8QTz)6%LJA_9w23x8UW*z6|8#)U3ep@w)8d-_NYns z(xN=OJa)b20HmFK@<29E(O^@B`oUSr4}9|<)N}W2QnMUIi>)yIz5_=}$ekE}G1W5D zD`x}Q73wKI$eJT}dDl3qBrHwS-4SVtF^KI~t6u*le6FylBD}cqxVL6NXb1n#_!WS7 z&H!mN!8Uc{M;3Z)GtRVHcX!%=>!dDy=hcM1V9TNoofe>m8YYbtT>B_-`N*ai zRVaD{u!e8k`?m(6P86trZ{N6iaoqGC^D^xw>I@fUxNZ^{Wm8UOCBQR_=YHhok?iP+ z3NI&YvxXg@b$VL!8?2~-CMcev`?ZMO9=4rduEfatPML&IBA>p*0C{y_1`x~>W&t4Lz3cj)9*VZcYl6*3gLwu3 zD5z$i+uBZ~I(N=g58Y%5`=!~}l+!00R1|sH_xUW}?Gs&r^#X4K8w+z~qQV;sS^yPr 
z^J6dh<8Wg#u_`)gfCAllU5yqeFbCNHWl9e=ck;yYO>E6PkONdd9oXn|6Gq7OUIMW( zn(!=-aH^yUxB{mh!UXj3?)t3H5G^o0xfif}@EaQ+D*n7l|2#w-{Hgq_2+$8j!Hfz} zzBCY{D-gNJgC@&N+Tu*DoXKG*XiZKubH{>rtnVAk9`&^H94^kG(ENe!wGWTDJ8uV4 z>JSg%Tz2_X)Qwl+i$XjggCBz~TfW;6xMV@-?E9-|RxcL}imh|!T+=txEjn!41z0+| zrU74t&|dH8s*B2hZW-H8TwMZnn05eL)$b&25#RUx@MBukWEnQ5QAH?Sado*_7b#L+ zrFm@QzI6!Pgde#)rS5BSE`F3e$nagF5IizXL!( zEgzo*;>L!R$RpxZ!83SCmC&pkk~_2aoM-2w1r zH52ywb(?D_Q>1+31_McDG&Bn+^H0D0kjCkzd#WvfRQkd_w9r0^F79W?^M#n*%ieq0 zzW|(C9I=QPt%RR5Gn+X^8~0d926;y2>%`I$@H;|SulbU&Nk+;{muXt@ zx6s1F5<#S%A7NldGA*dHn+CwW)Y>#9`0LhwU)F>OtiflJ1t1)-M(`nth>|#fT;~sJ z1h@wiM}T%CJBT^Qo9(N|wNP$)T9a*I!Jyj^@mrdMuG5G^1tJ^bW)xe0U#Ze_b zpds9BrU0O%;fHG@47R#1K&#x-mzl&1{09EZ_ra!GcbW-YU_71~)mw#z)C9Ce!CknW zuD=$M=}9+$20c`4<$~TQ7~yH6y59YxJbtOUi zgJVxSywu&Il+bm_xx44Y>P(H_xU%+1J?TSiK_kb1_v8f}Y2GxGf|+e8{3}z%+PV)M zZAGBU>oY{=*i`i;9_`b(5VKqn5!(Y$LMb&j)9NnQ|#Jp8|D!{3EA(*z| zi4y{sxBJuZF|K9aBlT$c=@uU}#dt=7>AdB_Z&3=4Uw#Ro%%SU_6^SidW~uh~Mf?X%=T46p z#c%_iK z27NzaEBRKBeO6VfyT=Ptpn{YrG8%jnWpJ@Rv2*S``2&-==_tjPl`~u31+rs|WII=; z^64;9QtP3U9}#VM@Zf*|O{XDkf2*Q(WF_%U*8S;s!E)Ly*I>bp$^jBnY?{75ft8F| zJ|n;r7-b?fXTVXg!NJ@!&T>V)I-5P3m2z_FB&GRNnYQlJ>BSym?WpsN!fO>COuVMg z3mK^sG(VYAW*jDkk5!YQc?F&0Egz7na&ipPUin{T2CaXz5`Oc#iLHY1-!(s4tXGtoB}_&sdf_$L4>-nB@G-*!puUaV z_p(GpgU_@#>3cgbflf|)*OtULSfy7OLs0SIIJsrjH`!-!LJ~u>!byD-nhANNwt!vr z{15E~yO#|Y@&@p52yHTHV3y{Mu8vEbn?&KGEXFI8!Gv@s8!+!>G=yzvbM#5ezg_RV zj+E2#te3gyb+1C!!-(oYo#gLv|D)4clMA26XQS>wr8p0;ao=C99gEdnHzA+o?8|u| zYJ}egT*I2nh&?hh{T8R*Q#&`!>S7u50e7$w@OOLU<>dmO(@7K70-#>j{2i(X78ey% z=G?AOYV-~FSGZvNV*cC0cj^Ypl1H&-Q;oDNzt;-ZEkS}o-NI7oP760d>gq#1B%fYN zy3KuC0=hHuwLbUG13Sk2pu0kZ+)S!7VOo!iv|5hfZDY2`4mPBxkAyVS4YA=Y0|};r zwGOsfvZecG_hdx8AYRU`tRr6r0K&7Uw^4(VlvW^1lOXw~$9PN{Ty&#^o#$ecbZ!4> zNGVWxQHS-q;ETX>Ri0Zv7tTwgk-XMxDR_cyT<7n_4@;JI-!6S!a3le!I2g5_qs@xF zafo@nXTuNe8v5toA>l}1Y*gP zRx8>i2My7F1ZCQm6sx?Hio_^1YA^#ht|Z|BQ*ONyIeC_+?N*i@$DzxxzUx-$IP5s` zb;!k}eH95&90k!9{so&7z!CDIqJ&`UEWcc$_&uZx%=rCG2bJUIo3}?`?br%jaKVk? z^0mZNQ%k{;ALYgwl*um}R3M-{QkNsWlsX;O3V@4#l;MbdN<}F;^P+nXH$X}m)KBZ2 zPyTmuRp_GdrYZ3DN%&OK;R6sa=vMxSd6e}Zxhq@`>m|yx{@RQutjHC|&A`AP5uNF%(15MLv=m)Jqur-%*!z;@Q zaituiTMYBAPdugrheDX5Ve^&{Oqg_CYkJuntzCB)N!o$m%;#5}js8k?3vBqP(l*TZ z0uhA{2aBcFp$LdZxjX#^<3r~&TuytJ2C!N~YbI0#T*>f=n`O(%8(Fu=1d1=>8=bj! 
zL{o_`)DfAVJ^mR4-@zfbi9JaDwNU8*yXpY^-sIjFHx4NtR zEEa6dpu(ISt85hJjdg2Cebn;4)oF^6OoluDFA2Q78-_V6ex+U)!uwc8vHT-`1ygZ zB@EJCwT@>JZrQ>;!h30IEU^A+t|3!;#PpLB7rbi7cPOb!F^m~TmUgaZ|-G)Ruf2Rq6s@VVtH7xPlLndvzbil>msQqw9 z&I+&cTbCCl2-HBWTgJhs2E~8*?w$SIIucvh{RQf<*m{Z?>3fYtlIOxdG*VthV6+<+ zPc6e$gyKeS>LI?$!>_`84a8nQq|z0%07@)aHAh8L*+XQRJ?sKU5nY!QT+s51(}V9; zg$F>@Mn;doD*8xDb=j|l7hJ71txEUuSv=hQb6@v|pToS#!x1H$hJv*3?KSfyy$alQ zEXx)`D*-L%fEAR{i z2xszxR`;_B-v!d6G6Q#Lv~sSKdU@FzFx%{4>$-cEq-4>zQ5tXaF{MY!sI{3V0KjJT zl)Y9=LNxr5v|)+^gW|7g2Y{vhnY(l18iqI%blk26RecHQJ7|j_F^w0YMW=Pktkux& ziqPP!>~Ro3KK_dl{zPS0dYC>h#+H}ur-ZbJh-i74ibnY%V~f2cZ4VWs1u(_qwY{dq zF=JAJzi}}OJQhGE{FetZ@`}^yb%i+j+Q-+x^F$%((FB2Z*wh-tM+d+&l#xGHdzVrF zdiBZBFP#HH@TK%foy|txdPJ;O%e8yK%H1G3LG4n?e`d%zku|PD%WAJv6earh$Fg6G zWGVjy10XcLJ~ne`xjV0yVh(Tx^u!KK=Hjb#L01c-rEXKzS050(%O-*sdLw3JigafO zio~(CnO5Zj=qIf%S#ciX1CsFnkG;3x3aSgcMFmk1qy*^_kS>*OkVYDj4(aX^5RgV1 zl#nVDozHX5y+7bO7z{*V@4eQuo}6>;s8ip!_&)(xATR@TXS-hO zI*tq6X?JlBc>39Yta>JNMcECAGuTj*faW_0^Z;3OUHHAGQ=Hs-KpX&NU8R8&%Sw;Re+A%C(u@IN5*Ik9rZ7rN zfDH;q`@sLyRNRB6=Z%(|JvlQ8yJ8!74m)~98HUc*@B219-G~|=sbKkzA_e7in%Xn2 zU+E^a7ribL!^0{$_=rqFs4!z4%6pL`_w{@1$FwwM)NscBKp(kmj^y`7OmEdngY|$L zHEg0!%6Km)StR?aS8r!ml;v>`7j9rigJEG=H%)-$JTpMbR=%G?c`vhI#x%fvSg{d0 zKo%f|SENNPEu3`AY&Mc3C*oiKLjLVpZQycI_?oK(xs)!}Ry{H>Iv)4UMudt053R4_ zI;Zqe0(;0jdep3VvesoL-He_bli=c;*hH%ekwmJa=`}l(& zHsjTMhE9ooZM>yLP^`yOYRXpWVWrMl=zemIKuB{MseQ|mG5ICFNLJ??a6bk9gjkUWosX3S ziq{h|R1m9C+g~AuCfRgT(_7?%28M`pSmLKgXyu?h^?mB&^MU{tOGPgnOLyh66tpWS zh{)h?02;P;n!WWSzU5Em0u~5`NFB}`tWvPc3TlYctK3u4?*_081K7yl6rK}j_Xv2O zA~G5xmRla~V6n8oCd&#M0xE0SVw)ef8Fz2CH%>Qc=)Jh|w~RQeN1PA8Ew6gRD)kv7ZOMkh)|+WPZ3Q>6X}4t zv@2?(m>$QI(A6Vqf&(bkyf4;Tag+Sav^lm$aw7q?yq3;Z6e<{`^j6^QS zkoV0H`D}3a6q$9RMM+u5=YzT5+-Cy+?5F3dmnS>;0Rc|F&+ZF)qvKwm3=w5-xv2Z0 zKe=XFttqh)%(~v2-*aA@Y9B_GsRWJ5=c-yIMKRpYWl$e6oWB8p!Lbri8#eY9>^iu; zLGQwFC^2%(^?(lb@}q1Z3Dgu9-g@MXC(lh{cCPf(!0cN*Gy9!qQTCEXm<>WJj=oeK zB4w5cdOBT|#E+7n%E%9~KH-l+L0nc&$6qDsN^mbK8#i{-R6g5=URczMGE5H%Gq@S!5zXM$hImQa8ZV$wG>l2`%QyE4R3S4pzbg{qp~5pXvxpx;HoneE7paTIg>H=KD+kWaob+u_SV_Ha4P|s_u!}nI zSQP$pN^s0*0~m{V&Qc#^lW%E_;iHT%-(w{TaiQUPO=uJ*4XJL1tsUj+lUQEY}(FLghH@@%!7SpG}d%`b4iavOF3_@dWega zOgG}_%@s}Ip-V#_;XCKI+H0y#^-f7xzT>sU>M48ZJOu4f_M>1C8uq*kkHyU`70yh@ zw8_xD%Q%*BG7Rd(>8u$B64N^x=ACbJt*cF#&9iiaz6rKl8LU}A_e@-1RekGE;{E+8Ztd;U? 
zjAFPd0413;bQmD2`3`RA^k*pP-bt#IMsEgSP>MJ|04UEd@_j20FJv={OPZq^*aH7C^E+i`S zf}K55?1E3#oO)(OwRWqhK1dNM^C6>ZV3!>FTs1%h9!2)uMI?UwqM`3w^ftDHeJ3`C zxJteJ2*WoDzUI3xvp)d#^~KECc?O7inN7&Sx3yrYl68Y+!5q@aAvG%DC0)d#Y~fzh z*GL6;cxWhGi+1*QR+vsGOjPk0yC%~Yr&>4h>!lad?alQi<-5VRo;Bb$L^P?z;pE2+ z9g6oIjt`aEgV!5R=V&=!jrT(wq>5&pxglAhla~(H-1+Y_7p$5jjtfVwHf+JLwhFrJ z2)0c;$x(3Mp_o) zx}5>_-jpuGx2FV*Y0CmY-e`kL!#{a?tWsuQQSd-~XC?cdRdTedVf`-ElHRk@qo5c4{zGEt zYuM7X>%7a#LR*692IjFcQ-Ft_Bq`L>t`g(01!ivk@;P0b@gR;|B=H?<0vyT0x`-)7 zsQhiaBCPS2C2h1z)TMueal_YAW$kd8B<7n1+(A_&t&WLNO20Rc$eJ(S$Q;Km=bE<>@hQZUM@wBe&uR{clfQQh7t#xv_XxL z$-;)SEaa`UnyouIkys>gS6o;Zfm7sR#7xHfrkQsgZ4+5i@mRsZ%rZfi0QcAZSZLYy zCtt_bO_)opd;PIl?mm~pOnJjytAV<$ktMFFu0s_Qh9l=`F!(jc=S!rAMHVMHqG~^c z`BEVljE3=*^%bnKgJEnmf5i6X`ByQ50i={)f_XqHk1>kkn66-a|B^#2UVx-<+*!W) zH?T@51sD2tzA9rl!YPm8TvMY+rB_7B$kPXVeNLVg88ChhU#)GwdsNbf($nJ{m!Ylj zxih2b7r^ZqY?G`+42@1)u##fwQw>b-HOh1x|Vfcp!?Wnae^c-yX=m^4v3su&Iw` z&chs2*dQk2aO$Y*j&F!r6i}7XH9DF`)2RSS;XOa`dul?L02NR_C}-pffs(>RJ^C7f z>t?^e#sAFRjL=q0dB{c`0H-}%iLP)yGMtc84E?uXAGc%>yUh!r3g5yyx$i}ENTP^< zYtj1$tFT`5eSErvmxW!v-H-M7?Ipkvrg?dfE4054g?&5DLwmwKP>9DU>gbvo6qNS%FhRqOflPuxs)&z>+|Tqi|8K$S=sz z972iCw{mk(U&)P^(|j(|Oetpg;I;J1sioVcV(usV@tePlB@WUBT8l9ZmWQJe^ysc{ zxT-;Og)4**S_gE6nFs;dh)WwUQF)>;L?4K+v=_)BYAWgpj^_LZKF*@0+<7vf{YHH- zAd}n6l+afu8O=IzoeG_x0CgfG>Nl1PvJ-wK&^7Mc?9I_4l@)~(EXDN1WJGWMai)O0 ztZ#j^F({%nBZ)h{DjW`!2%DLdygZAZ{Mv}`u5e3V<*SXq0X@LfGj6Z*WlLF3YfR6I z&+*xPj&F><{h&9@2tIE@0NHo3)W-^NoBnqgr4gbVa%vkg-=CsOf;#qCjNhIp>55C5-~I!w`2qa1 zBOvL~g(0jl-EaGtajZfUb_=1tj^;00Lx)NF(w{%PN^fs8!M%dp`y$%CzE9|e&()#M zka7LsN*Li+(SCbk9cdZ0&e&=cho2TF|M%elAPEQ_I7(W$-2H^=zFw(9tNt1Mgkj(*6)o2VKQ6AG#F%ON`YBABK;#?G%%c%_6}^Guz4U zptn6Co4Pe;*Aw7@0mJwo!PYJm2*ntSbS(7u)&7Qb{GAu{%dXi+24P^ z2qOdtukshzn*qO!@VAiQ|M<+048ewA>@?*6&(|X1g50Db!wAN|_xR7v{`G|?9>8>} z-rcCn_`h7s0bHxEUpO-=g8L;v~3hzGD=JpS>ge+8vKE(GqO48m_ZtO@e`qXhXs zmj~D$4A@i_k0Qyy|FHxA`Kz8ExYl;B&pXop^QOw)1)FMmMTX6K(50^j++8jy*E zHh^?Npd2-;FZWERP&L${t2SQz&I-b9=+EBAXn=Myg2Ehr~8i^`p*jdUjLD>767s| z!Jw3`U>Q?x1JIgeRhg4{cXt=S+B65MU=?7*#a8Js8U`7gULshve=*Jft3||^JWRVImchD z4uJ8dhGqE7X$vbNcmV8Vi2+)df3U!RJH3DRLpG=d@Nf+wOePR^G|62%e-(cL>26Xc z+Q}D9KER+*&X!J;uk8hrE+zmJ+F%orC8!V#mS_Y67~e6o+Fxp{|6OqYc7y~#&2#`r z=qZ2=HUe0cihEefB)h5*j+h5D#s=5D;8BZ|`6O7l%_f-YH}mps{CL1Wjy1ZR%4a{l za9wEeJOMbYygD~hRZg&wDdAAwmN7Y^46Voi`k^Y70&)&kz>eIw0wefYFev6*t@QEx z0fNEfU$U0Z0|`c36T*HJ1zSPL*Q}w(Z_x5UKH#wW=}p#97Tzyam0hyx-VQh^kiFlJ=?spU7UA>EWdMx)pA4Mo74Gv8R0unTsCcMA+;qYAmUYO4X zfYKpIdGF;FU?Y?Pu~DT8EuH0g0GL)({8kNcd()hD8(|WBVo&z~1sVVK&jF701jQ-E zUodl7RLHHj(dCBzyX}7)^y8;i0PKjDmSYKu!6FD6I~WS61U8_QKRn^gC^Q%aksq9zB=B6lGU1$5fA|_e3TeKa z-QB*}Dz>!7rD&+t|%#J`g+yLegUSxdJO-HRz2Io4A6MPrQ&>{SQ)yAUUT_ zh(9~S0TfE3fJ#HYF#f#zt(#)v4G^IH7TnH8*=K(YlCxHGi5#h<@{O#Py zlxZ>ZvwV8Ug>*54!yB7KWH0@v1rEWXAps?>nT_WN;h-Dle@uXU`3$J;Cgg1J)&msP zPIdFOc>xn{5+4A|i3M!~`NN;-sJv$_?;*|yNbzGF*{{=fK>rM%l(q^S?I}LcjmC0X z1K?!C1Z&@L!^#*!NEH@1-gT5O%a3Ig9)>#ts=z0a;u9z$lDvStodL}V`XAoTQx$FR zF!)iOGz{feL2j{^ZOgyZbKkoH@$6OSlsUU$DF@K9tY$X^gPf18)9C!f?pM|>O97NN zow8Cl5NF0>wr&zSzu+v}Z`*S$0pm!%PK&?7rG6_-e#RPQ~3DfJIk+>p8`5!aPPjIHNz3D;ShTbaGC?A>&>^< zmUcz7aFrl1On@`3D@tHJsGCjkHx|GfLN3+nQYU#+T>|Ok3Bc7d2O-tKj^MUKFg!AO z(Ua3^p+y}Z^5!E&0X5cA}H_?uc2(7 zCdU4Mv;1_ANLCV<9L!?%KL<2$QYj%z4304Tvv`4I(+N1ErYaHonR~@@AU> zQeTc&t`}Q8_o0kjmZ0w&%39e(LdO|vhcEZAg>EIEQQW>mNC(+U^^>W$PEK|1mxA=9 z3_oB?_^yA*Ks^7hVV2&)i^$}wuM7=)f@=DoULVw*0_fmMgI;m%8s0Q`FG0lJ(Qqgl z$gV?%dk&X(`Dbic#n`(i+`UQBGFN3!cp`+Ja=$M4$kY(CK$yB0D zO5LBYZ;<8ngb-r2uwn_p*!lxWJQytAXFmjIzAbXNp6$&zH_6rlUE(P)X(0-Ta7rgt zoC82-XJ=^*olFq=`T^UxB8L!8sX_f&Uhk!dv>dm9o4~Mhp}bZ1sw{G&T;#)tI->#Q 
zNBEYCu~4XEMgk7B@A)uZ_8+wge@F}HxRZvy>U|w86KDvHS!I6&Z5~1sm77yA$|?as z*{5;}5KtHbqz#rLl8Yfh>&nj#1upy zF)b(+v11pcpUbyU7oYxa{c978p-tTC&7J>q6ALN$akw}H#?>r&>cR7yg$rX*=hGu{ zL&@PF2Q&Z$#LD-zhYy@732p&@(li{^4kW`ttrOG;l`XJHD;kjr8BK5UF0FuqIxDwy z+jlp?qiVp;+F&}!UDA;FDQ^h)eO8Kad4Qu;s@qoo7zHAF@w#11Kp^$U#dW~Rq%Q+; zXU+8HUlR5IuqH+RJQ<1)xPc(3LkkSNkol552M}4^*!@l+6{iQ-4;9X9v7L2}yB0us zm}&|YZVUqXH(j5oLCqK_Y|_TOjPm6L_;axp_4nti=?!W__PUXgFk0r>VY^qdR#?!?`SWxPm(gfA##pm*t|Cjnx9>{Pp>!+CfnIQa= zeE z1Xn{pJOyRU412uc>L)e-m~u*~G880wJzK+p8-a@v#_!(Y6o3%Sv>BMa>Mwp@dS*pE?yPMkEGpkw8{~jb3NO-OT zBcs8asI>ye`j5F2(!ev*kI`u@i){D(jPcWp)*#sce~A$hZ7 zId)FT_f2&iFOM%0g>Po9KLNM&^S|v~P>O#BMly9UKKk!vNo=9ZZsr#r;+g6dSq6`! z%7k)uuf2*l;IU{Z4fo9~1y1F!571Kq^-J_{mh^v~d=){q#`s%)QCYuf;p^IuIr^7J z8|mi#UjxRLtEPos{wLQ;>43tI6Re%{zq{Rq=P5?scp$MgQ>-;sHzs=>>w2|+GOK2( zGEl#3kz;Ml^zX{chC)BTT#!5PpYI94?jCO$&KB3S))mxS+$&DYqD;1naO1K_ptrxh z{_wj&{d=Wkb)oJRH%#4s-V3D{*m62Q?Xp#9ILlbyTL-i_vg*c+5@T&Pb6K~}*Uf+U z5t&q^8gu{WijL4-%tqz5{pWieu(r*Y{d4nHFClY@cZ`&xaePu~7AeF2#h`KT!|}T# zxxxQ7b#k5okI#~HN8_Jy5NH4iY5!!KpSX-Sdn*YhqkZ-5SSp*UZO8hX-&0SH(?eB@ zDU;1=N{#vdb6rd5UZ#U`oB#7Y0oZw&L=y~n)GA?Vp3ypsM*Dlk8ClsV_y$^`MCOf2 z?LveAN~#b@$Oftt!uVU=&j^XYlI!A3i*qC*q^7fDFSV<3saiw_o>=0uImXc0%aH*mVas-I#PrwS zIAP#Az(Vwi+}c(Ex`AZ<`52ekHzJ;1Gm8q6b(v&^_OgqXlFD*3ri#*Rnpqmpo?ouKRIV0oY?d9OsOC zNF%CD->_XPi2@&V@Sx#r>7xVaf=%KxHtM}-x*EdLa?(-DP-DB?-722$wQQ~|Ht4Sa zEfv*E;&Dk=-s}A0?fOYDGaJ`ZyF?)@(Dg%M1)1FRk|do@W3*a7pkLEZgW^6NY0>%u zWNniGmuUt#wxv8L6J+C_0M9fH6bJMHXrZAIynP|O+&7&dB?MkRodf4 zRdXi!lbj$kl~%JbHZ5*AK-DairfZ*m{d*dy4c-ui@!KV0KD7djc|e<(H2^9OCUe&A zwM#T=hO_pE?@!KjL#Oa98)SwYlXz*_fZe2<7|Cl?mP{HI}|EUv)r}KLLwG z7v=~G#q-M*SCRS!ll63_S7ir_VoBW`&|n@P!OnD zHJxuCf-YCPW@CXZh~~MvqH0E-_qIVVteO#s8t?-BlDQbp(ImHPdzYo!b+aqeV&Sw6 zAlqpKpb~MjPRqfd-lUrbCz)(u2!PtAKpTq`<`{at!|Q0}R?b%mM#NG{X2jBQdZ;{? 
z1yV_@eH}m*qc4ePIt%pVN?FyiTpXuecPh=#^*@&?fEU!je~Ain{as-}zEq0)9PgLT z3nrNdzcY}6p9Ez27{>_-+TypMl~%pOWF;Gahctq7@yM_F{Pu)3`gToq#Txy3k>=;T zE&V}8%-Nt{mEJ(+!@=?>H*V{+V}R=#(Q!_ei8~4?EK#?Nua?B-aQq}J4bKManIQIV z#=5G7@%gRujD^%7hr6Yh-(1K0FCplxVB>pSn{SyX>8P zwY3F;)#YG~rx|KDNL^gdthn?xWLr5VpT+QEIQ zzLI!eJuFi^LDk>9#c+B3_|3$^tYh`8!*t)p>8>RZm_Xk_P{)e_v@u@L%CJeax3`V>6yGvwr}FkEMjG8a-IV*HLK?9xFpf1FeDN0 zufM+>J1nX%Xj}UD=0M&=4YVDfz$EQwC)h|}4&4Ba|0xIXdY3KCNR+#&5Tv2Xbs*_v z=Z%(1@zdxEwSHc%#n-CtI2^Cs-Is$MX78d-P=e`FHj%Y=qVv(9L9)B%h!^v>$Gbq5 zL5#`m&(}aU63bt5oFC>~7CEt1~##{TbhSV_gPT5%liBv5UvS zpgf-^74;x%L6?Px*+iI67nP|2Z@yV16Qvydsn{9)B>1tvyh(3~-4F1Vj6i0&T5Gt6 z$ew5Z2u^Q`QT*OtYtJ{iT*Rl+!h}wToZp-N`7p2|A^dvkZu=>}}q1ZaqC%7ftZF*ZTIY)%J=w`_%Mu-xn{Y z@wBRv*Q0p_)Wa=R=A~~}#Da;6f<;5L&&u?zx}qK!ju#21s#e}{aS1N^l4uRb4od)| zA>u=TyOYCVRav$!`|-SlI?)VIrb7ss>}`i@&z$%PJcB%1w#JK}SuOC-)Y|%mXN(|x z>*+!Ayty&bO21#w+xxx7Mn5JlZYe*Rm|?!Yv#YJya;D`+{u`Cyt|vr9$`X?as?1xP zqmw4Ob#`B(G#i~`1*TqovT*l6`J`d*0NqA$Q4^1`tTeF0$u3pRm#v~c-L z`XYiAc&s0RcUUA2P!S9$uVygG(C{$=#^}|aUo&!XaV2otBXZcRELeeExetRYK9!HX zjgi6j(7=|n3Y}EEtyt4%t0S1H=aXb-+VY|m+(UdAn&@P1=jT=C)4rhLk|OAXSmS#B z95lFnoE5UuLOyAA390kVysq8PE6MCn z72>BqXY(7|&m{;vnMFLrC7w0dFfxmJYK8-tf7l#?RRnWm z=pC(Oa1_8EpuzIp^+G4a$bX6c!+@%zLqxgM>_L^qWGgIrL=>CNP@islx;!zo)DDlWa_iOOx;=CN%8lJZy^YY2$M0fD{gIVZha^n49`(cEY8mDy>3m)eWyV)|?Bf%@q zsG5y}%c(C_IyR*k8Kom6L_$zkfE^#9!$?PLF3T-H`(-gO$WQnaw+9k9+R8|F2om_E z;%mfTdX$M94#fC^gtF{GC^4S_Ugiut>_v!25c%<*$8w3zMH69S$h>7YrVzKQKb=B$ zYnq;TV#p`>PIM+XH~_r-(v(l_Vz-tdzA>BvLsD9yIe61!w}WVkgrsCqEVcDa_rCOe zosUdsX!d#8Ew}TBeub=-$w73s5TbiF>(YlT=U`nSb8AFfAUA&ZWMF*t*8$}8xMOT3 zmCT!{!KB{cifp~V&x!Ww#(VFmS#)En_61T*9N*E1&G7wefk4qb8H6sep_83O_P5@= zK1Gf|y$tVsw1)%{y#9)EKf983Z_Z__oo}$H!aaqQN$+Oa*deBoq{{)h1j{ z=(s7QS_|G^D<4vrPi}g_KQlSE@z!igSd)~0A>%hyxwyIFJuVc1MFTf6k#Sz<3f(ZU zUn0nUd1Tj3ZGJgILV=dy;Yj@m14rAoa9v7!a+>Q0ZM`FKL_5<4eI zStjzzHJt~%D3qgM{J`#?V823ul;hzm-P47%qm;k zg=#T%I(HkI-E`|R=G||Eng}>#+21fssH&B(3#O%`o9n^Ra&fKnM5DgWR}u#OU>#%Q zSc+%X*}r$B5FPA@Dp~LKZ{r~pNs;(Ob9S~*W-?mjb&iC+WtCBfuWDpP(%L2{S79VK ziyR@^fi8R3vnGYa)nXQJ%zm@vp{Rwi-yJe>t;P>8Z*NNSl<0S`mXbKYp3^Cpco>JX z7R%i&XGu7AKK^p|p-KfpsAjXTS0Q~~G>s2yrD;0Zr^9Sdx6@P6CZ)pfxjHWk2v`ua zMyw=WvKoxin6bO!43?HMu|5|(j(9ql?raM(H-vFwUOgdWxY<{iH-rQ8&3>h&&*%yZ z&@dmtVqjnh2?>>SLxeRlnIvw!z`*+7-hN5|c+#mavBj<6ELy?Yp}-80(aWX5eav@E ze!sUp{AT^_h@Xd90lkiH`wNPrMdf{*%qJv0hU+PM{P*Ky4lom(J^Y{=pLod+92Xi|N^O!JaslQnyV- zmPRrBqF6rW*Xg21vknz6h*X;7z-bV$?8BqER z5va{*I$GOYJT8gHFKTuV&6JF81z|EZH8qCC6ssc`pVIuLE9S3E<2wwFB)_O|4@^_2{MB9QW=g!?)W-8Wqea* zxi}F{P2d`1GlB_vk=yzrlI`1v?I-o2#~|zR&%~v}KRZ7YVs2 z0{jq$mWYT-%1ggQ&erQ#7}1@&{8q=b-goSUKN5hKGG zW0fYjH@@d69I&eVB;4EteyGF-`typ2M2{ceGnrxrFJ7NeRVAgPdpOQ!rmeqxVM5&O z-r2A-y?D5TpX7e#5j{88wX|@D%hVL)Bdw?FGD;UjICzNol9I}5s%c_L+*A= z6d%UX{+aYUKGMKSex}oTDyN6dpal-efbXjxrDfHXo<7cq1hiJ zl{98}Y$9vSG#FCp-;etX=SOn=-<>}?0Zyo1xAX3p6dbSXna5{(wbt%#L@>d9D`ecC zz@eg$`rYG>>+QikFplT%!DA^g?RYpl;~khoQK;65F!a{>u(hkpUq4iSjgmz%?UB=B z`0OO^y_s2+$jp`P>uAQV`Mk%p#ag^?y$zVq(K8jYUVQ-%xDUqNivXq(w?EJbC08H` z5d;U$7>>@d=l6jlgn1_Z?sD#>2I`yaWiFRBOcTZYj^|Hntl^FbVLZtzEhf-ZvSh|j zcQ)*5CWqM5QUV$#_w)eZEp^PCo4>7FCM_vm+?@ac>T$ zD$jTpIou{q`~onn*C$_uofwZ(NPP#sXrx&7$?M>F6!jVp5SA9Y>+GqrzfnYyrr8~= zd{b_G$08C!5KxNuU1l_TxqbOpL`8)n>in~1^xsY~n50EqKDfpIcxz0%t*$bNr1AZA zok4;PBS_DL_(dYdibLsM7-pHfEb9J^1<)n;ZLXNFF!q19|G_WjtUoV|3vYxA8{qEdmg#L~6x+5M2J`za02diJx_I>(FxjmJs z^RlMCX5r__jH43=ve7OtHM5i)GceEe1ElZvsUd6ZDlsC^&?8B$1MY zWnkX)&tA$hEy5|uR7YCqXw^Bvq>DV%-BJk9Y7cxoOpn#XB+8Eg1%FQaF#Uc4shSnu zD3S#>H_o&qzV+CQE5cTEF?v`X9fac(kwrX~@0G?1z1zd0nVlD2_ZPp#vz@sfWOSGQ 
z;L*P52v)8!#s0vc{@sa+mBl1QYimrI-KnI*t%pEW`A+j~2q5>Rg7`O4YfA}%5*@az z2Y!c1A1Dj(Y1uday{z--3$r^cws5<1wO&n)F3$}|VgUN^ew=kLu>GM2kTvJMM(RIY z%T&O`)m4#>FbDz?MX?$=I*HEM z^&N5^n-Q8|Ypa#*Fh#KM-j7SisLh$!^S*2t4f|co`I6f!XE;CKrG@s71*K7?;pp91 z)E|N|DDGrReBrUw1pb_9R@o${J@~gnc?1`r<+7`Bk&Su9M!+J_y(U-ARyP z#Af^8lE5Y%>_8ICmoGSFM(8&O2Os7q%OhSs4Ic1B!qN`RrldIDc;cKy0pE0ag=<@7 zAw11#FIy~l&!_0H(!?h&Q&M;MYe1Yj9-o(2>FKtLQb)2Xe!a5>P2`4{Fu;7C~zDIGh0 z{q+dRQ~kl)izK&;G!Wk}9yg!}_lV0Aa^i4DDMF@iI^uEI>XUoYNk}E?5uPEF+}h-5 zsXU(7p1K75vit{+YPP*&+HS|)kYrC!C;N>sRZU-dl@G`6x53>{CA5XIrE^|qJ*4aH zdwhZL5ZR|OI9T=O6Y>cvqTt7HGV9w_$**6(YVXuoZ;WLGv@XVOjq1*BW1|x0g;%od z=#71*pV&K{w<@ddJhPaU%~t1NpPHgkryya+>7$euYRIKFEnxxeC@H_T>t6@k6O*h( zdIP%fifw?id=Xmj=d=Y^vsDqG~JM$a&${Jk+^o&pFR2J}Y z5Ns6`J|7!d82C<1DC5M(EaVq62CjVba<4YiDbDP`x;n=0S-4u-`nZ`Xq4Q&J5i}g+ zH-}24319sfMS97@EnjXH*Xy>p6HK}@m1?)xTGObHL|ghszwI-FFuyBz)z3*U;A7IA z_LuUq_Qrqv10}TY?BaOD7+wpr_2}mdFlm}iLhAf?dL^Sgq-N_%jHfU~Bz|@q>sQ}z z>gp5;M!Vd>pUza7Yv^in=ObB^>~7InoL&aI7XhVuan3HxmO}_yO|ByGtmY;DkrHt* zpIpS*pw2|4iIrvp`|MH|0+MY6GZ}$?lc+ff8ag@*K0ZEyO359md?l&Dh>Yyo4u{dk)i?>l?NQgF(@YE48o@0$wNWdS-P;l`pm~?aJ(Ce zmno5Z^r=#g?A7+*-MiCOuQ`;a@7`xr_14IiS^nPO9JKcd$Wpq6m#e%u+8smQZF27f zFvpBn%es&k58;m^OOcCU9vjEz>j)B9xxPfa%Vurwvz#JOVm!m%$MzW=`%Jl-;qCRc zzS=;QBV#WTW|zP09uE|+9-*i7r;W7a+QpVBR|}N)-68kYMr*rxck&f9`;-k`HmwGN z$x;Ro5Xf`&KhapJ)F~!cryZ7F_Qm<+h=yu|j)55->&Us2vj=GV2z%I#(Y`M5A59lc zSGc~QMu`-YGnMSY9C*XBUm1#uSY`f^MYl6;fY?0;kHKZOR^aZ#)4i4&i&^r4!H$-& zJu6wW)j-TIpX|3rRLlB3V7elCxQNcrQg&NBH^QakN=)wc!9zEFKh&c`Y^Y{I+tp(7 zIl6fEL!S8i$P3Imut6{MpF4R})(Rw>jG|%VUEDGbw|~^pC^rZc^bhsFqkr#EH;VM^ zj6B3+4vNx!1bMUd4sZ8c&y{Peo=k0OfA%wQ_|~Y03GMo7x0Om2UAh^U#>o#pQ!LVC z?#0!UA0K$NM#9i0DvU9J9;kP|T6Nm1pED-rJ8jX7%B7aXTOHE4A(=>zTN7aJDyNg% z_pVQ~eeLWdGaM_B$d;ZKbZ) zeG__YBHRFO_bX2T1JOOva$j6p?dghVD^}$_)BaUwrWnI9i$vV0HxUS^RZE0~mPu$} zF?c(dzKvMl^ZZ6eA`@W{`vP7_EMLF0bT5(faVn*Bi7_+ZVGu6UbC%aV$i%(C>utb8 z$>SHmM*M=$mif~u{N{6cq}yh~-XzEj1UxJvE)?_NdL%wAr0HU>7{kb^s}pPp)Ojt< z--U$@1L7JWxh?Yx2ngtiK27Gq&!8T_XEP>H{mFFqiPhPjj@^%;d#2d*g4q#IpYC4m zWXs?*nc*_bHul`y>MkwG)@ci1_9x7Pn2Ual=eA% zHxTO;$!4*T9=Ez0WiOtw6da8Iv@<=}lbo>RbcW;WOv8g^%&rD0^>W`uxq4UUGnk1o zT|d^=HPc@iB}^uQkvDm!hyd}{xJ*<#)nnUEbTaMn+{lqdSM}!zU6C2FzD}DKL=4Y{f!`k2 zb#F2sUlN~te+OtaE`yiFzmk6$3G5#nzxiy`U5jFI?OH}J(S_BXXypc{XD8c}UI`W! zXZzP&P2}XxIa61Oh6Bp1>m6jGIM1fijTOvdm^`Jpz@#Xq-W)&YuFE$;pAzat=hwVH zj!!Ts76$X*P-zw5R7u6AS#>XI)HB()-FKo_^X+cFrf8?fe__0%TEJn4&}7&z2HuS4 z`IMVmTSv!}%Yg9>_16S&Bp zpaxi;uC_E9FCxTaF@ZBR-3q$zsd{;ggdk{UmY`~TwvYkxw5VoUvH56nI=K8d#YA~; zibGT@O}@;|YFOTh(2r2cxfA_~mIdn+ylUHOe@Xc4{QTKLBtyYHBoq|i!b18>1rmmU zuNl@KgoTBlUc@J1JIYbKWuc=BLe4H=B2{e1A|&iv8zvy-z4vxxXea_b>n+Ar1nPmX zyTVnWTJ;zX(Mp$lKYb0}B6I!Nu z9cAI(vDQ~(b;-)>dfm-?_xbd+?$*jTsw76O2#(cy$4;+kMs)k@Iz9IF-3d=~aOTUL zOh_`jP_*j2_f5xAM-XDxPoPI>R!&On;b1HIX*VB`nQ zHuvT~x#}3sRy@FZ#D%rVmBju2T7-XNLb^W1?8%USMnMrLXc>KcYY~9Hhl(Kb%8btM z=_dp|>k81sk)vtc`*9Hh5pk@RzCI9vLln`|Zu3RfN7c9`cuQ{F9#~SKESFB6+eAEF z;hzkuOHZ0p<1Pd=0ZvfDf1Y%+8m1uNeotW98PXms8d|b?W?g{8@dz1H&-L;?T#8^Q zk&$BUbBo(k;`fZoXj`4Z3W%dkw(H;ZHx;=j%i+&$SHj?^!+nwAQ4Llvu$79H*w?}A z!KTv4b85Bw2{$Qh^jJaoO8ouF(Y*GqF3`u2NJ=-;wnjyT^Y+fsIAJ^)H`rEAsCSFE zuPoN6GRu3r$?C?L1QLGV*Lf{mbPuN;f4=&}#~b@AG0yPy+VoYL>1a_MM5^gTfHc~z-rd^Gk3CEF^3P_tW4nSkzHShV$Zznp4I^y4L;p=FYL zk{FbfE9{QatjsIlpIoVzkq5X6W?|8XsfhpZkFr0*3Gt{xb2{0^l}_RgK*DBN0HEM9 z{Q&OAT|+d~4Y?_WJVv&O$(~>QLl!}0_H1BV8y*8gx-}b1^ks|;95s68QOdS_shk2L z=_`C@Baa9ZqjlrQ(G$w9IkGTHQQ~zbpOm8ejXs8dm0R(G=Z<7fCv<9>$9%G|wbi4? 
zv~l&b*YhsB%{GoSqZUfN_f}`x&U72E0>#Wsg?OqvsrUN%+DRO$j}_|G z76|n=_YAVKNMMhuug)VA58KDFg{LZC4x5bbjaL{;bfT!%pcV7I*HbxM4a92q4+-xoN#JS9=jP5tD2bH((DvFBr`q5dAZd5SpMhY{fkOk%=CLc3j?+4$#k)B(z?3@;TfwvZDoS@ zYG}Us4kclxiyVF6T&o zwk$@4;(EW4d?up7$xid!3Or%6gWOBX>d zz&PxjI4E~r*JN5fSOJ?rRG)kMUr9BW_oA ziqq{}huSND{hpYL7A92GR~vI0-8M9xR}WF{qtDL9v$R#dDYjajV)ITNv(;oLvn4IL!pN;@TXCl9bWEY;mhU_p8<`B^W6Vzz<# z8EZ!bsEBYLeK_~JpIRK)Q6)5{Z$5%>Jzeo70!)N(Z@pPe_paAvL@1;c1rZ%$*wIn7 zn!bji5Goc4cq$YOvzfn*EkUMH6#MM)m{sDyPPg8n9rJN)i99MXi+5bT8<#2hYfyxW z4`&rsIfY3b?dPt3#(Pg01)@v!|YuP>e~o$Qd~Tj8*yos0xXBa;T>ah!MSQ*aK% zcj7(x5h2{{(pkZ&%j+hnZpHLUU0wS#Ep|cP8@~Xd(ya|POElLFG~lqMj>WsGa@4~d{KxVHjfvVa>N!L zx?mg$1&|;kq56TAtxJMdC89GqGaO!(-h(kB-rh2?TwKh_()>5ibaqrhIK*>Pt!zD5 zY@0j;BfZui@ocz@;!5ieh8wR8i29S{MW@wO*F6ol)2I&szkdTm$_u~E^&-;;&%o*3Y*Iw(l z2e${cVv_}Om%_1{ea^^zV#ySEPy1Wl&8A!2KJajKJ0YN$o}_Uwq`VzQs2^lW=js*w zmB4#)KA!GIqe`wwCDPDPQhr!bgnK5&xiT86aRX&tgY*eS2_+QeD-301K{LSYaC`mlgmjM89XT^c)q;&1{<}NG+HrW zRkJCij?5RPtD8ip%Yo4DY3=^XL0W#LmY(3)` zkE*&{`q1;`eh|{}ZT!0ww?EvzZ9UuZ8N%m3L*w4qAOg4DyD$tgSetZ|6dpTmB0bxo z`$BPX@n+W>5trRz=07YBx3Wf;f)z&jOb4)zn>`^FYJ`AjC4dCzUqeIFqp9q@D=V2w zO|`Y=QjEf{&%jlA%jj7>h3UoHFFp(g_$j+1-EJrPAdTDHr1xU`oCzLhJsnOafCQAx zN$-O%O9@Bm{B-Yjbu@>XBl@)`6An=~y@l!W?mEV;x;R?aZLD37D@6;Gb+-Fsp+b+P zE~!>ynBB(9%ESHbVFI>iv)SX9hKSxJgC=u8?Llhy(F3P>Ph^MeA1^J6dh-%%mle3Q z&B918H_1v_(yTu{HBB>;JU^2VoUZr#^E#;hyV)pg(e_>i>MU`o()L80M#qiVk!0rb zlOUBz3iyK}4x8pwCM^`6QmXV!d3ZXN>}Ut%m?mI2K?LybE&JAsC?_i>@Ol(gSoN#z zHjr`Nc^xGk6WOlrD0@sPDq8;PmzDax;tx|n8Zp);22i&R z(=?m7(vjrox`t+PW#;G0wxgpS3SSt{d;CYp8Q|vSt`HRA|1M+;&(X;;e(dqTmen_X zL3~dxn#wM#+9L`ltU}bp>-MK74`L%j?nmZtfX!v2;`DwY`8=ETSuXDDbPT_1&65(B1bVL_W zvVA^X<)2jfY<&a-E{*2@M${{!56eUob_?|JI?R)56wx{~TFZGWjkYH-Km9Q`1BA^A z+;|dM%0eVSnEO4J&UNe>PQHbv(ekB4s~o>ITu!4WNp6}G?3=uzAVeU4=*x941n9O` z9&a`}HQ^hbHewHFioSd2%cM;fM$OtV5?kw+Vm~jn5rLCw+ztJ1-;G|M_7{5Jh{3vY zR|4DoV5&kda=v`Fjy})A+*uZqRXjFT=W(X`-C6}=GmFH`lZJ*viXUnZ^!?Q%us^oC zfFrNJ4}*lQCxSgpDKGFC0~;Q_x)~NA&Gy=UfXry}erR~xQACSdxxG@W=u5DL1Y@{> zcY+WvFMhZ8O^@HE#IQym9H_Pne0M=(=@H$;ClbLOs!KQ8;%3j3u$HsZq*Jz=;d!KK zX}c~Yef}3j3Ih|+)g?wGE?&TxfFGz;rWs-LcDnc7l>$)y{c3SaWz)d_!^?2`c65WN z3~Q1e$_+I2FeyZW)66Lk1uqk_|rawf^?rLmS0VVy0eGA8c_u z-c6aegGF0Nl^O-~)y8s>V z17L|F!ln5IXi}Y_GdtdroFL+6M>2+16??rVCu{vwb(XDl;rafxTF9YPfi58I-O?>g zi3~2)eHi|xx0fRSSUPKBA1fK!RcWWgsIcvt$s6wbpv^n~G=fLSJRK1chnQ9I_G~GC zl06|qN-;D`G-t<+D%$lMKQ;A7KQ_bgT%%Up`x*v4Y`r2S4Q3qe5gE89d-1MFyxQ-) z=){zx-}u#w=!hiKtGio0s4q4p7v0VJ+GV(?K9mHG|L6@BfEdK)JGskK46FZ@QM z^t6(}`Bbu+9X(>=HL|9v_=J-24}gEZ9s9tgSS>NTi|s1 z=jA`qfCMV(PwaC0@KCqM!(%%uHdof!VaJ&_HGY_5((yZ+;XMtjb9iJ#0C=wKV4)08 zJBB<9Ut!4SRNyPIpK^LnEgxu5vkdQK62|1~*Uz3O`bk4MCP&<$ZT#nd1Ot`EF#!%M z=B32R97*VctG#dz(lKGQtZ8Gp$}`2=6XiN-d`{%gs*?rG4bOKzYm)wsTR3X+=xn7+ zg{HIg5RG65UbW=2h!#)YC1bpM~fusI3-|3g9?&vu_wj+Vk=|i+3O;O7> zno@%%U;FpZ!ui+9T&Wq?sfdp7o%`i{L@MbQ8tz5UDJsyiQbEj}+G(%ApA^;zIexQq ze;&KEx20`rL`^f{SQTbCL}0Gz=G-hn9CWqJ`!J$~OgMwVHTg;nM6Z>umfkjVFokA49UtTMOq#?~5$!+0>A_}? zeVPqTDOZUh=3Gz=+&t2fd4q#hF;aW7i~tl~TIkzV3R0A8Y#22)HMNLFeF{$oLfW@R zlmtUzk7tqD`5fox=h*pAFCQW^<=&o4Qc~{Z6n7Tt>gp2uK0hK)W8$WEVRKm6*hGwH z^5y4!o2bR@Jme1r-g~kyr&)prD-Sw_{p;=CZui1}&$ngc_h`X8gn3xsnUj+We}(VJ zfuw0l*eqoDAlx<;s~z@wdbDA5UtLXJWc}r)!|fJKwVK#ScPGO5l}TZ!K%gp>>Och4 zJRmvn32+>sgcp=>!m4|^?#rBP_zezN67$-igo(KBo8P2!Rn&b|LDwwFYIj5RFljUG zv$6+@gNOUA)64ziM_wiY;wD}v8D*Z@9eG-A`}Cv32>iKnJKGgbQH?{ZqrLHi#p?H? 
z&RSsRo3p`arOayZg(dfVuJ6IFY}};pmei{4B57O{!zKWnJtfi%r z|4Bp+o1LAVR$}F8>B8U))f7C#DicEKbVH4WL+p`<1nCNvUur?fU|Kks=Na$^S4z~V zx$PIYV7hnd5-GGEd@$#zAIl{%RG^U}yMD1^jCr&o2eu3`8<^&whFlF}LWh z)!=hd=GA;ST{By$v7m~8Vf0R)dv*)I9QD=m*A)SB(aiYeocvLHW-8H zz#XfPNUHdZVCZkk)nbNSE^5?Gs13dI)fn>XJK?)>nhEVc-rDIaQnj*Az#ucVu?jUXXe>!aXXfeXAmK9y9_ta7Rs|Eg+h~|D z@|<~N>g?=QQkn*hZQAYg^m|ad#+%RS^6OYF6K8sm4j%$i&@&b#1!{+ z4_SBA*4{JgyaUXn3>rNA)Kt70qbc@CHc64g?jvk&iPQrc@DjvIRA#m<+r8s|-%ZLw z>!l*FkNaloR7|c;XMX*~h{Sg{U%iS+BNcgrz?@b~GtElF z^uz193O$_ABrUwlZN!Q?>%nNCkw6zCxWX(@l=Sluu%`(fEet}T59v#pwqf35CQmPRGzZjS4kCy$ZpuR1DLm- zM+wq0)88|2;}#7JgvZ8655pojJI*nETv`G}nyiV_`}j<@841p}ML-w@IJfxyVWLD zeo-YP|102x&MoR6IX>%rv9c^`OK=pSrL)BUhX=h-E=*%E3yX$mflRcjb*hX5?iqyq zp1;vLYE~JO%>j;Depp63p^Pcj~}kP z6$5qasg%^DyOGm_w-#`djc3-b6`A@nw=K;77D|Rmp=S=UgFDsnD;9T11VJ#$zGyan z(6=S(QBjgZ=i5lj9l{{R3s(k_)`=`}%Nzk2+GEW|+BDAq*j!FeunbYAn$!A+HC3qC zJA;Ga#J_mSC&Shl%v7K{3L&IAAy1kx#ljm35^gzek-@_m#m*#dm1H6#x%kR*)5vw| zpQmeyq+yMSp`oFHBC}l3sj#9vM6=vu9wm_+sGEd@#G+BXMzJz&iw1~@WLB+3ItqU% zN7OIx_TPEh-biv($T(;^i9%QzT0|-+f}^+%ICi&>KWkLxh1AC$O68DC`FxrYNn#z% za5!=Shw`4wl{bXbptriCrw>M(AtM2T@)6Z*`zu9DI_Ev)l!_3mu)%hY6Rl@uJ)BV#` zjbZZ)N#CrkxABJ-x_2h3DJ{!nmoTGZA1j@RCas~KJPK(6fA=C?QI%%&R;uEzsJ z%*4u{>~IqP;lfP)^w=u)QY_ed@|g7e9g-%EDq1R1<$V@tC!% zrQo#a(rM7wTM3>o_h>H$Ye70c)R#a)k0W?KI$cL2h{e^L%0S~?%E-QvoZOk1hiR!j z;dJ*GcoZ5vEcBU54ps95)lSEd&5A1DLikFPmKnWwmi&c-^x9%#79mAkIG{1;9vt^>UW_AW#$-y z9vGrK9nG$59Wn7Z^uPJs%6bj~2X|L`aO20oo~fT;p?Fm@GVht5RV$dH2lEM^VR!tKrm(x2@R{eaHo>ZA|+LAw*&=C%b2I85|pK| zL>P}}=BFF>o4ee4eB(yGG3l^`-#lS8z&0uv{Z)Q+I6U9d^tE+FQeL2tZeW0EkgVMW zSbJ{U`=kA^RaX7n&&f<5X*{0TI)`rO%Jr;{%#0kOlK?5&W#4LCt+bpE_$mDT^JID; zBGMo>ARh|uYcajPzGlIxQ!kXKtW`;S0A5(5LlQ>P?f`3WF>R7LbgOVok4h-y1<@-j zBQs&D1Cf?HV6_&E`f6SUCPr6ZD%GG9al!*7ueA!+CIdTxPJtYj`?x`|GMNlhMn(ox zaG3o0q1882i-BmEgtW3~PJ@W%B73z>!N}*qL+=Uz?*fA8>^>m}GYPBEaAxsEfz|=F zBJf)53^BkE-$zxes@7i9EZP#V5dFfWz+0`UWzd7Vap=q`eQWV}OHaLLuT_X*s9m8N zzF%xL4&~k--rS}A!^V#$Y&%=FVZTuX+T zrU*9K+rBCRRI-neh3|Gi>OHKooTW<_HG?6+zoWur^S0%`s7tcsXP*WlV02WOS^Ca@X)5s5T;c8ytoo@H!^Si2$|k6M-~OQRilL`q3-Zm=3a|2>RvP`JHC zGu2C<@xDpH4N4OKnaLLxlto7V_)n*{mr*i>)6Oo5ZJY*16FelZXE2i11!(EX<^(`L$o54&OQ5 zGdi`EWj+J>t7uJS<&_*C^_(DJs_yVAj<`a4WtpPlBldUD{(z>i^us$l$2el1Na354 z6+Zl8=TjEP6h%HnL|{S>Cb>{}ZJwQeMyo9l`B3=uNm;2<6GQ>L9&Pe9v8D8rBcp4% z7&D>rZD4$Yu(Y!cESH0K)N+Y4v~EUqZ&UD?7_NSl++nXd6gytXamebq>BG*?2n%2;qXrg zuU;3mO3TQyn@kcxok>H#<0Ty?aM%v&e-^QwC79=Db`}`O$8hw?#3&z_3j(2;V{ty~ z5{@wCWx(Sl=qps>(*a~t2>+>%}s8on74rL^JuAS;=0mwh^rymI4vkO*p5^ ztkOBtm{TqDP=a{32R0Yy%A4!0(CxYkh6GfyB2k;1wr3)6hFks!I1o>)w}gnFtXO=D z%tkBQ!tOLq@&fe@ZrgB9+x=Q^ms}GM`g?f$Iq7UZhR14J#)4(!s{Q)!dva#RVx{_D zW9bQ<;&9rb3h7q&R##anIp4R=e7~*)eIHPOe{W-aw0xK9-{g}*mmHb6Lz13`W>jqo z!~7%FPsE_n{`O)w>iF-}L;R2&^qCq*e*4fVgS{$5MQXeG^ENhC?8Ogi5a8cq)y^W%a_g}50pXz!?CgN;AlH^L{zl5#eD>Nf26eF%+EfxJCJN|6PrfAkV&si z;!u#uGTz0|h6ilS$jRGX8)LdT2$3gUWa=6DFC)&5T_QQMTuz4J7{(|c@AV1)`|C%4r zm76`6Kgl?nAdpjc+dn_Q59@-`D(I##K4BhZ{K(-&Z;d4fXME$smRITL;3X6y1cF#S zI<)E*k=L%5=N6NG-e6iOYZt<#$4ixz!u#hTtdQPDGSOJ_I;;1`BY3(DHux8hPu_rK zpXhFF?c8(_W63&?uDWWnUC^%nYJ60ow3YGC0yT{gsZ#^J81m#3J$9sU8r zK|5Q-n$ZORT3*hRYIV;oK=5n_C8Hj8y`ho?io#ex%`4L5zFLzNNZ3OBiZ~)l|CnVJ zaFAxfv_`k)A>fyrV@^GGAe?NNn0K{`kp-tlOTA`EPu)HaN~EiC#iF7sUHvw4IDGIJ zTFcMa3axYt1BOA-K$w0MV|#O8{j^`=YPaDhb}EIUiH@3ROASyD+Oi4iQkBh`w{C30J0 z0lwwili#ax+ybI~U&uGS{z+7HcG|b(h3Z{x0lgcX7{K@RbudQq;BW@i^MPF(O&gE7 z=c5K)AGY4}9T(Nvx)LxZY`DzJ?c=X>xP16?Q>wlQLW1a_g=5*cp1vtHeyi^8zolx_ zQ-46=CZ&imaa;`WP->YUKYoCL696N3^2J*9&6HBy;>TMAc%tRTX|QNI*$K^IWTAqy zkJ=APG5Tn!X!X64j|y3*1vre$)n<}Ki3NXBv}qWV<<&G`1g46kaH+@+=@SyJ{+^%1 
zB)`E)2N9dY@`H%pzy^Q_vXb75S51Y$e|4WZ>(LchqR;7Zg#<_iGe0TCIFuA9e2MW@ zAAtLzRHdh@TfGphE_I(JrzKFilL)COiPlRONo^TSF3ahe_$Homcgdsuy}%JoeL#a3 z#YkvMg{tsSycq`rc9`{v zEB>H+^z>OAiNsK`ROJ$av(9F$feYuCm zUgh`Po%O?@K_7X>e3oI=`GDgEA!BSEsOF&|ifI>E1+S|%Tk{p!iUSt{=%QRdZ9wBv3UK0InBC{mP z$bM@YGW`aR(>el@?Bss+{Nydug+g3nF3KG@vfK|<)tq2I@K)brOm3Os#~!vonytCYrD4P#H$g3b6p_OhmR4+je8!Ft4%@^6?u2l4 zrU-l-ha(my?%4$PP7lHlZ?1*CncVRR!uF}U|}(2=L&Vw zBFa=>DE=HtYz&;M)=HyzZ*rT}= z!=l8}_4PmqR*Ee?G|3dgmv#DA!#zcWNNbsJW##hp+Qw%xoTkuKI8{0-Eh({b8ECDF z69}wwzJ-NsGR=RPNj7S2{rMf?UyU=Q&vJ^#;(McmnZ0GOH8NRB1Kt_XQBTYM)DC#4 z3UdR00RF&Rtatu5@?M7A!~(c{2M_!a#iCRyL%uLMJxNj9wqWO@&&M(BdU#OzrQOYG#88`|$LR;CW(T#< z4+R>gsc5^7(*j`ksiKXK})B_0JrA?O;$k1?WV>uUEso&{0NR9BJsy?u!j_07#F zrk`;#N{=t4J#(6x$kOd_q6l?A@Hac0<>_##GDeCDm+SbM6op!=Q#mH@wOeSX9)y)s zzJWA(6Dt;t&CfJZNfV>!^^QYciR0feLsVi0h>`O?y66Rt$-DUJS0-N^dVYoOy&_HG znPpYU<2R;^5|-@BxI?$@@V^i@H>G-Rt%u1MGM)hJ#6pO$5)EBKWhbp10v588->6;Y z$XQ7E3IL9;L80I*yT3%#2#y^Vm*$W)D2h-ql+mjuzD0%%cGgg-0(&wpgr^f>cJ{ zX>B*XpXxtww0o%Tw`Ge4<#(?DZOWgRQmqjTv?X?HK6gSuW*#0(A{(89``SNf+S$Vu z-HmJEW%l+^1_$r&IL5UjjShsWKAHMR!2|IFDZr*7KWvmC3%*Uy;ISA_FToYnqece= zYcu2O-054~@t2=I{DF;*;K3M9lMrlQkpExtw?S``8C3b{k`X3TX>2l_HDbjKeT00T z;P|D8#k!_MaK@xIT#22Bjewvh0?Zi-o^L`yUUV=7xvz@=Omi54Q*j5*+be$C;nX2~ zLM?Vd0TEZ!6VOy=B*a3NidC0<2PK;X=Z{ghm=;If!F+wllTBtSnJ>k06!8j zwUhrNtfsy9TRO@aV5&^=Cj*wkMC|mLPEq%4kr;!Qr?7lff?BqVz%}1%G{CAZ^H06d z`OC9UOz4-#Wu6pTFQ0RBO=dmy?@lA!4Yk$r^FpBfsi%#l1ZJH{m#a^0fPZTGFfYy}F}vWK&eiK9JG>GyenJawwG+OD-&cjQbh}pNEf#_j`Ft#md)2QAth- zrUnN^W^yFJ>7V`G>}8447{dMs$S1+bII}&OR~n_uz|^#g+GYTn(-&^cNu><(20}{Y zwD;kOSnx43v*a(DK>Hq@()rQ~zQQnl2puaaNtl6KNB)GkY!Bz>1K`gN>9t>J3~4W> zr}o*684_nQi);48`ELEI)=I{l+ChUVqSX887asHM$X`udG#=i(x3Xo?U^6-m#~w~E zL<~zy+GtH~!iX3!e67v(@rTc!)f%{|vn;|HFwmua>{&>h>>bAU%4lS?ymf@BM6wTa z>55ItmXRsCv4(V!;_){y0Tuo>S7Ro1G?gvSv|FM?^E;`cqAyR}<^a~D^(&(wD_@q+ zLo$xcf~ZvjA<12PuiO$IKIyP_WqFxXJ&8OCt3X8?=$92`>6NO}7J+;ohp=A58Z%lv zZ`7-YC;vLbhlRWs;>33p^1;;NKLDI(L&zKP(Jjo|)zbxJXcCVtkFPdAmF)n7JYEl0D- z_?%WJZMWfA9TM0RVX*#I>YnS+9UjdQIo~X=xO83YD&9wMaWOXhpBG@lbNos(L)`n* zd2#_|ykW0=%SXH+H6`w*cvlUIVx z=p*UTX293|iT7dU%(aV4n~Pih(5Dh&Iog|!fPhzL@H4=zYJa(ft9!UfRk8ytYOhHl z4x^S_o!15ubr@4BlS${78@3}1LjS_vFbmaIca%Yum1Zo1IEultseBx8QyGNq^J`Bq zp|?NeL?_un$O~5zpH+h4AF7(EAkdvzOy^4Hr2?b4i~>FMJwdE|sd#lzqp9Zm#pcfz zGST=fx~gX|SgDyaR=7Nx&<&d8`b3dpQ=nCnY7?YB%#T$R*)oN?T{?_o^EfR;EIZB! 
zqSvgTEkn_5>2CBmGcJN#{w|5g`)z0`nOTRE%y&ctNYW6M8&j@L0||S^aRFM+|& zhzd7T*|#66%}Tp9cvCZ&!D}5xW-`>tHL}8ABR?_y^1F!zz3oFlsNwX&K|~~^7aDWf z3kDxfe;7~alIBpO_r6f^WsSrG%C_{p5Fm53Pc0_$QEnae2s3{%7xpl;nez<;7g>!|CIn zyVNoT$WpH+VoeD5fZNFkl3KUz^`F673)+nbbHPyR!*sxSGn1*fPzvJHt`TUCnTdiwOUP4hE{Mi3blB{P|=%$6R^=88i<@UFE-1@wk(1JEUBSJxTMwAHv+Dlv6p zno{wC1prZjBMr*22;>fr4b91`20|~ra?;Z)G=T{a{z5{q5;h3remG6r!2LI(iw1`n z7;#*{ue?J8&M)I+%A_|^dk>dX$ymCRvtzKjwx*?|a^VHIy?VtytxLfje9;)N1FYZa zxM-lmDUY=fR03?C&(sc@Be8{lbfK#=!H4j`JrRM5+})HdjP>tGEAK1T(t-O`$x*Gd zQZ7t2ni`O6)3l19Ya->LaUtXmcT3v;Ulb042Xn8UF7j0XH7>D$QB)pic=5aY6rECa zy_$s6wCS4aB>_{|Z^^KDhe3=qTgoVw6RO>8H*}bh5};o#)T#@lKi#2!<+fwkI1kM} z?O=lferi{ajcoI&WCTO8>-QNm>z;piJG()o9=!gkwm6&3vYL=jU|{I;*IPTTNeqG# zO&x3^5Cz@4K(kqQuUhKmF_B#Hm9^@->~=p)S1vAic*MFzxj#aI#CiRLb)6u(i14-e z>JKuv(;HWHs+EQZi@7k==81IzaBp)Br}Dn+p2P$pkDG8KNC2rjmU3-yOudk_rL)U3 z+0m@opyk?PO+;9FT34YO^?ZHE5SC+T)Eh80%1+5gyQC{l#)qsSwXcWDInaEmoc${i zf(#fI6|3abzLf_AJ7);QQa=x9+L{hZX6Y8P zD7KVn(uye+&T@EdAjRa&rl+pzX1xb7%Gti7Ng|5~n(+LCA3Zk4-C#ZEyS0t=c^{($ zUi9(aEqS}t43|f|mO(2r;#6aX&hdX7js+~;T1#ZPsLv?G}D0~T2h17_svJ@ED2s_g3_WfH~D(e| z@Z4n!4+2&iYoHJ+imZXSh1-Ebf#|36ARem7>vQq3{a>@oQOQ-AEv&{YDm`Z8-U(3ypn_XrLvm`3b=-C~@3YloWhfP9X>h$xQH_ED&DYY~(;YqxE zKaU1Gdlgat31k&iFjk^x@(MztE(Ib%{t%nPpY@zsG@u#Vx>ff7$Wp(N9w}e|lg{4S zD^lS1P&+`5Tu)|xp#n)u+_&I;H8kXGil_K%)Cqt9!T;D0ETf>$yEBf~kze}|^)4FKUO zwZdQmSRp~wH+crHt1LJe1YBlotqD&QcA50xh|Z~Cdq*Y__FRns9C@^6npXToKV{aR;D&-jNv5OO{L zgCz*kNV?hn`?t8FVGC||clQTAJ{2uPAI|@l2!N_8L(78jDgFnSLPPp*y-Zzz4g1lB zy(Q!gH3(?$j@E)(@X`XLaezE|;PX8yzw$T#n*98LlUOJE2B75TPR7ONmnZZ631b2f zK29I+66n3%g*G}(7-=Xve8d4DoodaO&xR}2;`_PftK|e4nx%orYpun0_9l;ys?|y) zDIodupV@wo33*?NoR)+6C`8Ryk{H9IW38SK2vr-8qA)2;?ac#8CxCcm1JffcZ1w)J z-mAGq>xVvwm^b0t_S$NEvP~M*expP9f5`{nRRM%lfagNt(7EUTt~mg3^SWY`|9{24 z=#*YW(I|a7Ei#xQQs$1PS*wBLSI)7@xcLutfQQsKXwhA5me%aWGf|}HPTNvBPp8o-fKHIB@;^~3CF-IW8KqdS4;IEtyyu30?dD5RXr1!yY z1kvG<=E07cK#(?jx|f%nKTwTX+tj(${T4v~T<3_!+0rW1zSRS0FyMlsMISi08UVbf zaC0nvQVTY-vK_hpFxv|^BP5*c7(mQ(hlo&PLG9!O1RAS;x?&NMjnmM+zEa6m|MCEM zn*g*}fX>5pOXBbJNydiRVP}s=x}j-~r*Co}uMs)yDptZY((i_@3Sx@VXG1Uh_MEFw=kR`-J`DX6M9U z_IQPW{NEpo{-FfhewV{_;R<00(!)fSMS9XiZ0v15@PTlgMM7}hOZsM1I-mDuHIqjY zgMi>Qlf70-OyYMjfF5mJ16!&5AK_^9mWJg_*e~|)keucCHzE!oWg8n|){?&)g)e<- zzvK*%j;@K_O6Wa?-%`EegbW7PaX>Cv^g^Bt0IE53LpU+>eA z7NMT5v+gU+BLeBs`w;u+m7IocLLmT8J=OMerO^zC+7Yfal%o?^VTT{ZnW&%DeO_#1;|C57vmFMo zcMEv9r`{z(if*gf`0k3l}F*_qxRDv>(}ZbJYepI5@sM z-f|pR+uO5jZEY>~^BFm7Fft1tcu-JKsPZ}8?H{^r@?_AzSS->Gu>BPk7SurJV&scu|7pvEm=&Y|h|H{B?7)=r{=5=tsxwJ+j;>QLW^x_MZ zMl<%9s~rEk=U53LnFEBABM(U4X7m){WNA<3WiZ{N|W6AO8j^z$UKyQA90NhQPDvC}x$}e)h6x z6LKQoV=;++l8w#vizC17%Y8q44OkOWHvDwZ^J#kjMFT(g5(2@O2KU3voUcW;aiM^^ z#PE^O?|3ClnZPLcazdEE%*^al%C`z%4{dEt)6qr;rcW&%8zQ6=V^a1IZe_bQ;?m(^ zIe^E-3Wdf)zeVo^!{cU--#W>>O{vr`6v52|NZ7CQ?wlqsiE(q^ARME7kQ*UDL|_mL zkYVYfv{|EgKES%!1cm_j=~-um%$4gbe3h6GgQ0k*Dn32RCRvwiTy`@z^gzSSt@*UA ze>5>H8W~e70oa5}I6v-MW3SeeCZ?u_0l?}7AC(6#2i`LGP3-Vw7P>no78Zd2Dy76* zZrL!h5CDfbSqoNhi{AB4d3pL%)tLu~-z6mgyJ8VY0WCx+tRF880jQ&sMyIOOyN}2S z#F_qaf-7?s5H^4X;C+PnB2$5V2q({_|$&a%p0CDz; zWk!7b{QHfFQL8psStc}$T7Xl1?#}S$^4G2V3h(3ne$J7HZfg=QO9uBa-L{skZ)_ST zg6Qe0o)Ib!CpL66+d$Pn$Ni+|WWg|q!(%-H!#ewL#KqQa%+LKS6jGTwpdM0K_$dJz z_?=od53t*ZyVrzXs2DWR`<;(WE^LYD&tC) z(H|Dt_RGNm0NN;!YeLM|hLKJwJ<#C(+RYAwToDFEwTl9QkmlcD@c@s8KP+wueL!2A zPXXD8n7pm6@c3eL5r4HG#r;jY^x73LFLkQZ?_~g?s})@r29KtF3x$Tn{snC@)6t)# ze97_U@)x?Zn=uqNfHocq4NR;4kc>`_g&!&^@-ViEek$_i9T_5~wo;gO=ebO~>CpTw zTW{!hX>%%a(hqMi*WC@q=WmbOgpzBP8^H>#3($(2nTLNGw4wz?W%ddSanThlV@$bf%tHVM~o?gxq0M5d0Dt1A_Xsy}Q8TNh<%mFWA* 
zI9S|V|1fyV%s;@G4^4+ocI`^@+nI4sGNyoysTr7gqxm!{zF-uVO5roPDPe3< zkEGK6#H-zLOhN0D@I&Uy7zKOU*@(_;nz< z+wbPIKUPKLvsW9l3U~%Mbq@zqpR&`!Kj+JA)_@WX`C|rw{{_c=_M_H;tR;xL}K<5*$m)l`roUNL#tiWoH_aNDU`)0`P^*EoAu|`rKmxkcVPJfiQMpSQQR3`G-U)w_T(&~n6U`H0YLzjaLQKb;C=-hWte$8zEt zr`NKBfIXYo1-vRaF30&20KA__+J|nNRIlN*B>&%g{H{k3qtRg}p9|y^$3_1__Ri>n z+b>P~K8_QlEYGKcTIG=XOudo_9g`T;UmxNAtoyE<{Lxo#wmH`IdC7S(?a7bZQhPqF zYM@pr0B!@|YEXai^)(cJy_^M*S&Z8V0ei8QYn#A{o8eNsY}TF~3w>BHe7=oOMH$h6 zcz_r#a7UL{$*8l|eutgv)woFl22`K`3h@f7fsh;WghGG}^#|YqQDNMO7B(~eclGNH z|B2>?!;TzoN|LF!@cio7M|ZRz89e35Kker>fD+i(pjynW-NTyY?o@D9&;}^$?zb3j zJl-mO6C`l*d**+;=EYnzuQ9op_3u*mzf(ZCSiZNe4Ug)w6K`ozkjPY}rcLZ|aw2*K9fW-r;job`h!25FZv1!F4C%ny@C-)?983(Vx)21ZosSPS#Ilb#YD-23UadY1ZsG^~}w+0OX*!wvr(-)QEidRT0l zn8PI;rI)8O8N1&xB)_sm$fTnP^2hha*PBVJ!~^^Z;_at_i8mxoSBF7Aap-bOm+b#v zdtVt<1-G>e2r5Vjf=YLn(jB67!v<-TZlt?ZQ0bQL*rarCK%~0`B$V#%x@#-%d){-t z@BF)C+&|Yb9D{+|z2;hL&bgl1$H}DeTp0W6h0nGoD};XHc&uhs|19m^X&r%9==Mhg zK|F47hlHv6Wrg?~4kqX3Hm!P|Q#|E*=~COgaJz z-JBd^Cr$KZCO$s?S}7^(8M;ZLYc_Z6ocj2u&|uN$E}!)Z;{vmM>X25|b>4Td=!Et3 zwQUX6IC^YZ6sMU^u$7-)xmWegH-qo>^aanp0T?xHI#bUJy0L^j#G&i$uj2hVigIt~ zpg<$#wYRSgd$4*y?Z6|PdtBGTVe zsynZ}gDcsSIQEj)TiOS;%gKCtkq>@rP-Hky{iNR`f%DYJ+a|CL(5E72{(!yj+gAml z`~dt*Z#3xWS?AZ#-K`Dhk=}C9vp=DRdF+~$X}byP!(shtb@+}PsrQf75`oksklzgZ z^edWi(0IOWQsFB*y$GV1FPL#CJu)8oc>kT--a(GWf~WiS<3vVK=b{* zweNT^*oh4h|3ktm8woE#FVoUj=T08=Zk${fd-txS6(lTW(bFj@U*+XJbJ8Wzq@guf zR}fAERi&V@aAE+Q(e&;CMjHa@#^EA-2yId>QqNK9X+eW*-XO+%gOSY;#O2} zW@sn*2yZj5&K9bz;p&P6Cv3y-#|4Ed1me1a2IJq{0&qP21*yUOnPZp;)JWTHHOu8_ z!sHG3d%pFnh7D~bqp7)@Rbr#CiUJr1>u=QLvmSNCv7t~~jM>%+>Ji}Xr1}_!$;We` z>Um#s_87C{Jb3Vmg;8w(9rAPW=egH2s}nWsY^9}NKy2b2@|}68`i7!wdp!Hnd=a*qC%HnSw&^{yFqB0_w^OY*wO*+4zusu zSRMcBtIO~K5qtu1gALj4Ja^jM*o1?g(hW)1&h5AF^IqM33W!17<_^bIY)`+(Qg zqtb~vugCaeXZHFTAB_9sr)OE6?{>aC0oD)kwdpj0P9Hy3VFI^=p$pF*+P!df)prd6 znB+_xs*WFGvGK5d-_gj8JUoihMCPfwKBcAdaW&6ZGY$Xz;W*i=c4kmWuaV?3*#n1E zJ|(32;!<}TmKX-M<_!et@)~~+6D~+DvZ))-c^C5Q)0K7R+X#z_C%jlNk`2GLwUJX( z#@wS&t<)C4ZzM@1nMxcq5LV-{ZFbKyI^X{>u7Icld zp4vH?fdLY*dElZ1rqmb`;v47M_1n{zb&7Z<4zloR)4*l2ayk5MuZy>1Q~$2 zgLk3($#7P$rdQN3Q*IuT;nN99Pj9SBt7Gp4Q}|v_jzDQ|Wr%X+=z3osf`;lj!}K}w zN#@*TDpzkP_BSRgx>p4v^<1#I4{D??4aEBx8JU$FCF>kkgNDIK&DB!@A0%HU09%3U z;MKv32(0&ZW7Yfv+Vq0}(gC-_wcC349)Z=rqe<@PBqkCO@z^3Tsu!tZ5W!1HRHiCg z%nG$K(le3A9X39=h+H1aiN1L{R93n(tso4|RvgmMz2cu=b>D7X@w!aJ!SddH0e?SU zY)<93DbMyMt{xDnWAT)EdoMhCG`|u~Df04je$FlrT$qEUf%znUSnXQ&+p?ljR8;f< z!y&~{*qIS~FD^VXb8_Yr?fXjNF2E?cutKY$yXKSSW?<4?&NRvo=ZLGhn7?uXW<5{F z-dpA=2w&nVQ;y8IfC=oQ1oQKYDNJo*SK*YL;?!dAzzg*SkLP;@<1g^M?^23~wZBDb zPjSaInyO60E2|aA?SazIp&)jW^!B3c?IS|Xynel#>=Itg2;JGMcjal2BdpNzK04W5 zQ*fV8DAd~hdWTehvnTHJ!HoN3VV5s!;(FtQ`)L7RP8+XVuZZ~vf=H3aqa&Arv?Idp zbOd4T83OQRB6>gg4!ZlaP|M6y-R8R7gxWP+(Sgf3{_*zYd~3gb5p<6Z72Zeh@;XH+ zgq?NSjP<++I+x|HWNLXTLPtl_B&qkeTVM{Blmmy?R0Cl@CZYeEx3|eD1b*12r#Npu zYcK1w`$LUp3HDW&8 zT`~Xroo|sQ6oi8x05MS0^IC1rw>3voOl?M{dRgl&j0RF0qFncPCc!|ksdN-fB3?go z?wJq1Hok9;7tN6k3=>#161?mHS(|9 zSVcMbyusljM=Ag`*3+jxHojB1LCsDt@g|hNLQUydzSfg32M<_V#&T zuPPqOmYp(uoOV59_*;0+q zzS9lbfy)lfrIK+32euFkx~Bt5-ZSz$*seY-wX+}ezHqsyHn_7@yGm1kK4HR=)c*GO zVyT+I7J=dX{uzG52q*=}m`+5-pnhOcnd4XGVRVmcsRkX*^wgT z;v;V@EZ@T6;e0GJR;Z%2cT6l4RR8rEE-oH98wJudztcCP<)i@Pj~}Zy!4&2`?FOU3 zJm)i@3evzpq|b*#GS#5WE2YMY1%CQIjduYKVO99F4$Pw)T6A&1=|45NKUJI(TlAfW z)!wq_u__VYJ5^(M&b~(o+qJuBpL=~c92w|xuT)?Tl>EY^3Et-z01CCoTTR^uk5bFT z1RWiV_!|xU*Xiks5Xk$X2ekKbao-(nPX273vT6SyL4p|1X;EKUZx6o;$NTzW<(Mu4 z(Z7!YP6cv$Q?m@~i(%Sikw`6T)NfP!aqeAi8&5y8v4!oe<_MM1z^OMy@ZGZmnDa~q zDTkVxEAWJ!8RMLdZAV@FcF6K_MnOGM?*p8H)kOP&!(V~E*>b)MhGcxTh}J%_@Xg+T 
zjD#L>yUl(<=eLBxx9{ur0Bk)c`B@~PO+gheZ+M}|7-eWvRNTj?7l9pOfJNqP5K-f$ zzm%7kyJ>kDEzWv(UB}3(qWX)!Kz#ai6me)k5e;tsq>2y&EKvwLnZx1TA0hUoArl3C z79ZRnu5(VEj#rpg7!$*4cDUeC7P|VLGuGPrFEZMIiEU=!u~u*LNBY5c%!$m+QOA;* z{d)fGU_*))3(Zh)2SN7qR5DI@_O1HQk-wQ3fIs+^e!>Y&`+MnNNfE!yPS$!KQ=L1D z;?L)zwR-EXy*>wi8raJSt+x}@g|ouVr<-6v`FJ?~O$;L1OVnJlRT)u>@T3bPI4u}cIg zVrXdAA15EP_A&lGtUoKiM1{Lek^ed_z?;!NXb}Lo{A_{5u06D0a@k=> zjQTxp!Lpr@rPLl9zXm0tS*#+@Hcu`YIFR(-JL`c&Grz})1Lt! zc#gIEb>6Yhugo>y1>Gt3t7g(~|NS}O0ssngvI)y4_FudOqX`2;k0l>-qP@C4x#MT> zdgRfwXP_#Vj81ZNFiN=BxNMSz1Q%EJvMf?BCZ&6}7X{_6yzIrf$)o|ZeNtloIMJGc zpm8K>Zmv&FIuhi?izkmBepOZU$@4>605|YOMP76tpM7m!IT3C}-f&7PCJ>-8#L%3- zFA=y)c=$YcxBFvDkSoH1`oP!bc(}an=<1T6B7vf*Fb#*U^%TBk8 zBfYuRA(x(&$Z|$PGGwEB&hV&#Coe5WxtknqfHA=A!eby%gEzwBI*@w$orFU zN^I}#2Hz95UxG@1)GfHoMsW8d#Uzg&$^3mY1mdZAnaE4&>1&o9zh2`BL~lqQQi~2m zdTU;>c*ktd1bkr2#a^`9$#rd)cnJ$tJj(=BtLW=p-aiDHNQNqL;Kv7^6Hhhw?e5Yvo{u& z1jI||tGpn@Xy!JjnV{3 zF&xEkmG$>-|M}xdCXiJjFgJqkpTGU*XPdOZ?YhatkN-=J{C)F(zS0BE#3>$3+PME> z&c9X8@6XOL14xN1|JcFbU+JR&_xnVKz7K@^eg5d{|M+Y@HK4RA)0OzY z9vCg$eC%xh`r=;<_8*^}f%BWzP(9{#8|uQ9&%1^%Ty$$tBzzP<>= z75|~*Ob%QjsQ)3pP3v%xOiFz8*Ynjx5q8)?NjF_Cb;-#?bK44boy_weTu~2?p^SIB zivGolOh~{M7wtSd(F$DdF> z^(4PIOz@STrikizk)+ouHRR0qpKr?>z~+bU&Kw@Ih1yXggXD$W{{=D@|Lfe*6v=#mBF9J# z=+yeHg?*bI(P`H|=2gjlJY%|$6-%)cZ!zL@7`9;euCwwm?0$+kuidx~-E2vQaj+E_ z#9^zQs8C$n**>d9{K?$iC`b*Uz8 z-4H+En*B)3e;@egK9D|5Pc~lS`0Hj};0j-1&F;6K4<7pgp2sr=v`5T#W@=_a4?y%T zy%DP)_#rB+YN8zVE?VAv4~BQGLq+V?(ImF(Zma22cc&KC5XK!X-*i@3)9!L| zzJ~V!NL`=kWS4OC1ke04oC~v3`!GpW_Q<$~`e{&9! zeCl2YwxRGTmHfq(dvAkO6MMa<`#UOX=r)9e@7*4#L+|QwaJ$Bdl6Bb;{Wjy2Fl->` z*pvo*u#h(;@j{tqD&@L$V=A$~>ML+_Pu5w9wq|G7HpcSR$U)YcX5R{74g?Q(ZXjBH zlF1*DdDcbfs{Yg0eqo8ed#o=nldTUIW3N19wE6!?U8)k0!AUw2cAJRb)LrPLTAn)@ zViKy^6XK*wYkWzi;mKa?4uRRzhM*G|+BO4uTpV4#aR{vdKZqNsi~v+gqyH48o+X7GomuBj3B9L^p-? 
zJc{_d;s84n5Fw<(5MCzA>`Bo}8MZHSyH5vv=S28C1LoPlT35noX22p<)5|Vk`W!-h ztH&4_K)1OSFuV!09`YvyS%R8a`MKBhnU%#OC!<)d3Z3pi(^cTnT`W9UsNefEy2`xN z2btJYeyibx(sAAjEJDApU+@3OMD?Sf#P({#|4@6eH>n%5{Fh!+lmMt|{cgwbjp;FS zzk@`Wq;=NAz!Cc49_#b&;~D$Yy`?T`Jk#D3_VzrD2qPp^W$D{V7a7x9VuVyiEkriZ zI_IR)6t8DXAF#KQ9QIa#V@vF7u2Kcx_%N+h3ys;2Oj2d9^wmfOx3j%OVIP#eS8QJv$s*8(hY>fBt+>f7CW~ygqjZRAI%IwAO zwJMc6(1$1_Tb7l-JKvT7PDWU@VNquQcv?S^9l9-EQfw1;VitaHAf$ELw1d)=H)!o1 zZR?X#EGpj+hXI~Hv$w)XEH|KXPCM0dipy|~ZI6Pr0hbwsi%&b9LOMVL@Y6CAt;>-F zQRe-R4|sqJxA)Khv{fh-9mpHwUfKAT$ADY-(l-Kw@cv|NMVCvIRdKMDbJHrYRL;L;tZLeWGL6y(D`S3IMT5}e)EFx~Z#$`wl8ZV3+W$lS74WmBz!QxKQ zyKN|kcLy^!wQ!^W^Lkkrdoo*2C#J>z=1zm)|y)jlaaiU^#$WeGKt8`0{joBEi4tMmtei#TaX zZZN#5qHkA^tf97d!ze-g4-fW;WY^oSHVyARhU*)G@csi~Z`oX?W^hCpioKE0J;lAfj z{fxOU9Ajpen^p39b)4(maU*ILpJjmoUQp!*im{WM@xSJDyVb2 zS#J6HdH8;59w$4rqkc)hEe5S2n#{xddbNx7T<^_$t!)Em8>-8*_30U9R#Nj9j;rZ2 z?5}c?md-3>%XlVc^r{``KQ*3wU2={c6dZA^@~vYLNWOtm+i)Y}{K3N6Z)O7uT9rib zQ;=>$!4`z=;k-o1H}bdE9T89}(Soo{Dm+Cf1M>@rbx~ddtuIOjIii0Q%DwwUq}$&| zEY+F^f25kee( z@%7$av#}mG%`$wmM>>Y~(2*<|Y^N^zA{NB<6mMOtiSZBs6jkf^31}YVq1}V*bk8G- zm*Jn1dc7omNzbH;sNMM);%kXeV^iqNh>7}8jp)l~XK6umiN zT@WRxIP&NxsmgX*J@#u3+Dm2z7>#;;Jj0_Mv#JQ+u@%o$$g@^!c}M>bvx^u2_)`8< z*mLI36L}Ksqty2L;sf@F&2g;Nac!05^RQH7(0vK=q-ij$$;XhS_AK;p$)*89%*6JX z*6SQYV!NSgiFV5p!m3gCke9UqSw%i%*gdvMyt~F85)qkg z;S5hUc-|9F79T}jp5>coQZ|3vvG0eF5}4`*p4r_i<&sEhrKZyZJ*xJ!3vMxmM2s@) zMZDUyl-xP|{`o1-?)i2fCIo+|*QD>cq+RLHk~3w3y*i3l-|pt@j}ejKm)6ucEjxQs z%u`ac>#`Y9ULCT&OL^npJuEfm(#Kew>N3g@+b0FapzXEv%n#_1N_By$321w$aA5-qb&dq1qUu{l88ev8C%2r9SG^P@Zp1Jr|Tvm27?pRrv#saZ63$^Oz=2)Ct98E$NX6JH-Mi&`e$R|lqes{AXj75itb z+AxUjuC5T{;w>sZ^LrB~=3iajtHQ6eZUZanT|ia@-Kt8r8XK1efKzy2Ax0}vDEL<~ z?Ag&fRQb?h60#{Rl~|?{J_$C(Lqt~R_@l>GDzOF3oijt2sI?&CUn_) z)a~tjO->U-E~+VW+Xj>fcEs_G?kMAYmpWZ|fDG~wH-BXbUws&e!tFRdoT>>$C(1<< zjv2f-9LF9lOwj6Y8onb_wVOzxq|&GOOlW+6*hiA38+@tM;9yT z76r;%rjIKU87I0x%&_mLRo8YxZa1qssc8qFADGruY-F-1uU#^K<0kfPqk%(r_h0pT zzpryg1`+zg`DK-(s$b@Ai$7E(1^MC!F%D0ZxK#M?aU2kwy19`4J zB#lj9XvX)lWm@MM4zC6vQ&8FWv1PuB;s~+hphB?U_(+S672cG}fgYiZAih3glND=K zuKWO!FpBRyt|RGjpp+A(TuGMUY?D7vbxXDMyu1Yfzfuy{Z-uf?Iz8kFglA~Jo7*UX z=JR5m&aSIDYWRrjsl>$Q(7QmOrS{me4bm zV!v_4()L&rhPuNH|>m_(bc&PoNa8fS%2VEyRcB10eyz!Fve+Kj=qK-NT)+v*VVLIa6> zvjs5%NZ?5DtEf#&ULCJlYT;`E{_k}^JH>9WPam*(9cjL$z_T!bba{x(eFJGu8P2m- z6*w|ss--Ea58@}vHj7k|P>1v-Djfon-164)3Q93_Oc5v#6+fuujNCV8BPXTkw36DzXq!gb`vg$~OF)0$V8V@Q{fh&YIz|@k za*Sq5)h`7Hc@)!A@j2VFX02k1N=gI0{o30vIOUy0LO-6^GC92h-y)5W%Z4B-+A%KJ z_;4F`4+>qgT&Wu7_S}=ID7;t^Jm%?@4T9}SbGUDEn#3ylnxQIc`8Qbg$kXmMAuc_j zwO#1rw_1BUS*N-|?3{c1RlxaC1)8k%DFQuZDlwRHKzV#PPF$q9R@Y@Az`=`;7k~*R zYcSj)i1rKP`^s?FyA&GN6;i(~)N&Ssa}r1@@RE}vLd1NAfBPj$i;5#Ex|JV~fI#q7>ba<@e00y;joMU9^#ZqD zNGj2H6CBye^|{CpR#8_OplPvaMH`juFntF)CPS;}lV+!h&k*7gStml28wayH!#gvP zIAYFX>KN`MspOU5iKJ%=5u`%=HT#C_o&ER>j-Lw2? zMhJeuhDmY>!@ab|WspT+ro#O)q~wuD^c)QN^iWKQX~=tSz_{gh{;cPjdsM3TOo0ZM zy;MxS5WlWab=X{aS=#K-cJ55!bd4;Hq405{7`JP%jK;0e)WE--cp$y@iKHg2P%=D6 z9Z2#{h|ep1>lMYrP=n1d{izb#6b^KMDXus}i#5W>ytTJ7=8}j~4mQg2-h@`Z3F3&2 z*i0+cQRg?zZ!mOQ3oy)=8jClATi}rxdBSS7#WHP~UXo#Sdl&nVSeNcF=9=~t*EX8! 
zGz@vOw{3R#wJhGp|;k!T}|?o!0&w^G5eh{Nn8J-x4e~)g z*x=;1s4&NSrr`7>)|G02zCzMUV5%yBgW6s#zl=>#$4J5a?J(#UWz+bco)rs6au^HB zo_I1K=HD-76lCG&D!z@ML6&v}+Yq#*RPY@8!X;_miQsrZ5Jd)!dTSdKKD9Bbo?kY^ zDJJx-qx#A%L8Q!MjvP_5^HUW%uNr~H$8aafCv_D?gONf$W(#RZO&|*wK=IM~$w5$t z(K1;vX<*4FHS3z=Bo`%1gJ;=aHGIF%uJ^=!8{)eIP<_C*(G|!9`y++tz#iBe6}`Eq zTfV<+V4@(6MvXCAmc5f_7;EAyBYu{6uLPBrd?l_H!8uR8W0{84gwu$X78_-jXrRNZ zv9W@YK8xa+R0p^IVM`nN%M1Jb)Bqz)2~p!d7sHhCt?ItfS}ROj(emjJl4gwRDx!!6FzHdqCS3A(gPI3SDm5^}_m!7=?O#3=HGd;* zKz&6|b*1?_uDK;`KmR2=h{y0Jus=}Z<_RQt-jthBhb&2T-evo_m+oct{ZhOM z)9RsCo!OUAF76^fp`o6R)FY_jH?uO>0jD7Icv)4_WYXbd6n&;;^^NJkma1tr_M|mC zm~pUNTh3smo>i4YH}aWJI`XLULl)I$!wjWLPky!lj52rrKEKaLNCj+Z$l+8|Pk6}F zjJ;50Wqc;tsC=k=sU*bW6{4*tRqa2tfcG4dx9$wFW})DBw0l|Glw@M+un}IjRf*Hwb zCwnF1xN@^QQluC8z3frS}-p1^Guk2xqc+_nxFm!&L zFMj9O11+34Q@v9uG@E)R5|x4ZYmY=xF5>E>83JZWhWR=TQHnv#abeLz)_6Q|KQY7IRQLAO7-S=nOkWYWLbs>)Ixh7 zAW8CWnTr1g6Lj3O~ZgKh(`*2271Eeou_m z6Hm*oi9&vvIiYH5p4W8}DUyA~CyvPL858d&K7=_&Smq1uHePEOEt-`TfIGtZf@w81wt6m4tO$9(;eAc0%p%4= zS)dj0%xoj`G)%;6E0?6m;t}$v#>)lAE#q@~d+4Jp`<&G8! zS>N-^nC?uTi-^q{>28T`wGIvxH?GcyQXU;mS;<5H!~z;4*0% zRyBV%B2jn3s&?nMr5F`7X*b0b)IbV+Qj zdAiw3ij}=>!QLh><762!R6`;)T}vI+*aWhK^i^Y7xZ+-lV^PB6Lz|UDR4+=C%f*ml zahl4}AsL`MWqhN|oU(bEG(z?3ev;7&?ZWw#YgIXAFkDv0jdF^vEqy2qpCXk2>yyAUHGu$XINI=ER+}d2EQ@E8e-7h?4D4t+v z520lBoL1}nX8qLS;inkZs=PS-Kk=l>`0*9bIrCu#6}6Z=4DDK@z)=EbjcRe7l~x%7 z%hfV(9XIS0fsPrA2|6()X*nX0rQ3o4U5r0V{n8+-3{iW%`#N(yo>gN>DK1^v=Fo7d z7TE;LY4!CbAE?cVXM4moH9G5|!WmeG?#e}bb*^`Ko&#K&X`7RxwS(RJ_T{ZVa$#jX z^rUOwrai03D}=t}^}BKMb!LYYi)IM&D(EeV5Y$WY-gXW!yp@j?@{sAvd9%{5jLU(c%D}_n5jTYC3s!`^VLw3wBPf| z0u@vbYPrPRcx4C)FA}{k*|9Z;rsSOs`6N|axzh0iu@of+Er(D{{MYu{`K+)fJ?Ujc zrYZ(+t~?~?hG*y|zK9x8YBi9c^LbcQO?HHf&=cNP%g?iQ`2HlH%n`(E!bIvvbMwCLeta+*(3mC;g4b@4iILn2aoP&=`9wX{!bObW4wjkZdCiMAPzw!Pp**{ z(a9jn;!b&7F|NuLTPH?XAEs|Hb_?tAm^UN~y)6WSXyNIpRFlmZ7ji}rI0V&QdpJp0 zN`0Bj(k;JcE}rOa-AgYX3=?Q*9S7}kP_q+4h)T59D;C|iHAeG#NQ7t1W$G)*)dz>b ztP3q+LwA=Rh&p+u20Pvghmr9wkc8|+xF+dj5Lh)^nTJ6?)t2QO*Fet6lfhCai;~b{ z7}%I})_k^~h3n4|Xy@D^`2Jh#MAXv2Rq|7`h%rOqN&0L3;vgR(eW8#j#`qRl>2gx7m3N3$WAd-fixT+fHY*?Kr8i zBDD>Ct)-X~{;a;rR`}SbFywdC`h`}q5EjJe>FSyH^?HsK(U6q@iDi z722^;8SozEQ$ww1^3|ZFw?h13WBET4M)WJVgZ!-XmezHg)AGtyQ7pQhH@@)ca3*S& zr0bT29JrqQ&@vi^9ZNY4D?d-VGnGg>%bV*C{*&psFF_S+>Q^CmQ$Dh=% z*jzojXPBz2N-S%!8G06$Hf;#LN|K|+lY^~EkzA!Gm0YA~tE}q6?Gy>@R|asGIL8C-LushXXWaZvQ0UIl=IEFes;LPkemYXtmZ7yYf*g`~ zj87o4{03&Elq0h9NP&#EwWkRQVPS8sac+Zq7&>3)7^nFX3EL=Gs1{o2nXCP(YL z-J_JTVf=BjE}Nq|qA5kdL_#Aq!j%cXV+#{ivCBfGlq|UNy4zjW%VuT0#Jrf&{u>r) zQRQ*|#CSUwa_amTw4~B&_IMipIhVsvUS#_%cn} z_A$mG0q7Y*{_!J-bUjzJebNJg$KxbtqOl|g6iL2U%d+vAOCvTZT$5eQ_R>fCUoVgA63IRuxXmfE|@tneXn880Nmp0C{{aQqn))b#FWOOo|2WWBG?pTn1TcfD9Gw!~6S zt=-T_l&_i)<>kihHz6dg;a8VBm_=*dlstY4k9n0*X6l~?)z(-P=ll6kO618(O7stC zE2XiqW?_Hs>bP8ETu*g@M5A%V6zBvFk$RVRB9&{a%p^PoMd&nDKl}zpRAW)8@)av3 zWGJDN=oxLY=E->sQI=yW#qxh!{Pi_=CuM8fm_Sn|NI|wV_ zm(fw}-nHxbnPAO^eqYi**>btU@Ir$*BTw225=&5Ehjq3&cf>a`KC0pkL(XD2+AhX? zU1|AopweoS>%$uds|gC$Y*BLZFwJP}>ofJFSO!rG)UXNU&LCs1I9rz{gn)^th*3J? z@^u5NtsjZV;f7NU4i>S;5+D-Q)fesa8JS6^7a_*y_3HIy3ZGtO89xDzL!?RmjIeQ; z+qGnqTwh^#n0^|vS9hi5(*h|A8*As%59_5uyy_^TGBWwyeAO>$N1tX(dHUcGk=i(Z zcChGoUBqiqS@a~Gsw)1u&D_}(BjOZ?Trpl)W|ld&WO&=ripbFV(9P~Aj1h}eybXh! zr#|RHNY-VAyl!HFv+>UUJ_JMM6d{MDC80R7wjgr(CYg}n8F=gc6_+`0gv*P9Y{Dvz zQex7|hq8*?x5Rd}k_ac=d^37`a`DN0r|wZ4B~E8s)LG(@ztL2QAxIWUK&EDN+UELU zZXPF=@NtzkG^06|ei4f*P_gb(uXK&dg6(Y5LT9q_#~k-hiF;w=rS&OY&fDUx$$~uO z*IMf55Nioe`FOV0tfnN=X&9P_iSRCRCjI9Ia;?QIaSKIRbl@`18#e|al$I$=FtTj? 
z!~twdKJYZorfm^40sH!jXfWiIS&UqO9jOZJSV${6MZau?#iw2CWR+Jfx!jC_fY#!m zKnpoCMEausm~n7q`N$>Jxyl{%Ua>`(jS3w zT1M{Er}jv%%c2QK?=<|%mun>3D{Lmpzg@Op!AqX19D;lWWovo1=;HI%MJ_k>Ke_s2 z<$F4y6n}H|NV5FY5P|r)vgD$O<@rc|2p!)<@y(b{0aL zc-?&GRYv6y0VEEQ21ZQ$1~ZW4UkYwi8#$}6bveIKNw_TTms8m`*dJa@e~oIum6_Wf z{t@0HaXsPvI}(gi6#caHzDc{+1J+L#XeQetB*txc#~e1L1AX%1LY!2kgv*%r2&C~i z@>hbVw#087*7COY!lhlN&0)ZFjMt(SNk~+jqpY&bfZNPvQBd;O(j>*#Q_Ul7Fr}D$ z;hZ#1dILA&TY1z2@FYqnIfI`Iu%s?EUIIol?~$#?noXeWx^#cQ;q?9oJPkpZYM8553ID5;58B44T6zFB#Ol?gdC4j+gadjC!|$_OXVo^ zyia4ls9HA{s%d7JOC4x`+03`r7~o6!Bp{=`AL4tC9k-fF-WDd!9za={jw)3vP1O;X@VOF!p+~mKj9NOrBqVOj&ne^Xp_V z-$UG=5V`~LJNJr|;EPz3+wgfLep0$t%^289#8dnWbQ3%d$-$r*lADDxf&jpa8*u?4 z!Hd9@VeD6d(MhiU zRhs|X&D5MIvX)=#I}C;Sopv-epZn$D-hDy%JE4o*oFcbxyvr3nPg7kFuM*HwsVjKM z$UsC~sXMD_ZziCk@-0q9rAt61_nJlK%Yy4?Z`}se*keXtDEH-_weNVTnN6v43S0!# zg9wBD?GVBoo+20U;`0;=FGKj`bVLbOJWX@{3a8TZQ@{M(KP+x>cDi4v{a@{UXH-*L z*RCQS@pz=D^d@o;X#!HEBTYp_l->n_Kq%5Xh@wOYK~y@SMFHt05ITw!h0rBPfS@2C zy@XJteJhIJ>m7K`ulwVUabN%FaO}OZ)?BmCx##ojpPv9P-_TQn8R@lp{o6uRXjLWH zzzJJku7CO|@-EVH?$MU!JfHiQ9Z}Ij9)pK?u-7mB!=AsttfY64%T|wXknSIk1|I7= z0UpwfM~43X@IQ7Dl0$I`n{%_A`(HQC10ISMkv;lbD*il<-r>-Ay_b#}|GIHa%FuYB z+g@Pa_s`dU^N*ZAsYG+H4j}$@c0#H zOx}OZ-+_?-x6I%F=Nd}M{cuXa@_Y2{w~rEe6Qo_O!Y%&Tb2%*YfS=ORigZK%#!SBn}OB} zvC4io=r=wMPl#NFX{DP@Jwx%yv)3-uNvr#R+*-@Yw<6}W*W4Go)E&txwR+K?4Lbed zl|4X(pwQAj`{TqS4!~K|L3vyKX4SyM8F;t896i2}qatD|x^2fxF@2#RF0KCim4_@bgv#JE;870#y4||5cC%Eswg)Fcf zv=z|E!!UC>Y_*znse4Y;_!2`C@@wci)V}+sRi?mMAE&uDb#kBk3{O#_d=$YTG<{qc zHSGq5XBv^L`xuJbtG;D-!CSFZ2E=J>MJ52;M9UdE;n>*Ye%u7a0$j`{fz?2LpYz+5 zPC-YT;yaO0>tsz_yg-3n8f(D}=b`vZnQYaQ26!}M;eK6suN&uKKbB$=JqSR}b!2Gw zyR0E^4rf#?J->DR3f-ONcv){}{lEdlXB%HhI$QN|2*Fm;7~4!@{dv35scOeE3IeG?SqR(Ww)9ZTQ$X}goQodJCh4Ilm7^(!G4Fn>mWsvq|;S^ zs&Kvd4_h=M^3Ue%I<_|J?Qf*1FVD0)aBXn@o`~~R=Z@L_yua6%ff#69>K6Ttll|9e z+S?YC@j7p$?R#E$ygAJHA|)jSu^4`_JylULAMkWiCm6Hc3lLRgt(?y){~$5E;2y2J z$2JnVPZrW19JV_C-f;Iq^K-siJx_Twn;Y?Qk%XrZVb**%>uZ3mgv@Iyx>Y`=?7iBh zp#+->*~beYf}#wuy2sr2Sm))(_uLcJknRk$FVR-nmUjf={v%)#IsrllD!l#vQ&nlC zF+mU4c`1$C*P@_sS&Eybv>-@0`)ib_Q zCg1tJOAy>e>7AeK_RCh&X2?V7lP0hU&)Tb#_(j^Js+jJ0sR#@g_;+c=e?K5v39viG zovdHp+T+r%0U=cL0M#Ixth~g%=ZC~y2sIt49!TatJ*vO0Cdqy0L8fv*>5rk*KXm-B zC+(V}pfgp!$E-Yow(liVx(fMsQZT{Tdux4ZYPn5VTV7t?^ydpPAo7cecU0XcAxf5( zArLOsn6PJOwXPbNx~heDb*x*2fXU)ndb#>9AD`sS?8`MMU9#LG)juzkR<1;l^aOx* z_U(xhMuv7P^wM9RsPr^PvOXh;$}|6vjDK++&K-5cqxsnVzNQbz#2;f2ybMN9Tj{Et z#?L;w1&0V!gNe58XZFbO0YsL5Nm(C3r4+1gkO|VNz z(90qCj71KM+L!)d0?J2%ryJQWAlZMz1xA)Yn(V>2{wu5l)7r`eeJ?N--kwn7+vknu z`Jo%SJb|Vys2)FiZ68fVPEd4nNZ?=m`6UNB@n&ItX{>j;FE^jl7}*i~eXoFTLgUT6 z+xB?mS1z=ypa9rk*Rqn`t|kKPx~i$nq(}Z01^D3A*#Z0EDo?Vyw}1bX$4tWe8P5-D z8yzF@)y7!nP{J-QSrcU9{k6tLQU42U}d7#k=GvVy>SDGgWuWT zP*ePLk=Gnv+^&O7FaCIZj1*EaILNrnK`#-&n;BQ!{VamGWAMWyq}>7*0_kVh9=Z7*>@t1G2tC=vc7 z^VWcI{M@O!?d7=oYcz^mcm2LZYxJO^6cSSa;29d=s=Vm0TP?Fsaw>l%{TAmG8}`O{ zxx8a2;rrQM8v=cScv(Gey#L14^(?zH)a;wW>3|l&NDrngC;Lkgoo|?LnYf$JM0d=F>UWdj}`YEB0qy zLL?dE<$T@39k5ixX98IBm#|`;FEr0r{*45uId0KlLRgOV=(srf0_jbt{2Zv^7goE9 z7dU2xdO6$p_@u_SMZ}(UfZqmYi>c1{{BQKJC&h*g0(cScjNc=0A1hp~6Qx|t!$-oz zGETUGvD>U2T#srOwl;{pIeNKy!ZMCmj{HznNZ!+pvo-z>>c2?W9;0f}zB73bkWj>g zOe>0x+A(cCSsQZKmH{A7y}{f4!N=>CPMgfuZO)Sw!Bk~u;CxY(#D_y*=DTEjf|!Qp zKsp6J>N}fw6oGWx1DAqV`^_Xg`E`02M`8vGk*vx9>c;Ln_KMNypyg+&FSrrXXSQeU z6j|>^G1e_UoJxB@eYJYFMAHupChwsMUVrNW#$3}>Y#Z4jW3Eeo4h=WxN^UpbS&p+i zQU``f_X2@TazCN5CqB)|K*M5(FEH*Gwxgd~w^l><0+-V; z1V--nyvj;Rqz1r7Xn+{FR#^vto&=CV-=-wqVyWa8D#j4#K7EtO*5uI_WC57zFVwc! 
z8fEMS;4d0;GRZ&e11&_DZ0Uz-3{>{%k*-G+zJtBlui;m*-7Lk_6U8kYgS_vC0Qdpf zcj!d`tMOv)$Oqr;Tspd-6G zYrOy}BXu5}XXT@H)JuSgqcB(WmU^HtDrg0T!iQJLKgaL#j}pv)`W#?941S@)0}wUG z$9Z>9a-v`n=+P?{oA~~ok)64bOhYL@pp{TP#c9KzIDjO00HmnC+PuKEVJUV%tU0eK zCwKWv*z!vH^ZV1FX=aDpwn@rf zU6_d8-PG^=^p~drKxDH{#UsTtDCPlpiL=ku zLjU2Jo<0MM;l-f0!=C{_k2b)}xZ}6@=Ew|(L(hvVWI1{(kKEqX{uC}yaCvDwGyt2b z7!1BG2h0l(-1M=!G0T> zRER5+UYOu&WZM~Lw4xsAqT8?bWlT?BDyJG84SNqf2y3@;^5p+s0rREsp#GhdwHjU| zP=P3!EXd<(U*d#dBV45PRV{!6BKVf zHrhQ(XXQm?y)(aGeZ^2Nb2cJK42fkyX>59#!6buVz4z%xtZHiZx_OO$sBZo8Tg^9A zuvEwEcSwbp=Ujy_p_G;3g&=Pkz+SyX0DTZ^u8BeB8FAT42aU0gydx|?6xr#~sXHrK zz4O%z1gA!8uR3d<#sDBYC6Z0W&2I4Z1&EpB3L{LIPgQe0fJ$^mk>}Di7rNiX88Q^w z3LAIu0L-LRsM!t1%%z^s-$Ry9zDv)$*^n$&>U~za!>2UzqM1KiDqNRaYFudv-f9Uf zf6QMKo!toik`lC9nyNL4^fo6C2=&-l9*<;eC|5ZJv4t_mouW@DR+s@1?uAAH^dRIY zJcd?m?Qv)IdXEaP-iN4_T1G3$v;2*Y*f!jL%9)*gQfN<4e_w}BTEB(ednlT)vx-@( z-l8fvqsrD^Om@+&n!?eRZ|;jjae3V}&ofguM|QVIdXoUO0f7i#O?_fy*}oVW@GrqfsJ=&VZF8K+M!NJ7kk-i2@cISHsJ+tZU{y z3pMLEtLu|E0v6xok!I_+2kT#wp)3acVhVlRp*>`q;{{M~THRZ7Zxh+AgfRK#_?+dP zrsE61pg9Kiu5W|CHHG~L+Imv_=M&6x4v~p1wG%^F=X@TQl7#?3)^w12&nWZ zjQb|rT3r2>4ek#y!b8I~F-MfK*GxWU-kZ<0qkbc){{8BRTboenh;=n^3@%o47SGSR zrx25?oGrGe4SIuj*MiraPp)|ae6ZTxP2t^T;ob=Im5js|*q<2hnlW~X-SfsR>P?=t zOXZkap*diujt}>z6FQ}@=uYGtm1eF5+{(Q6J|&FGZD)ydS9E*5Me%z5M!QQ11IBfi zZ0=TSCgls$XINXCbrPzahpQ5+o2zO27gu_uBSwOq9%YHX0bE zK6$5_2(pG;rEaVZoPL#6GO3u-H?mnf!g@FV9Y7aS_?~)bK?DbfAM`dgsqLHrZtF|N znooyoH+MfBQM|l#ICl9Xux?^AOp4o6+NHkDvYbx0$w$tTU;W6PWS8H0&nIhiI7iC- zTTfgjK<$+DfQ}At64vVC0&N{X7IAF@Inh}4WpM6RvrH4#YxHYfdKnR*$^*&UHg;*; zK36`A*z^ULPa5D+fOg6ev9t1>!A;d1gUyphOvhjLmD84{x12dD%1SJe*eAG3aYsEa z!IVuOoo%a?rO7f+M!6QGgfBJ#`#*@;xO7G9BhmOU{MVO5Ixsg2ay0v zYAW>P^{?AwRELCt>oXg?P&HGK&;(#shU_q87ZRkkJhvNqLvTE1)m~1CVs;((h{x6{ zqH3^8B7)mFc9 z)a>mh!27sDmJ?`-JoZuX^n0%Mx%z)V!eT}WUUZaF_8$p zndfdOd;{Tmb<7BHH%~g_g#7*0bOeCYlu)}DALW+xb*ppiVq}^K#Wuv=45}Kx+aw%R zZ1I)-_J*Z}zld#{y{oENsN!F42|!5<#=VkTNO!xq_v`6c`%**HT<QfrFt6P=l)=hHJB6HQa59^ z(3*q$gO%>CYCNe~s;;;*lXMA76Y%NK6pyg)mGHC@!g_~yT`%hs1{G|`>f^g&IY!XP z`rXa?&W7mGij^MCUSJPJKb|VRF_x2C(3(TtW-S;R&+V#cO&F&Y<2M~6^t0Y(R3_cX z8TzAEbQhysOG|9C*l`2AU8gH`4L*5R!Jv)n%Ya>~nN*{&xkbLGoe8~Fv!|+ryDsO1 zg?Y9O;|xiC(Tt6o-AY}}nQ~W8U5<5WU}coCzM^x&X6+5m4?!zA5gi3=ZM_A_mNF-d z&Fykb+f^Mc_85ZRBihpNo?REziBeHvll`eWkA&BTG@__K#Tr_Tg$@^1jD`m(_dtdS za*gn-m412wC|4)$z>erYMY*Z4hp_8!f^^x{SX+S`U*E}7KQ83LEG{0-9{vVz?mrh( z2Xu?1IEf518wlL{S&X)>xO(oO+l>?S4|R(5^C%E@Wrjv9`4U9BnM(CCead5^Ymh^% zLI!O!A9glUcYVobxpcUd%Qm!f!GC14L_E^G}?1Gg-BfK_-Y9`p{64r8|5Pv(E@YmqjH2sQpIzpy^=U% z8X23`L??7HqEof+DmJeCP&kvGzBdCb&kXt zkw}N)QJ5zUcb*^uQp%*PQI~0+XT&HZ8NrPoR#%%+8l%}=r13wLOjUCR`XS~`S2$Q@LK};6=*SHr+*A5(D)4HOntSiHP zY5;A2tK17Xh~^)8pFF-g8G5rnppbRhW)0TQt(}D3`yl-efN;j<-%5zuGK!d#!^cf<8?$Km;{sG!yCESg z+!X6R6Q~9;&hgc(VrqvPe;o-!IfIK>VRNeuqAdQSem=FJwgf*D_W{99G2_jN8D;ef zju~f$NxB)EdCW-AgJs5%{v5!?1x?Ne=(?KyS=LT-N)6prS*>Gu&|%f&S!cGiwPe@L zP~nq2x-_bJYy(y%i*j;~p<5G4iCZ6fY*%5}WR~kth7)}lulHg%n3c=c+PwVizI@8p z3n9g9R_Z?VLdT$y*_IJ$Y3$&_AzTwmeGV<0!2#b^G4;d#P?W?Z2y$b$7vA7J*k*lU zPq=cb!%;Q!=8h-RZ7L~58=hJn?zeP;wxz35axW<`^fyKtRce? 
zDwBEn$s~d(EJstD>Riso+r7gt*#kI9YDkmr%d^;CfN>A3Dlx zLMT>WTj1|s$^A!_15ha+&4vvCfVOJ@TWu^pK5*;as`kzo?QfJ68xJ*PyC9-QHgg5~ zT9Pzc48rEg8HcTTqoR#ZmYTmbMytyK#8^LOFr^Y?5;}Ig^MnlqbR$ToiFEWjeOc~S2o~fff!VMk+ah%pN{@Xlf;_C!*d%!`ZpV})sm*w%%)07u|~FR8y8(E z4M=>{XjK_$UIoQvyX9}}Xu`hWO&N1265`I9$|yP-aZHEHn4?%LVCM_U@F7PqgOl6X zMu*0h|H<9?53Kd;O~NIlYhz|nUPDWV(*dg8%j|E6*fJfS#HbMCj|@>j(a$OU zK=+mSLe8IpS)OS=L?hR=S{fz7gOe91KPhDHkvC|Nch({7=(wVfa~1=p-HrJbZK-~Y zPEb2=ghD1_6vvjdTw4z7rIv?=- z{wmFxXB-UtbcpjS2H{i}ET)2m$eL}HsBrh2LI;6d7Y5Ejw{!S%&EG1~*N%sObZeV~ zNU`dbZ=4Hyg9?}jX_mipOW+nwt?hpOJ^;Bd6MzfW&WnRqx`pLkXWoC}#dEaj9TV+I zy&(J=u#>OzLcy*^Ln@h-HS={mol5mP1^CTS)BFwjR5xUN;kq!RLLs|c6|B_(mbC)& zkDNkM5ix0`OQCMC0mxuWG_|O8ZCwce#20S0RN)7p=d3{u!XDHjB)pw$VH=-mf~Aq5dZbeMC2>=n#mYQjsi_tkF6l>iXc)zt z;&TSdm=rZSc}F*%K&M&llEA1PEr36AkR2T)geR$?RN)1;%I+fQ%gI?QOe^+$E$ z|8SVi_(d4_4_j-!K6za_-MhVLIiDP1lXJa;%hZUMSNlIxcv-=Ws9DF1;9e1B>T;;= zQBh7(&{qouXH-lVH?x(x=6jGCD(!rJhSGP&WF(Oso!LSi6t-7-`Ecv-@a`HnsNf^A z@g*PMbNa(63rffxRn3o}hM660=7$k7IL1KJgQX<`xYX92cBWU|5E%19P-wjhb8+TZ zo;cH+?)`2yMDIEx65V#b`h}s&I>>3x3{SpzcKxbdva5Q#_4C1=X7#0l7T3{IA!7=S zE?*E%uO5|o7_U;9L|UV>Le#wCKC!Pv=Mn!pLoK2woL->BAt5`_K5J=kWJhxI_B=>~ zUi@TJ=6;WPCa#GF&a0L998{}&ucNgpW3T@uN)Gt5KuBBHFEDRC)M?C(`z)X@)n~F+ z)P3VcKwb1-bIjLO4E(P6TqrRP#U2LMRS)5kOwH{NS!0tY;8 zt@DV3O(1|x%!=1k7%dg#Z}tWOaZnt?8&ArQ8`&qU2tUp3FmHiSL3g&$>Bf~izZS6e zxQ9vA5+t6CE=vQDarB+{Ps^9WSBtw-+|x+H+)9rZCGMlV*f&^g=WGWB3jxU#SBs-a zufoIAzMy-r&uB2|X}YEoMGar~L(bh)_ZRgaxOYJT}?yCc`UHBrsg(-4=@RXAm zkq+8Q_Am6j7fQ{lAI6gteVcilWJJuD(1OadO$%?Cm#EcQ2WnVka6^%=FYxm%08=*G zB|C?{r7v3B#9*=_+XvFWJ+lL){=0HHZYUWH?%a_by-Qet04-8GrR6d$KLkqPk4&3n z*|LgD3TfT*hWPsTN28!)Lq~{VM)G>HwCFG>P?G8LI2E)ktn?KINgx60aIIptki`d( z!)g^Y#5-Yz)wlg-%#?HqdB$b@xvI<#&^P5NmQ$lLqK&3MLjCIt*SM2wrgL(?QJ(5{ zkibDBTL%NkD5~~TJueRt>b(9G9oyIfapVX8e9VxWrr$uDIc@>p7Gw0_aGC2pw-~#; zHOAob@mLe;&2EmX-*uA7B`g*8O#HWj&LEtBXGX(T;qJG>F#0>OxfEl*;a=FX9Ev0U zhIpYy_>52?56g_U#S(X*+GVUSQB5nmdqSpZF zM}=c+!1yYp#?k3(m^h&7K@ITMUZ26QjFom@Y3r^haiZM_#(aAM5}9oq;aMZ;JS6>z zM|J$VE|@+UIHy2Puand4Ap4C3tvvW`H`b~;!v{i_l1F`Ji(9^-#o@S^BOE@KsZJRu zrkvzEnf9)Iu~lLtxK(z`!1l7W|2l={Zm!Hxty2W4B0&t=#(&K;RIC< zK3oG-AFZh*}Yb`R;$ng=NzgZr7!%tT}t)|O_W~%So z>^V#vcELp#-5kiQ7!VaRWt>FL!1!oWCcaQRd!4z@+S^ zBSTAgD>?v7PKJ+_L1E0$xaK%zt489*z;7#r4UKVgv{QSFzKN6GRNLwCMX6Pq{_JW0 zx2%LG4+)*YJH*mpjf`*widLz*rN{x<3ti_71e(2;HfZzP*MM^io6Wt7c2E(Oq94UfA3uXs)b4>QaMM?$_l_md!?@spA{AnX9piyH?dc zt0)#<;9X)s7|Ld}VZj+^V4F#zz3 zow#^|{_8y&t+4Hfw54yIv!{OLryq(+T;p^QExXeM@EC_Cj}Y1~n#`e^X(VZ6XNwMM zGr!RZJ;&V_UNYbwv3TJU7C26%qpuSK3Ht2yrZEPjnnQ=5&#_oF*PJ=cr!R9{vfnVG zkO|$q+Rbtif6Rbgr_boGk=D(~J1}3en+Vx7yz{p$9>}={}>z3wJgeM%#{)(;+N@jeeN=`Kp<3XcI&| z4V4C|3w{Y`+wi0TjQf@_=`+yA5*57J+&Z4K8zp9quO%NWiizn8^5Zz87bo{G% zdt)U_(QL~4#nsF7>G)~_so<0l{0J)@Y$65&Dl+ia5rM**3&&?8ghv-w^&@%eh%o`k;0JwXrMp2QnZ+ z@}Ihon#+~YmGIR-(TsKzbOn?I%qBG{5mazE%RqiCJkiia+?eEP)mD@KD8Ac#31Fnp zcnd_t#Dnx$aMjjw8%xCS+&rR9!>5vsCp*?tutP?)!064`v>+9%DRzt=j=3ol)zQ56!V+PG9z& zL>jpFeje(Sw!GK~Py6bU5#}XBAOfpnPHwYND>|0ex)aGFk`BxQo8J41T6lk znX@=qZFF%yJvye4+v3Q@lu5s}N+v%wk)YQHkLXV1p2)^IB$#Ya^4T~UqVnDHqZM~` z@|in_mALuOj16--KJVx3z(jo##n)0xW)PK!lE^OANam8w!}XeQuR;xWPu=j~q&Ac} zjCrtGmLpZ?P}w}fmjfhj`9)JBwatIFxBQy_2vI()Uj5RM_hCGeaMi0T?A2Q-7^iHD zpEup7#>9x?>lUs;HN|q<@q=Vl=;-`CaN5v{7vVBV6;a;`L>I$1ChD!sn${OWaPI{C zzj)D^H&wwvR@-yIH)Go^SA%z?)7FIS+YS(EXY`x5w~!+nNHoBTwpBLX@dzD2Llg6H>ND0$Q> z5pI@eW%1^l*H(>lAWvC=Or>Hz@FcX7B!$$Pp8hnv=fGD$@-+KPfRrq>FiE#0DVGjo zUvvS3veJ9uv*E|u3wMLEXs4ghMxwQ<_Y$n8y>PNnDp)u z>X86wiYrLl-mlNAj4k(i(2QT;eE(d=Rg-&@L7n&gC5xY;c0(I9b8hUvu+t)cg+K)S zKZP90B)3$77Ty;|rMAb%AV>_fIC6G8&LZVu?f*7yi-2w>%S$^yJ4Anx-`A%pd`$*E 
zUc0fjYY3WPTXApTX`plRduv__&)%IW@c?@AoMqkLR79IC`~-6?QvVg*oz;~U4+$g6 zIMA8%4y-cWCi1_{vH$!joh5BZKp=LnC(@gQj{*d|qLVH|?woC+H}EKX$QbscTn!<2 zIaege@9(al`fv(1{TA5M^G-t*qPdOm>9^U~*>cxbzN{DI>M8IsY!2FD3|@K-ZcABw zKN9FC1j(cu9)Im)%7YZUyj^q3nFo4v_oxI91>H=4pJM-cBfk(&NjZ(}jydS;;WNg1 zEyFHq++!BHqi5;0jFgo6zH%1}ARIIq`S5oo_i$xf^z%qu2@K@Zl$ zMC;qCZ6CAGnbigOx@>*~v9=}lQL*)#A&Kww3ULMYRiz)E0$ zkmNj&L$jB2Q9?at zEctWTUPsDEN`^XSZ*Wm0^{$3u^v>;cXHB@7imB|B{Z?8+9*!`*kO z2SWa&7#9)qdQ2;J=`G)_O!2^Btl`3ioJ(W<_%f(}p`nk_=ZS&6E;1%pa+m%ynbW^n zCjFS|xOH9}v)+?pTU&***OTL^r9a)vi-d+I0mv3Ok=zH6d!Tk>BB^xuJi?H8f#QR7 z>QAEwDFa9D8|~QNbWiR;)m5E{iEdI78DdE&>T17KkpCwn^C*>QlKRZ+{buEt|M^Qz33l<6 z#O8m@^KVw`5(o0v3t``Ddi>23%ikj^3O9BM9;O-hEcyJ5u4#C~s-QC^YA-FrioijZ0{$DwD zKAozm$kgoYt$lZ2{p$^omK1uAfQggh|*?4q>23iQwA9thk}gYDLa z@WA|gt#ujv*E`%{)gU};o5U0tB>%pG5E_A;>(##x!$4Mvfg6~- zeI+3@CUB!3{#W;Zn&#dSF#2C@fCAMCyLa>pNYe=ZR~ul^B%R^@^L(2CaD&cM`_q4V zl4h~a@Sirr(1Ft|tWeMb!~gSIAJo*BHiv?eSS-J4IR6oFI4G{o5{sp?S~RWgk2gr! zvvg?+e;F<3WGJ7=(oy959R(N%2ZyusQEnww183(ZMx!ZVViJ<(1zSSJt1&*L{tCT8 z6TXAF^30|`JvbgL@*x2)pGSKc68P$0ZKbVGR7&bL(PBE1A|l}$&5r#p*GuF!2QxN% z_~z($`-P>tlQ|MK`*2A3G&`woEwu&Ls+`5D)e%WaxQ%yD_bbn$UU2W5X5graUz)B- z42->4WqZp9$u_*TEE_U1GC_RT8?E}|hJ$w}sWWYbr7ctp`a@;awYvS_y~*5A!=uK9 zvAJ@cEiWXlGIYPWhlle~QV67Lno2q3rKJzQfSzhfV6uNxhe?4aFCHFFWEMN$9xhyb z9eaF@xmIO&q7na5%k$xC(R_gVjmLbgxn^un-`AJvUJV7-%7f+1oIxmv1tIs7PETJL z5ueGWO0i;pUtbs=yX{=}aa!AJOf^CrX0xcYG{VqDyGubg6v+=rad<2iJhF0fq>PMH z(?k`xFKgw76d1PFq%XD$axftsdw+TfVzoMbS&PkPOVmB*h?{w(c?mz5K0%U#^xN-5 zfF;=$3op%bJ7H2r?~7_7#FEY+mn&JlNLpjHC0c(rKuvwPqv`fYy5e%3{BSl%Z*;A} z&+lFJWBh9ZlQCJWrfWR}0%2*kJD4esNJ%lfH`^IW9%#&-KV9q024)Do=548pynNJ= z)5Y4Nc`L0F7bT_0-StUn2?2+_^h}Wo$ulrL5C#Uh5zcG1=~?5_X2p(TN#0LR>t_@6W_0omwwGkaX&)p>+6j~E}G6qB5O<+MS4P6wOBcc1;X z9_>e!;*eRR_#<@JPo9eQu=cZRJX2^ehV~l=nYX(A<-u(2((@3*XrJfnw*%9DJl6zn z4>C38D|EHLtpX>n2%Tf-)T!R%e5ty39f+c=aJ<|Xy=Spl<#D*%%T>qk^&@c2v!!J_ z9-378XuU0t#bhL3?{59kPkM6Yeq4xv5?VQ&C=3t!C_y2LE4|^|uv~ewsB*+=;r-BC zj`$Cgy##KDRpJ7p@q^!0!hl)+2<&y=biM8t;WqWPi{r+Vfn>X7L!1GUP!3||W&6yf zvDI#O`1UW(N+!UxP|0L14}m3rpyDIitAr46YiQXC1$`h2<+{sD@96*ZMqcmG@#>Ir zFqWQPsjD2(9{!}^pah}tE8%^jd5huw^1N&@^LFLuwV&5Zc4FGv89cZO`l&8g@n_=W zFP0$?7+>?@H%>Yqq3wK7IfNaN27aMaPs#pTPI0+XaX2jRJuGQ&^w!Jq^n~Fpl2@n? 
z!JyZmS#drOE=HtT+Kug&))Wx?uAK3$&LVwDcFFfWj{Fz%4>6l%usQQtB9ZbAA7GXj zs|+(^E^%Y8%A9jXH81ZDN(b*Q4~}|Nl$G;(a^nl}a{;@e3ilpcR|&I8Ygg>&&nnep z{?^OwAWqxM_a8pCDZUd^J86AxnaqWE>5j->{Y(Jr98N%Li2@iwh~Kxx)86mZLeU$>^K z-vl>)KBZW4JSeWoChb~ZO(4^izR5{6ll^v0{n;GOk54!Vhk0aXYTh8Vm>(+!qxuG) zMuI3rl-{#U++i;}R*`K*CS(c%G>MQ3IWpVacy}!Qv(uFc?~JXoh)4hrMSErz z)?IKmXK%98QiDw;YW!l2DQc8Wl@!-4zgf$z{^Hhhvr{3dNHZ2w>K&KS!(z2DIeqIx z1z751@0MIwSM$|^vFqb~&qyQ>A#f5Pbg68`Q9)KIlJMEFb&i095iAV}cG?tU)IF2n=I+)DDp8vfJ~ctyoxHjnOm}JJK5t zLsdctfyWDemZSMfq1S1~aL~}<7`V7Hz+J2+;<6D263)-i7g*v8VHpc@7MeYroh1r3=M1|V z+J|sV4g2;z49mliiI}9aD$k#<(XqM^VmcW+5$R2ynZL0pR7JkSN1_Qg+!p;TCgqI^ zJ1-~2sb?%CwzwL^CnFQSkWq>ib9^yqx?p4B^G1lFmsFuQNlh91XU(gLzZx^b&vJ+8 zk&6W`KHJ6idiHDE7ioJ1*aUo@4caF$z$SQ;M$@<4WT-)v3%1P@)s{@OSI7XyTmn;4 zpec z9t6CGI!V9>u~DSKf`Y~MLAABrLpl)%Le-AC_;y)-=(R;)peSP{s3c8vim5?mpuZ`r zE?}u#axq^XMBb6lmX8@G`TgPM&sM`P*p$A#EG%0>Z3CirDOH1XjYBnJNZKD7uaX?5 zf)b>J*5v0SO32UKoB-=GM8dPCi*H=B`3hETUcoO}@ea=%eG9q6;_hNkr*?WJnTYtH zdLxA6$d$w=k~xyvkvQph|H@|H}-OBcedU3FeUKhSx7v{b*@5qO_(ER6s&aY?U6F$0UZ3h$dMQPK09&bys@ zn_FNj-@v9319*pdSil@HbtV_jAVVv8qc-@i>cM_8_DxoYN48^x^iI9OB6)(dWSM5_ zP2!ps@!d7<0L2ep8VnFAVOV{s2M1|jDZxhY{WkK%T;}kH*uM+60Q>T|bzu^nF2n=7 z9!+xkF?jV6vdRv4AQb@zgdk=omczb>H^%%++}Y;DT{e|AUs5jfIk_KO(ol_gU+@;y^fno3B0yKHnhXWwB}jtdPV zZ+KlX@qv2)vo8M&N6ZSo&6W{f+~wEnO`W?P?q@@#ci;B$7#!H~YXWgr-*-r|z$;#m z6BB>^#P?P9W9M1E&Y4*66ARfafAU@CH;b@}jdE(ip`Eo(zpabC$(n*cG>0p#D_4~p z9wRKsbqyKg{wtU>A}|o?6ez$i{-r7aIF#Ppb$t$5%(%#b47(RG$;rhN2=cZ+N=S6= zJuFI8^?Y?h{UPP2uV z4^pR>t-7OT?;Tk@--{EjEcitH2Av=O#fDr10l@7OZn>WZWHrR%Yh{R#>-PW<`!X&q z2??Kr%@@;!O-IIABhH{gDWeR zEbi`cH=?Res20O_j&8Fo3iboGt|6bnCpG>T$K15uWvW|o&zz4Px zayD6RQb}MoKfb`H3FJH;n|hI(ur8k?(c90NqK$A z+ecJ~eXV2gA*P*BJR%~3>Nn-K)J+@0I!5_7pizn{`&bh!F4Xfy_u zyvwNEpNU?o==L_d)YEf9NlR<*tqmG_02*dsb)9ln7Jm^bDKSQ%Bhja#RZ`-$;RF$) z5woe(*$-%tnn|3&lFXE}JPJ_AuZXlh;YDiYYn8^55&;2`@fH`b<+y-0wVZ9~5+@2u z%d3^n6loR*_>kIvf;)Ex801f+LZLA&A$XPdYHGH$S(12^H&@?moTPtN!S;4VwFn6< zy7n0KC675?Mtv*?#rea7e4zsT!_vw=f)J2m)5fGxoDo4X^zGCv%ATHHVgeDBUd4l5 zQTyK01q%~gm;`JPA`ua{!0ONA`t9ueYV(7#WV;s45E8yixd1kJUigLw=kiqpiBoip z7=(&GVUY2HJg8^2js+AF9GzBScUk#lZBpbCUspHrXC_RBWg6N3i`ZZDglrExad#CR z(dqY$L7U)CiOR5##C2B2JI1rS+ARKijz5IVme85d-qR~u+QXvaoUlDpep|v*N%Y=? zdgBm#z<8Dx17teE@9=A?S#LJlPgF@Tfa;N{xK+=k62zeH#s@~t-s}pH<;`(ux_E52 zc~B!;kiSo;vnD&MvRZJG4ueR<6I(V|GY>l^P$Ufz58M8|QeaZrT53$Q*Uq-U3sw2a zHx^WaxZMGrqmIz>7qgmR+Zh#6wK<3Iq=2{r?_kMOWvSGd)h_--`^rMio#KKi>#brH zT2zBC>DeGU3EX{Gu8UAN3jF?1?&IPa^%B31em!{_;eQrtH0s=JNpe<+2CbGss)Rfo zxbFj-e8&i<%daw^{UenO4p+AkdbznwMggaesQw&(&M! 
zGMfEXZsw?d30J?EFRLBfs*Mf}$AOyH{XL0;ER;st$;75wYmdn#}<%iQa5UX~AJ=3sjSA zv(vTWG7UE91{-h@Hw?NW5~&T<8-&$qre#>LkdkP_c+ybZk>{WOke^Ki()ZOAe>?KM zo<24|a^;E(C&I$QvieA({uxo9MIbyOf=a!?Cbq7QMNURfPp`kU^&w+n>dx;8pCc~4 zWW%XhVSD&<^@d3h>`!3K7wTlb%E|V16T-wy=DzlOm`UbN?dT`$DT;xGiD_$jyw_Xr z3aB`sb}E%ftT~FR%{Ohdg=Vnz&z0hyr_(%HAf2CA`AdV?$c52g#r4-j`3@t#kz2Ug z1rj-#mD(Rk6k5~eFstE#@!$Zx6cDv5RVbk_!yJascjq`2jPjpgmpvPhVz@i9{cqBmDVB>lw=_RFNgS0xc zjt}S2Bzrc1Pp>}1R#=}wm|JxT4`a@whi3+a zkydYGg?s|9PGb2;1cfJxRDie%Wm5to9SDzxf!?1#$y)uR>-ar-<0%{>#V{l&>NOyU z+LXe?ka-w^zgyTdK{r@9ji28?U2+o7+D?eo(}uD(kd&h ztZ(egL-Uz~wSIq(-IqKq9u{7nap5)^7*Uqy%NXwzOJ5a0xwwg2DkIb?_z59_45qYGu>xY<4l7Er4K36-wU5D02~wSE0!tdt2+z$M z%x1_WvQK$7J8kv3T#HOhJ{}z;&<-xuF~;|XY!q5 zsTD$f-(eBI#iYAPUoNNi5dHqF7c0_B zuw~-2wdQ*>j*$98w@V&^gY*+Sp+&9Pavh;9n~ZSxaoDSBLz4W1nNL0aJY%n&uKLFN zBS|d>&>WNAZ4MQPw4K1yXttdB8}fKzO|h^r!EH?r=epg$&?ib< zoE*&h-C#`Zb3(|8=md#7I>k*RgQvM*W;|i$-EOxc8XT*=ldUC>UVWeNKX1JgrJe4+ zILNh~i8md>HTz;&BLpN0l&gM(b6ifzMI!N~K8+tyqVRs@S7m{YR)PGogeq|L*fEXm@waA_qX)DISU zr%L-)c5k+pK2-Bh7?HVkusbQW*hGU}Yy+TMeo|83T9BaG`r`3G-x{UEU?2+EIIvue zN6(RHf#oYs$!YiAZz65NFbNi-K<426NsGCz9JD&KXJ;&}LrTEMSB^0*Iu;Mcg$v3rtQO@C1hm{!dB~Dp#H%@IUI?kc_mF3 z(xb&%%qY7e)LGY?wKuG-CLF3*#m$uihzvcWce8+GxaIklu1w(?(BkZag_bcj`~K8h z?^&K<%P8{0Kz{{%L3Y%9Q0||N-a&5=YLgtLF%dL5>@OtTu%L9l?Dh&h@OVS@?1$mF zPkb-3ZhLz>kg1^&3dN5uEBi?L%i{fsJ+HFiY_+jHJsllJU^g)ksnt~6jqv(U=gZ}G zJgwi~+}K|>lZBktmz7C><>Nbojl6Pkb#2*y>)LWuv$zS!@c}Xtgrm^&oDVr{hVhZDV4g2fqK@KtG1JU~!nsEQO^bm3{Amqb>jhT*c zZ$%wTRbVzacE1T>;^B`FD~D|n23mrFzMPSlK z2p@Yg9jc^Te11nuO$Pl;?ye>YX!G?$DeRiERCA(-&zX#_u%E`m!Kmnl&U_~6il`KG zl|msw1oZt<97N$qi?c=;f`};&NkLhI4I*u?VcsC&N4eZz-I}bP?j5T2(^LG^g3!pK z06pQt(MRrku|AS1H2rTegg3Ke6UKFmO-RglBw0sDl#I51TaR&lT^(URiLzU9Q5xYm z7vG!64m|RCfV6y{XLX23)Leb5{%4U3&^mgjh+Yc;$ppshN$X{co5OL#NyzQE5NU;u z(6jroU-QL%BtAlvkqOD9X>zA1u??KNy?z`vKdS8yP#t;w)5UO8f5g-fk;eNfg+B*-qETV082? 
z%vaLo%z&L)@*{}J8>BlA!p!C=|ld=j0{%x)n&@BHbFe;=Gj7)yPVLF_Yv-xpw&%F34WKWd4q`XaIHc-k1hDm=HnSYYMWb` z7-wg1A%PwJ8}ZI(0llQ$^hzm?M&Z3Sr-Cblr|oYD`ha=Y-RKFn#cQ}(56pIZezLlp zR+`DQo zNiWWXclTOHpTeThi4P6 z!srinqbQ$*K(VJByTZbdeR4Oo?hT zUCg@aLjkcL&_w4mWtlCV9Ysa;^&6z1m}t>F+!|cE{KympFIDDth2l~Y1qAG?9t#nO z{8{S+FSfSAvInS_W1tC-pR|ezH#>r5TU%y(I}siu+kkUI_h_MdAL$%O@)_aq1Nr|f z^OY7+9-G~<_RRM81YBx_DR63>(%VB$W0`D5=Mo%+bY%wI^z>5M;C5pmn{DgmW8vxU z8CG!2{ar0P?s0@Y;`ZTuAIb4&DE{BOkK$$9mnEj;bSGWp|Bq!fY$5+;7t8Ce;keH9Q zMN{>USFQBlXpCZC;r}pzgH}EX7lLGI@v*Wv*HL-i%Y-JY6kDC%9lYMd{7zY@C}JOo zVJ68O_=e5Hbg`y?f4Xq1`Fgn$hpI`ML@+3f%jK@lhX+;CbfG4abTFpThW>3V_{U1l zMbzCt2eUsZ56JocsP+p=_Vri5L7@2KJ?n$1@nd(la<(*f==nj&r{udA+VBS$mWAzI zZ%_+;k|do5O|GV+h>UOohsyd-;sL~9$-^{`V=pE^26iwi3qG}oFME+}_BnSSG{-GE z+wo>CwnRauV{J3ufZAEVTXs}QET&O6woPUu9*;bc<^eYD-rK9m=AS|ZK#R?$=avHS z@+bTL3&o3_)QC^^Wk<664K@RVdNFuh&dPx3ELE9MIBx6d=?P(BnLCQjy-ue!$FYSe zR;d)!aNHy99~!DsSgA4n7SCo=FsL#>37m%v%V0?$51m@1*{Tutfr*A@=(xGSSiz-q zI+uRunA85rkofiM*8^gWI=zVVl?I`B;4G##!4M#msToF%KyR*km1r)y9Y`;l+;|)j zx7^&U!gMf!LNV8<6tZlmnQyVbvhwo_T`vnBKC@)mYh*FCV5-yQB(*Bx(^ZIMT-q7S zUU$s4<$!9ns3gZ9Z|olr63mpbIhoP8>|}<`j`@Rt4%0V+Zg=r?Z~94bS1h^lBqk3L z4}y8iusx!1-KF7DJ(bKX<nEWu?3OSjSnzq7=dHVk4B(6j!SJPQ0 z+J}f+$c22FjznJyRh(u1u^Us9F#|LckZtfLT@epuTN%@P5}18bq1Xlz2!x-O9*KL}ZnfDFg%r_SK#C+B$7eSI~xq$CapSqIL`^MFxgMiw{J^UyzQ z35sz2PT}Nlxi?hCj%NpcY*nQ)Y59H$cYXZ~)n&I}Q;hqhc{NDiN?{4xX(JKD1OQfu zcx+i+vv_vDiG;0G)u|rq5uF7c)l zu7K7p3(~zw`SEIzQ=jG(pPsXo=WCyY&btYd$jL6 z6$^e-QGtm+RWPvIYM0$Kn5@ei3jaMGiq(jU0f9VpY+*@WeNFxm|3>kBLYNhre2Dhj zuj0=g1Y*6$3!~^R2HTv|=|Oxg&04ui#Sw=#2coD9h#yvisZ4DM@qUq+Sa6`prnOa( zsvdE2axT@t=078*uAz8_jq?ZFwq7wj3G#^VH7lv&^%=kUvw27WXCGrh6a7ShCP;^e z`E(IU=&K4Mr?1 z8ShuTnK-G(`;~}#ZJ*+W9K8~G{&51ptyk|oW;MMXE=RKL^b?zSye=HH*uleQm38S+ z{qYdYtk}no`twwmckH!*P+aX^z3LmfhY%6=3|D6r~Rp5 zH-$ByErq&Vrp{++X0?qQ0)!rqWLt&T)N(8e&&+-u`FHOg)8Py<5Mf@4eR~0B z*hE1YUm5HZ!tCBdZ(PSd4(A=SNVVP-5b|(5Z6}i2uD0zf_TEzyx(^%qoz!(+`$THg zeW#iG6lSq#3U(b~HM$*4NTpW+G$ZoC_1LOEUyFFEe#BIpB`4Y6tjR)8wKOx@U#V<0 ze;un8EvmOI#-r+zBZ?6r(DD8N=_28s-#M8lIhEDgd*?DiP~?A@0f|mqx(QdcL>@&q^T&|+yK!Q(S!O2O(y}6B7;I`)|?z>%*%ZXg#w$^;02JTUv=!THmFJ%y~_lDY0)ezIw z^KU7z{oDxNcQVCHb;TO9%vBd~>A!!lX0;mS+&4zYeHx&SzG64n5M)*4eTYE7UTKQQ z>NVagJ@k0>CQ7>FZEx{zvFqcV)lu~caWPUwCGSqx?0_HXKxEom~ADZ~!J^8G}>_d>#@Mf|CekuLhTdZY-GrTri{EWr_ z2Jv2ibyze6U%_V6Y(4{~v3$t433IL*6AdHhvoi@k!ZHobPPABCy-7TOjp;!YCUEl$ zX#Gff{!H!nSbD@}N6h+*yf;bTO7oigDep{@$dO>XZu7?#e>vwl==GadjTV{*;)RPw72-V-{w5IZ`*l!B<{kTd7$Bh-In;(|lK;Lt5KD%w$bZCLF zKYm{a^v4a`E>mP9uA)7IbU0?AC}L>m|zaJ0r4g zYhPp6)03uiNl)bmeQ>_yamZJ-LEKr0Ay_`j9nV>K@7Y%w+r6_{8G->jR0X(Rqhp8I zA-JsC_iu&(RMc>m!idrPC#qIQsD2%pRYpl8|BOYee@u>VIq!nYMLtO(+38ru zf%S)n$SM$Pq3QYw4tX!rwcKdiaJ>5cbfYF06RSy~!g|4?;K%JFXR{mcbOPX!CaB`7 zzrh^MH|kybAb2ubzwmm_!Um=XoyOBebJ7B#P|?X1d397Vc|j;v>~JFr3m>`p-A~dq znM6M%_3p1ciG1@ZXeDUKr9>^XGrV(h3WipeKPLJ3se6|Rp0rS>Q&KMH#n{{sLLVuk z{wxMUKB_MnIo%QXavH84x?2y)m;-N6?wn%*?#A`kCcq9x0$3^t@nHHL<*U<%{er@G zOYe7R*;s6gPJ0PR3&b#OvOWO5xWN7OAQA37Mj1Z2P92uU!FT{Fsonk6gqob5Sg`4UmC=-D*g_KH-Gej>7_1pJjKoqW^OwDl zX8QI9HLwv1K~OOD4zDAU#X=)<)p&rO!UUT^_I`o<)$tN~Y} zxi;?`99#~9=TNtsJqqX6#co4Gt5aVbQBFxcgZYGcV+3;YDydR6N^%tW5_am*ArY-_ zkTjGJ?6yVxLcM*jDz}k;`N3`v$CKi$%%|~B;+QwBY0)+X!xoUaJ9`P2qK@YTFZ4?Q zOZvkGMO7E297R$P@1cINVxjIk?5v77p(-=lJ`719B}AA;tDbiBmz+91cPNeecpd$v zHyDeyv2|HsZIc=m-1gXpq8^j^qAm4`hTb52hs}9ce6z~S%JTC^es4ZpNDYuw)Jph7 zwD)*lSQr=9fUXN>-rD<4Qk?*xt#&y;>Wv*&MQj>2g!>|lu%1D3K$wD^yn4S;xkDYM z+Yhn7z1>TUQxZ+`c>UbfA&Y3vZ7;iEoy~4JAMtZeIFAG}fJz}=`bg1)%_pgTt~SXZQO?HVOWQov5+Ps|jsQ9feJD~KKNyowqOl66iwz?M 
ze}-?4tlZ*UKa|R?sZf2nF$$rh8^M{LX<&D0hTK+L&MfDl#HB#ZeUf$>@5e-c+qIF`E%k zDxEQgh||0$^X0&{SM#zvC5`V4Z9BX{12$eNPXRvh3HXwW5R)s>F!eiyrck~{|MIDkI}yvzoV9E*b= ziE(~H+n?L>>H2&vgQRrHq|=%;Z!bXYRsdqj&Bl`!R9xm)GG4J7hpVR>o141rPR)*& zCfW$OBs_imLPFFsnE`}Hcp66=y`hD2Xr5kP(L58a*k4R2kTtZlv_vU*k$wF9sEK?X zE%CSyH2a=X5)ujqGFp*1PI;=!3H7A!+@T{_EVYAibHvx=k0^CtL%zQQ1kw|WY+$B^ zW@MS5C0N#0Z@ZjMitpf<#dsKrI@!EK$dXns$2E`{Zh{UESVpCu@whsvU;oJj;Wiy` zpRBZM0?fL~v6BlHbEApypN2-c>-`}$3IJG z^`^-|yMI`rygWcp$QC8)8uXo&u~>>gB!Nvq`_20^*H;Qt!l8e&1$Baeh~WQi?y7&M zWT{Jb3;st(zU4;G+3n6|pHLSIOz+MJx-qYZH-x-U4+5C-GTelj%3R2j8@Es7Cw~=c z&<(lA3rRpGnM9G?@H6_8j`{T{?+s5W`QI7#oESoY3U(pbMQH?}lqd@-8hO8A9i=aT zsf$U%6*-uZ0Dy-~i_dVMj7g!N$-)w2>F4WjpE&)o4>?i4&k#PS&RqOqwa(hy6aj^V z#yTQ#$`1;fFIyDf-d42Ak>|f%W3(@GcpqEcmCZI8Udao#2zBq)j@BV(SZWN&Hib27 z(u!M6TY0G}0m@K>ePQ2`K&|Q0QiiX3!&WP#=)Z$DmI*&>J^U<^z@@$pY%-4_Rgtz8 zknYx|2w=!tkunAER$7Jy3YLJtIxyx{t{_{7;H#9jPq^cHb2j8Obl4h0 zd*}d>rwm+gH-HdutlTeD0S6_Y?}y*AxBK<-27fUA)x+AyuPxmJx-mvdTEljq>K?sO3X>-cJawDa!C{?8AtdbYScYZzg3{A6jCG=rM9ol;*&_bO6pUI=DaiGux2 zZm~P_E!qOXcsZ$~-^JN%DP23MCp5MWh2(d`D2@-1Sb`In;e~+%?*a6^-z+pGw|du42a8$604jh`Ea4EhzKI4I1x0e^Th#+&@+ zgWIG;50fnK@IUO*D!vDk@l4-lg{~vK1^$-@xPEGipQRD1YHS=F;`?3QQ`#`N4Jxo9 zfHcdSo{lC-2DP$Mo;g*2F8RX!c+VUr0>sTE_VN2iuYrz=8J|P z+)-SM{jwq`lf;Ea;6&OOV1NRoNOWq&yu?_b&Sa_aIxI4`lC!WV<~KSH9_6@9c(ra7 zuD%4S^d3AQrhWJ%Ihiy4(+;NL@li>Q2$U%(^7Z|uex?P&u27)YTL_GD1)NC&EPaDjDtI7y@ztB(02Mi+ z*uHkTJxWgX3WU4+TV>t6Nd^eN7|PIh6y*LHuxn71GEGEeBK>W(MJ-;4LOiowjB~Sb zEX;jRg3UyWDGmsz|o*#1i5b?5}G6Z~UHj1ne z$p5kqA48x3xE{@0J`D#JwMkqomcszY$jGR`8Bpt9T>%w`;i;8G&?eD+L z-;6jW`ibBya$YY}>+}Yt|3HZ9zv{G1vcHfi*VP9cN@BZdEDpCf4K|T=?Oxd~?fWr%Yj-g|@+MKt3dEbM6x;Unx|mq|SF}L)LJqQEaGsR&>4j+EL9fmsys@K{Vt# z+#LzrHg5}*_b3f2guw8vsN1I#W~{HTZ#A7Kh>02%AQY4gNw1i!zR~WS4M1KgEd3$^Ju^+6&>`jW*+A{;%l;ia=EuSA^_V=^) zow6F+z0D3E*`#nHB1+1ADAfc@*a^q0s5t0X5X%Sq4eb0h{eS1i52_0lZ17clt(nS8 z+Hi5Co*5zIh2bbH`0CPnRy`g4i+oBhK5r76w}i@{q2c|Fo834$)H}IWa zl~&VKA|fKu5nwSx4T{2Q8ty2CiOGppht|CyPV>BeAKf6c8AJ!R7UQx z@>8``13cMi7Yr|tklW_g!txb-iU2KPqM#U`33VpgS>z4-;N6x_DwegHXbx%88^jK1 z{(~y6v8S(kvJ9RC&R)s*_2|3b`Q9D*&#Dcz zUy#||5_MzUYhoWcQ^yGw#lI^-JK>XDx6u2S8Wc$Ni>&XDe5*Tu2}D`!Ph<<8>B+GC z0sYNj5<=4>_c=b=^t)cG2OR&d7aWm3??8UY`E~L1NP6~*I_vlqQrxn^KeP8zy6V9L z347d5jHuQR=D*%21=TKN`}1AIf67MsKp2k4F1i%YAaR2F->F7IaNm5L%x6H{)-J6V zTsLoFMfTfe&=DnIktSZOjcusZk6$MLU*FK^P?_X>r4rEksY2Aqgsm^FpFzFa;wQ@*4CNuxb{eA}?u8v&o6AnMM03p1s zbjmkgpeD|^U|7CPtv0^1$|L7c<1eI#@IzSYF3`~+ym}`$MH(NE@KTEvFDo$6=eZV3 zqFu-_0)<5al>d}8|K#3q{PXk(22xQ}N>*oC`?Up?0G99L0~4Bf7ZiI{U#DF3`gQhE zCRbE1dy5b!xumd#c+j1s`sgSo=)HmqPqJruM(A$jj8dTjje`StIzMtsv#ReRH$cqh zzY)^ns~3f(znGYnXjoabhX>{8#avs!uaQwibypS0KEtkOhztKX5oOus4^p7F!0nQe zo*z{dYR}^BX-;sT@Um+0+|Q4tjd=q zarNc~3LD*T-4qB4m&pe1{s!S6LZGybCrTPbFL8K#$CnyJ!m)o4D`}^}JC}paoZln19F3&Fg zs*N>G?xLa*T>#OSV|RkBPIty>(R01~f*daAriua_qF6y>{il{U*|Y$%Es${Z_iqB+ zyoaV)%Dz6x1v3g>?-BtfWPx@=K+U;)Dw$R85hXXCZ?#X|PGmU*ub|u)P$K~-E&FDS zs9e{e5R%Xb3Atg+4DRUv(NUE8*R%l$VcsjJMq7o)lIJNo@O*jZnQfBeF)QWW2GXMa zKD~y?0D*Dqt6d*NK4ePEpPT+@k57%dbm|4(CVDf#NI)R0^KXll)LsJ~jJ*o$-yP)! 
z^Mu`ad_EBX=v*K%z{B)T_i99=SygfuF;*h!BQIcm=L!7wDqTiyS6{JyacK~H^s1$7 z(i0;!Lp7`KHbibEWspN?xt9!BUhni%usy3=>*PeZ$+fUDR8{u>tKDf|P};&N?@OCx z-?P4$s|FeckDj#6T^UxZ>m}KnMe}{x=G${p{Wf<5l(jl^8fq;NG9{wg*)L@pDrvky zxjOoF&$p2M0cx?);&YinfPcvb)G>tzCH}=4@OVYG*g8LZ2pJ0zZ}4Fs=e6+iPO1`% zDEzd4AYH`i_V6?~dBFAfdme+`fnvYIwfZ7BwM7_!LBPx=af=Yjq~C%)IDao!m}Dpp zG{F0!Mi)ThgnW9ui^lmpfZMWxNcqAfs1Fn?B){SKr`i*M29;*}}vY(9x4pKqRBZcZ}o&h{oR|GMS^Uo33sDH2C?kF@LWBWul zb}6U)UEi?^YRioBO|~YEVAS#LdXInjbLf;950_2;bqF7XQMI*YibS_OPu+q1>2_39 zMJt~2wx67|!o}%Z{kqqit+6~APutU;`5_A5$^;F zx|M|Q0<4d|?oZ2ccXmcLYk9#635iWy>>1&T;)dWAav1+15zNE$35j{0RIEYlIIJS4)m|L5zPuX0|k9P+Mc1##2 zEA+sTZLDN)bGt(wjy;|3g-tIjYQJ9(gfBOmZis&uw6p=ro1tNZ2F7d@i)BXF%1=2T zg-w%Xn%*3VN>n-(e=jR!i5EEKaJ5}Dh$m~)WI?yE5|BDY#h;(FS#3lSmz^uoa&rq$ z2(iaprRAf*LIU?!`3DBhSMju|Q86pWKCupf0r-i&<_l@d%_nvk*pxQvn zF!5lPx)jxW9>6Q7tEZS(qB?7r6zNuRj=qYsC4e(RIWp*&=UbXPGA%`cQt&D+mqRlt zjXLR!_U`_Ol*Uou#Q*QJjb@-L?5Hv%`#@VOyzX+Xz@iTulxuU4(4JWrLavnw4KD8`B*C;C;k znaymUvF?jV+pJqoF`h-MKo*2gYh%``-p#0Wy$ImareeX!B0m`Jhncr-_>l4!*K8(i z`~RO_uKks#x6-e>LoJbQ24_x?!} z*?p)unc2MhVDe^!ja^DdTkDFGJ~>g>Rn`(gixt8C-4OuQVs0fam%+hEc0PPG9{j1j zVX>Mc%e>PVlGG=PZ0$?*;eq+Y}loWDKZW-YBFL zD6*RBnfaLHMu<<&u{R-)L3L;<*L#v> zXwa5iJYRccq@-Vo7?Sn!>hs|`wj}-yDHJCzX?m(sJt(DgOMuA%ma3RjGh9mcJ$i7a z1oE;n&&?{-AI`mh(y$FOq&Hb#+?JO3&bNLb?sesM=T1aswMhPL?i9)5Kupy8jWt@* zB!&#;YOi%m&!ywrn4+bdH837P{j>9e6XEg}?c|9D`xmDk2974+QEptbY|$0|)3Rs0QJT5DL9-Qlm7ufU!1M-0 zd54h<2oSQ(-P-HJ<(?XhCIVoc%d&l6LfthCIRKKiOt|QD_gyf}Ce#-a{?hH$lhayf z_Dy=+aqeYn^9+_}zEFTI=ex}UpmKs20;rC|GPfV~WzM}LTD)lJN&VZ3da~@HmduE* zI+7lZEkh`N*^V7KGS)Kviml2041r9T9Cn(R`&C}E z_q)*MOD@q5bN7`AYCX4?8L2>$3PAdzVGOx2GbeJv{w}B`=&K$vtGB>;qo&p4v7OIw z>hJXj+OfFd%ES3^al+kbFy9K|%h6}et1NG9xouS8`mG>EY1T#&>pB>lHwz06-vDZ` zFjHDeajT;bZB#0PgZhx(As;tdN~yq&&m#~`c_i4;v2ed1tgNO>js;zJaSvNfE-Y}Cy?@>EJ#QgW0zxmJ z2i_S4(X9Gc|4>M!Sh@GkcA+bJg@4S86{66S=SW6gQSDX!^!vS8-s+0C0Bc@Bu!GR! 
zlBOw}owqhYb&0iEJs3LoZpHCIx)lVg=BA1NCU%$N*A4H;^0IHGctr#X{n1j$3L|_kSBPgADne{uc!_7=^tjON{yjVmV|Q*SP4)4 z_7pXp6dvCq10>Yap(el2?s2TVofBY)n7@;D=u`OA?u&Rs2B=ZTNz{_|r@{hAFyrxV zRLMiriy3?H6F8F!fz?&xumT98gYAg8=&u9l--;RE%as&rge8Z`4S$_hAh|Jdj literal 0 HcmV?d00001 diff --git a/docs/core_docs/static/img/retriever_full_docs.png b/docs/core_docs/static/img/retriever_full_docs.png new file mode 100644 index 0000000000000000000000000000000000000000..a50ef823f5fc0ab3ab0c71aa197daddbfdbf4e21 GIT binary patch literal 127464 zcmaHT1yq$=*EJxG5^^X7B~%&->FyK`jdX}eqjZ-D(ke*9k?xdk5Rg1{mvndMzmNC6 z@A&`!8-8P)>pdv&JkQ>1&$ZT^bDd96Md^Fk53rGtknYLKNT?top{XGuq1?qp2Y>Ux zi=$ zSwe};;O7@ajdC)pytQVlsrHj4h?;>|gy8ctxm|oQtnVL=t}N#|C=5QzvYUU6>lCsy zeHvGNT>D(v=|qs0nxCKlAvOU%^50*+9#|4G(B7EO4}yF!|Nf$)6jjIb`(M5R^GsI6 zFE<4?tbi&Z^4Hgs>zCoQQcix3=jDl3>1(VS zr|n<^@9T24QJAdc-=9z}S;QB0q^Czg*L}O(<#M}{&1RbCXnTr;g5OEP!NH-k=!J}# zjFOV{`N4AUMm)D|IH%<(>bjh`_p4f`ZLtFFaxrh3N#DQRp~$cMbdfMVvp#Z%%~9r# zH+dQb!CcmpFz}F7OJC%BE(r(-sBqF@I0p4@oZ$ioGDFNPEVxsZw$In_Ep{<{OH4Wz zKG7yOUG6qUbU$yLj(B82{nveHQ-X>3Z5*u+y_U-=<_P0GK+;6&rH$UsX zdu$bv7-dj#rc~>^I~&Vpf*vk-qQ#_NS9M0)AN2Pa#z1aF8!=1qBu5@0FI~8mE!s1ox~{&<~h+3()5wNRn!qakraTJ6-oZ6O`uo(?qWh5482F8JC+ zSr&nkeTeYQ*&Q0`@SnDg*KgwcJ8L}7-%ChJ2Cy@xSPI`>9qx~48pNH_J-WRA0_WnNIq3JoYB{N598V@vhc!^CPeQO6-2J+Xj8; zBf*Q}k~mGr&CvoGIXQORt{TV9i{(+hN*juXvjv>3@sie{0Z3_{W|6*?*>^9HH z2TFedTS`sDgB`BIrC0lsdPQiYLPY|wiCB$VP~lBs4>;NV#`}`^@oM7*-z`W?dmQ!# zG8^wqS4)MyevBR9xG~I@YNV^HTZog)Zi=minfq?B9rm^4{c_j&qusO!p_I8V@{zQzv8&f z6X_;4?~fJgS$^mSnK4CN)hf%Sk34I~FgNg3*_*gBhRk1T?H8-l28e()DHioev zi)61|hF)@$iMn2%Z10W4*dMJ8v?dbb#I?1z$2T~T&bh88t!!>>SvA6y?)~kIO2Bxq z&X}`n0{Hc-S4Ld<)IriYCg1#CHCkb<^A*>pMXN2C$i5EVB3d_SjiaGVnLaPqLAwgW z`Rq4UYT6xYGC~1Co(Uf=YP^}OlC%$*sdY{RDeRFmT~ay)p939!8MszWMvo z%FMs5(#47E*H>JV_SQZ&!J z06sh zO3xa|+^Vzp-wF}m+%an)=vnA+x8^lC^u z7#EY>ci2#v;eosc>!sz@rg1pmY*d3=Ai89b-hx=U4i0!{jceRN(-hX`j{M1J^~Ub2 z+@!zcBp*x&A}^}z!z)v#%E$CW#`N8LIfRqK^6iwep81c}P0V8ZR8ZOHs0f&hd&Hdx zo;c!!d(2>G?u3=zn;OKDH`;F_F?f+m=`TiwCeANp|BgL>u@u(S$Gb|th@BBDH%2ch z`jD$A+r39JgqX~Xq38xc|G)|}c52;0Ut-y;Xa{rAoW1aCxzhq7Y0qj~dVLkG?V+p) zv+T{n+P=m6vlZ3}WN?Z^f3yNF+gU696@n&)<_XCK50=S#(R*lOX)Uz1qm zLqxHFs0y~q;9QG)93eE-{$n+-Af3VHkl*9&o^8q1LaqfV*XuG4tKz1~gS06g>>aVQ zEzEO2{1}R=h0o^wrJB;KI$=DqQBkPal^`7RTno(L$uOSvZI`3f&&86lkG#%|l$04y zOmMYo45~?-`!hN%uWdx5Qww{q*9Fatjj1h0^Ud{F^j{@*uS6d&4<41j$uuaoG1^3R ztV|nyXMoztU{HY{^n#Or&{&|Lkt$XXYqLwj?tX|5Tn7 zg5X{ojKZSfyEcIgNlg{~L>;O9w{YRx`O`V?Q=X{9shC-kzx6e~E%=~xaa=_+rJ~WO zKgA`sOVv3FCV^Dg9i4XJfyn0Rl^l6)} zS(cBCCNmCyxz)trkF5Dlfvnkg^G(RApY7i{Pp-$1I*b$EL5(UZNDO zanPx8I3l%~WNOa8veVlsE;InC$CyWztSaQK=hc3P)RSAVqYAU?=tWaQvX&H;3J*oT zM_CJBAE|zI`))pzxi(wRkB;}qDt4Swe8tXmk4(GRP&B+ON2jt-_D<7kpy!Mz^6ZMO zBTk_}#K(IN<1-bmn_P+h8c0wT)GJ~&*WSrs6Pv8mqJWb3f;bSi@oy-kXi|Jw1$Vw0 zKQ6N7sLUbSj}43pm+|txIbU`ExDsu*vin*~nM;^5F@{%B=f~U621B}YmALBqj#*W* z(#%QeT&J}5L8VO)z3Q17M~=mDflQ{+*OHpVPN}57HqRhPuR4=8$G15>R=#93$+8W5 zCZ(7|)>yZvt2w%&8CUI-9J<*f@`=7zYt;C~eQah9pX}?A?`8v-^lCs|z9V~EJaG2| z7NG(+_D`-EWJ&oN3u_zAWX=F1@0Q*Vcc)$7Or)eTx9K<+YVa4M6~ICS4sqLwgvQ;Aql&zDomW`XWgj+&-8TBYdtXstUxJ2Q6>~+`1D%vS!d2bG*!W;7_gzw{ zAz6TNaL24s3Ymdl{nV$fYWnfRQ0svb-#Zu!NzSvK&i5?--cCzWfuz+XyVkelxJ9Dx zg4`O22Y1%5Sd4ZFHiHb1rg+~}gG`n7Wr1uel#FAg_$sF7UKiux-3pyXFF}nWeQWZB zyZY;^oUX)r3+nGz&eQ#)sh@_>5OLC}vCsJMqakie+#HX| zx=jXYSKhfp%onn;;aX?g#h?t!*afBzm`G=XlP-UyBV1+p5<y+)B39?@;X$W66|X zac_KhRjO@?InsD+0_>eJGDCTGsup`5to9~2k84$9O}}n#F?gRveEdn%`!6BldxZ&+ zfi4^NTz8Vk-=N!nb~Dg4n^g-=a}ZI1{!o-RFgd8&wbq&XpTXLt78;gU zH>Lcd+?a2!Oi?}fvZo;LuM~{$muFBz(eiYX^nFi47*yWq%g7deU$T84uh2|^?^1`D 
zsl%fw-D20?m&ma@OB{VXX)^NZQ2Ebs^mueOmyGBn<|>ZFe??_+z=dom)7|&|`BUPn zX7rBg?l2~KEInE**YAzIA|a+QkJ^*$OLp~fNqgTcH>d({DH0jZ^Fy8dpnqp z70+eurd1PYH(h)3WyBWiSN7U*9J3i_?)(a z-t@*}>66hGzwW#b7?uw4-{8AC_I|f^^F+_jG+b8~>Q=MgTpcQ@f(3_%C>Go6~G_af%Y*K^uO(GxyBNvyK(V+jtn-n+g$MWIHhAy zG}60MioNdWa(+mSp)S^a1<?V=8#>r8 z@MOef-_b#oW1^v0jumohR&v`+uZ_K5*RNmxreilCN%CE{`n}h&a#}yF-RX?;A^^dC zs}JX@^zDp(Uy-~sLFHpjdVE2Jw9CN<+z?nmHCdYMvI73JA${v z*#u|JZ-+AP_3>>oUHeKYfd@qd*&KrS3zI{%Phv?;bKbXMfRS;p7)X~TOmtcJ6f5lQ zRVcKe@{)N|T`j;qf>O$-^Fl?^k6cmjpx+gF^X8e;?rd0k z`jekOf5yH*0&JXG!xE*S_-QGk(tN+5B2vCHgs)ZcjShtx{|)L{2QS|oeX({kXHP}; z_CG-ixdS$%4M|xXeE9+07i4dY)kOPnvywTF!+YVpE2M|lfX)gX+75i|)ihmgk2i%c zIbziMF`!+lyi1I5{;+xVXFH&FB1HYsiQEJnnO>n>UmUmkq2csA=}FVBw9)Gi+o4!Z zc4yIdThI8S7b^Aq1u}LHA*~{+s?8F~Rgzx4nkwcDdm%2W+T^i2Y5VJ$tjgq6Ys~hpI0y44}HKX*J4qU zl8O!iSmWDIODW2E1c&m24cYsO)Hc}ckC8A6*6j(1-Q}`X@MVeJZ!v+Jvqh~smy)br zPzeSBg7JbkAwGZ_nV8-_D*%Qm&8DbagIZcvhTyP_Kv0dBTgIH7P?!=F>E10nTV-nG?Mw?9ZtT&9mihIu{CEX4@K3< zAJS41Fv;aXNF#cv5|j>@3h^(nB7`ooU$NRh7usvONf{lf@X1PUBBVWs?uw!VTGvdPsR2sFpcO4XOLGRTt1^(do z0^Kf?UJc~-=CX*OdyS6_u#sz+^kF>U$-@lI7DJvlG=U$`d>}uN$b#bVT#px-8fiZiGJi31{ZtCca`5dqk3IH8acLPTuy<}huUs-| zFKtJq!CYn>$9`WO!r{@+4+T`z6y)W_wv(OB`HI(e}{&8>B6U>D8CbxMGU)?WsyUPK#mIRZYOMG>)l~ zudbfHMz+xEU+o+#J!AT64u~^_8`Y1O7Tz{G=2I2Sg>?sVG(M{1&ws&HeD>bPi=!fN zD`o8RoIkYDM1B*GfsYaa4||e6;p#Wa@XTuRjnfidb3Y6WF-)y7$I&u9yJkomV&b8vz=Z{=+xl8QQZ_yew|^Q#7Hv z9)KJN#1pep87RLd)Slb|Hu$~6@>z_57cU@&QavOGo;P0I=bmx+3eQ2%UyBDlj9SHk zmM#EuJ-E``w|Y3>M|IMt+o0`NKrXuS^DE;f(Vu7lmDnEOte3}T61zc@_{Fxih08F* z4m;zu$mzm>^y6)Mt>GW$s_^3L%hQgpNS4z(%hXE|%Lf(@yY8FbslLfC^ z6xgCsOm%h1cj`~|dlGp$=DYv{)d5&Y+u#D1jxxAV2v~1EINL$jHQD3Gwi3e5g-`>3 zW!B8MZH6~meDd}F?k3^N!7zua1Aj%)O{O2)fHtF;28^T7SkFyXl2d%n&%P)uX6+wz zpEoBU*RkO~;kwSyt0m#!H5S2zAQ}PN`ty~9mE5X(HI}Z=ZXnzXhX<3EMc)Cn-S*x~ zf$`l|Nr&5XnSy5X529+cq;5zLkTpJ%yYJa3anl9#f|N3o=sPu|u%C!l?b7Zs%jdYU zWM9)Fpp;l-r+|`RUnS zD_%m8YXzt(dQZ*d@5u!swW;x~)rXLrU))hx&xe*FY9^5z)Q0O59YAYOPNy4U`?&8^ zOBY^TmE;5L>Mai6so={gC>#NnO$NJRXXC(1=JZK`4Bvfg=vmsG z5elmgJA>vdnO_d5vjDqs`(4*UWY#S!3)UBb=B~CANv?D|dW#+B_JZsN7ueY`% zMZlFEQ1u#1hl^#+9HpOAT;AF@AMm~jpUlptTWokUiGv-t)MvPX6a14)D>Mv+K`-UX zwPYQS&XBzvwd0+%Q3`F%kAjf+v7_DET_JnDInAdwH?7f|Cq(~h2qv=&$!mTZsIR=_v-F3xumrguv7zmNz9m_5Uy^{H8XJ}~Zn#}1O;8~09SNq6ct{JD> zNBfcc3GC%b=Lq_wXgY{rEagq<52zmtwU;!f6I&y@|{q8jW5x)>pQO*(41>f zN>|Cgph?;WuPD3|N#>%vn1CSd_`TT??58qEqtcJaGL+!X(kZS;wNUPZ*FI;QRoyxz zV%m|c%(+*ZO#2yIwC0)R(TkhTE;V`pB^H zGBS2!R`hgRpmAXbK<<)}Uq)-{Hv{>^ZCr>x{|haeEMw2bXzLx)|9AoXn)hw%k4IY% zhEzy1TVqnQrqFbCBQh&v`^d{3&fn{!pUSf(oZHMSP~$+0nOl>%!{*AKAJ)|bIM(V1 z`#a2eolKamJY~crUcQ?4y@Fv7mC6kLmhn6uqi~ z+?|ts+?r5VMXuljf$NLSA}Ixs@jc@d{Zk9?Wnvo2VeW!u(iMLClp9wP<7S`}-TToR z6HBLXYSgsNi&Dhdzmjgc?x4Q$4Z77E-sKXcLIm;u)o+Q!J)0*kb7_lf^T&Q@Q)%e6 z@GZ%|EzAm;F8CcJ3YZTnu0aZ0Cpe_j@&erK+p)qVvrbSjzUYJQjyHJLb}G9JD@&7U zriKj%dB*VrDC)-p0=lGy>KGF?PUr}D-w0EvS2%yH zDk*4iYX>S^st6Qan{f5M-Ujm4mAG;fRjrI-Dbe2ADs`j2Zv81UgPP@hj*;+XmdGW+ zo5tH4%h7pawTZb}xoT(O72*t>3G@KrN+kXMLz79Qm6xVVj%dm7rzuJgnf%_Q&{V=U z@T6<3&dFJyemt#{^h~D9t{q~u^ALLOAaN-t{5vURv~Z5O=R&T&**(pwW!TY3; zt4+LVYNCL#q^(sw4MYA)7kY~J#~094R)m{lPR z%02HwzM|<-9wq{Lq|bpdR6IHWXKk>D`Am4Z?!Xq!a78dfq|+mY3wxu~)LX|cSkgnw zE|`}suA`s%?VxFwvFHu4=Daq2@L(q(xL+&EXJQ70hxa)aaXCMuNZ6@M+v*1@76%vF zXbXCW?Lcb4dY^7NP}#fKhJ%S1L;KCg`qZ!xW%<|%>1f6#v0C*bz94(T5DpHGFs-+$ zEX9HWjj4$#&baCZxSD)0`srE|Ao(WO)ij{03pGL#>s2v}Wnw(rx8dPBGAtX1OE-7? 
zuX$5(Mv_okucgs4qYRa4b`AA)FY^cEB}QocX+EAx0(*(<=aDLKFh#|E&V>^=EmvN_ z|J%iR&hGFLBX<=I)>o!Q@G{MyJR}j{k7%Jt3K|uZBZ#c#qdzFApHlA4_qMs^C&7vh$J=+mxVJU>JE)%QqBH8;-n)#Cdnhef?2+=PLc*>bQ z^LJHuoteF45S&Tq;ohXEV}S=B+xUK|wsF;~r(5k{JfTf;P{G|#Nad3>D+~*zuW`fB zD_t^!&k$#nZm(S}9d7mYb~A=}%m`_MCiM`)UUu#mav{L>c=Lz(kU;k_i%T2#bX)oK zI{LJ`ZlhPdtZDQ7IWy3!4=bD}14i;R(*Q>h;1)idGmkorB5t#)xu-lLjvycWzBYBz zri3!fF8#Q=)p1haA%sim>x+g ze=-@#QSDgebD`*6X-^lA*4)5(VADYvAX9T&*QU#-;cA^gojw!dIHdBUT#V&VPi6e1 zFGEJL?vXGM{3BdEmZLR^*J~=Kk`!W|2D}W8)G&O8(ajJVX#*(S$+dXj0PjGEhZlL%9OI?KM9mO%eDh%TI_`8EcyoOC8(Den8h2KM z+I~H9T_PyMSb=8nXbi7@GP4+qO&D(%{s0!bT|ioQELAcD+Td|c?osbuan3QEtIjxN zc6GL&e1xHJ-fXNB+(cj)kCyO2+@_%E-IxLh)C_SS-a1$J56!ogzjpa%Gmi2 zjaP^v$!o7)&&uo1W&M)l&Y2?k#Z`ws0~7T}N%~X?lRu>^GJ5Ph{VpghjB`gAJ6Ar2 z1+D0E!A4WR1J3O7(tEA{ zyA*2*LcDF?rT3}59PE|N^x&fVGPvEdr*0L}!j2Qh#)I3zxN-P%^ZEo^IN;(Nv(Clz z{ad?cVMErsq>&1L9oAJ#hh-M6V#*E|z~Y2;3g%(nMLvb&seaqTd-A7e$23D>YFTA@ zl>37Ax5wmU0wQbk0Cs9a7PP2zgs9W%TCPMRdRX-^q&2bh`A!(!8N$_~u|`+P2B-vE znqU=)1L74r=*iXuA)uo6madODag*F2I&s46jWDfWdGmEbFEzj4!}e0(Z#uOP`SIh$ zo*ZXV1|;n)&M&oSW6!mqb)`{3Ru=g%wAN{PRqmUWi95O^R}NqT}C_dHsL~- z`K}*bh#fjrqB8B&i}VGDSa^9Smh#gvJIw-YM1^cUY$jHLJL@QD%E50?R@^rU9#8}m zk5*Lg_On?{6;g6|&Q2tuFC3!qw7H(G&OU$Aagad53~KXkOwXB&q>$PUb-K{3TJ=tc z9tSw$Q7pa#Y0`^;S>*~bw4k}UQANNm=zpI3`e5Z^c;ArabjQ&6waHS`5D<{9L&JhZ zoZdVu9UAgIayaDWXJ%eK_1K;ll^D6(eKEn2>=|F@86fInMr79$Tc{xeyjpcpy&4&rmcmwCgWym?DBf|Q!= zGZE~sbzHp+$352T@@why-Q@?9WcKbVGWHnuzADAWzn>NQ=V0*6;t5|x(sVp6IxXwJ zkEWsjyYklXmu->*&ftWFz2;!^e)~h)C3>Igj&vDWrHST1GFEp74{wO+9=pt0nDfH8l(qLPuyP`Q#wlTV5 zob}BTf{6$Sh|nY?uTSF^o*AUcSfX`_8y669mwowy|Ej)*=7~1JKr(HkGfv3P7@lWR zi78I%`U2nZXpnzlMCh4|1e-0Dq|^>G$IyAab(J*Zvp1%`mY=e9Cn`x@vz2g521mx2 z0_UWq`25Po_yn%&^eg>x+*4L88Gk*{<3}M-&!xO<^1VgXyfY*3v+|$-=CS4p7k+Q8 zV~|p>e;Cw=#7;=4d|Q7C)gNPPJ**6MBEm9#iEob`c4zO4qnGsOcI9Zo4V>k7X zb+;%(I<<%rivrxw?7f=XVLT~WsXJlbiLk>2IKhzK-s2zD%Y-Z&x*I9nfZ+13Rz3cK z0Bueb21WHzMcqpx06yKQD74v5n;+v%+F@O2B;@T=RVu0~Hx>nYt+xn zyQ?iEw7lSOGO}a?b~%aDwucTZ0mw=^Cd<3mvtdTU=%o>4hBh)SCt9 zRsKrou#`d-I%**Jgm=u5GGpK#_PN2U9&DiPT%~bY(ww`(>E$RDIjYj_N5v%Q?(vxN z?{O1HaU+*#|x7#6m=5 z1(d!^isU&G8txd@^q&WkuN3|fghXCZ8+v2Bhb!E+PA~oA!1mpHg3l`Is9I?gL?QS< zou!UdK-NMt;F%%ni0oIW=omXrFO)(O21C`#-URFuN5H4puV2Ge`9xoluVtr&v%4~{ zmm1=-24F5pi@;UAK6Le6I$#V{+k{j{CnZ^CA+qEA(q)AtNgQ@i0N=kF|8H4JL%$6v zJ~i8FH1&&P9HlRl>ff=ffKoi{dgH3LHC^3rlAVUYFvLq_{l1-Mbu#d%BDnk-oWZR> zIoA+nXU;!SyAkJ(2U5Huq5?vZp@jKaY-KBFyY06jPbX($qoF+Hca}=RB7~OcR6?~1 zM}Z3QkET5D-!z<~+AQXb;I#lD@lyuz8@iq#uSDFgrg-S3LcHY*YANs3^~} z54ya=5^SZ^uQ8rpC8tvFyfuko$-W*p$#$4Dmn4^+`L8EUE~IH~AZ>rYXDc_(uuYYfMxH=*)OIqp@_j#EATJ z=D4J{!;w$HZeTo?$Ve6xcf!-I`gdWX98rLKk>2Zao4f8}Bai2DtE`(37f=U)$h|+m zD|#dJI-6CU%oFp6HQ*oFMJbE@Hi8JU-OiUyQS!Ogg?+UYyOzGXMAX z{ene+nY!u9*M6l(S+o|$N5(7#TAWDKX;-zU+2~*DRse^I@7r`AGaIIDa%QJiyAa&8 z11&=n1ivyRRC}=_trT?xCd|kT{qs}^VXPnUvxlR=?13u#XQ5vpUtU7}EaJTG{%}RG zqqO)uqQ^^Ll^5F|DsOi&wvy*mXhKn*QJ{us>ADz+*5=m#HyrTsi#2s4fG!05q1>Xy z)XbuP7B<4?r7yK;`B9$;M+hQnWC=0zVSHAkxq6Q3vb(a2fuzVpOgJTXhTi}CpiLo7 zfbk0fgqQv~%JFMNs3G}KiGMT%-2O#UcGjO?+Y&1>k2>;IQS8{L6?(mq*GigIp+Slw z1*H(uJ$J;pqvVO$H}Imw#2n8Z1&+QJ=E~{&sEmX6|7_?H=J?#F_u1cO$=<#}kneQn zD9A)=mb%`8x%VFF4!RGXQArsN(PzOf`oGIz5E!p!E~7v8)vI;#y}g4l7Quj8#Iu=S zhL8uT7#JANh#&ef0!H#DFu5UAyStBt$V*B}sH$+7U!lr<{BPU=rLq}lnmX)X->M<5 zi%Ecju<~11&O8gW2uh6kMvpiN0aRew%jHSb9CNSY-4xKj_MM24qUCA1}T8?by?4d zkpS^Ij}PcxHwa?Y=Y>%;pw>VG1v^Re;-3?5B!j@5kAzARVU|UlnzcmKk>XInLj|>+ zfjgWX5hSD_3dQcv6CDBnlB?U$XE3lZNuGvqD+1Mxap~@c6R_jOI$(za6nt3JaFGiG zk0k>g;rN3mfds{V40GefM*f)pNX;Lpe4DUxllbB+XE<%DsP3{^$V*ZcuOAI5AAgaD z(vUdoDYt7F5YREADhgjaMEG0)^KI;A<>pvXhvS%DAV4KoRv9(sX+(t)Koic>GISoL 
zs@UwlTd!HwJS>me66r&pYvhtW`30=icLWjfg3s}f@EA#j0Q5yH+E_oe{FOp{WNzPH zFi_Ctc}eZGB;yOj2s%_>;G%tDIoflSZ_wCKt^Ec^nS#d-DZfV0ensI^Zh)a>Ei`dw zw!YTv15Jd$eZVjv;WikUjzP*BO zumj#z%9DyI>|v!a@D$`IHm6I#>>dH||AzGiM15T?BeDxdc?Lp_ik+SP3G18JuVFw& zNG?wXTDc2wt11JZflq#l@eZI*H{wE43r)!)(U#b-;FqAWI{1MYV0kxbo)vvk`of!x z2JmR16jc?BDAbXNSs*u&Ke%y)g(?N@?fLn!FG+bjm=WK7v*5qC=$?8%L?w(u2j*qL z1QIE};XeHe)buVn4?dpYQKZrfb93{mtAp+m!)BDj8=%SBFQMIglMGQ?Z!yKb^AKXU zrgl#va{cz^R2Z%X$U%+p-8&^8r&nAS!?^9h?XS9Af@9UHEKQ*v-#s{O8tQ{O!tOLb zSqNe(8PSU&|B`_FKUa^ch~;MGhgwS`0@VW_GZYq|82U1QX*LVbV#YNKR}A$%{`g=X ziE)jCXo34K5gC9e6#Srwh>o5(4gx|#f40EsTgm!hMNtR9hyb&Y80b_9NJv&=49|i4 zdmjAP^X_Sij!~g<&6~KX@*mW^ykv;U4KSbZJw~Ks6+t;&Hv+lO15G}-0gTF&5yY*d z?u-^Z3>Uh1+sairuiYKbry;;_;hi)dj{~I+rZl_WKpuC-8#4z+lS2iJ_^G7YL z*R5_aZu5BK3d797dnT5DZzVN<@ z6Ehsjd^v&D!3AO-2ZD)~hUzX9a~*WnShx&Y=gnfHR#vYomlPdXzuSaq%yPA0;b-IQOY-sG zK!4-dG=!3-u$g>>Ulj{q?cD*K+vnE4o*0;%nGKjh>4_!LwL=EQ2;k(H^jEI8`b|9! zI%zL;-GMV3F>;g-CQTA=v1k(wQ7ssGNKRcX>iPjtavt?5FapmW9WMwi`S1cMgn!T1 zkO~1r9Bc$SaN);9wazhbiJ530OKgt6pLNZ)+bYSv{zW7DgMnER)h`99C<%{(??)5P z_FSWIdpLz9c@A(l^W_T{g&*eLfb3BWEF0vzsuF23=6bNxX?wg`AOb5rBo^ZXc?nve z&AByMj)Sl{5@4Nk`cr2(N)o&LMnag%1VE)NMZPswlwyXN37IEyN0;mc%t;rbehZl5av$%9WX)I_-zhvCH;F)~ zCMn`gN1W)@DyS9ByldIzg(`e6cM&E4poVr#L*9b2{peLQmt5jJTpAFnlhHakU2X$tFOto^v##a{{T2tw`z$fT2@vp5 z2LufDL23S!X2pb^lexSzR zK&5j5GhwUkGg^^6%TcNx!9iw;?>-V7TPeN_c7f>ACr-fjq_}BjB6eD&Av&H|KOo0+ zUDrV0YajR(d=wA{h^mCmJ&32P~=~Dj)noR z4U*P=n0jdu|170;H-c+iizNVH?iUg+&r5=xR|`1{j8a}^>N4Dk}mVby8{hBC^f%4t$OzyK#OzjoJjXW+|S#_adF zHR`W;LPX8zC{kxd2|n}c$2CSPs(!40pj0v*9O;o{qgG?SzVtdhGc$g3A4tUP=BQTC zrgZ#G-1jDKNDm<_oq518CuNl=;95$+XYCS_d~yt|dN8FFA>L`2E*^XUy-hnfAL96` zDfstnh)(|Uw8E=-MmnUgI03BuLT&cMTqcb;&+GazyLS+MX4v94T*DP$V{32G0W*B8 zyax1KL)E!-?g*Hx=NJnI>Fqr*z^NMSdfa1(b=@d483HgshRA}RF2BVx;>AVIgdN=u9ZU<7-#aY zdgwOPZ>~ly=CW!Jz&H^1+#s*TVTlU4LrAzsitz>*u|(&4K_65)Pw3AA-3eIuEgtxu zo1?86H2iqoSY3WfJrtX`*ztf*KgNVZNY*WwWv+nA{C2vk1kM5}iZgD~+)G@eEYzvA z39qCNRh2N?)1CD=e0j);=1?g=>a@TE5npHU1m{5T+@#Y#=RRF1yQ9y&;PfR)wpj{@ zKEKn}8iCLH-G=qd1Z+Ps>(&p5M9(W@4Ro6SyMO`l6pyG*KcHIGAF2FwSo7gi5~dgy z`BBwGIPUg4AjoF)?mPW96O_jLy>uww<(oW{66ukEz7@IqW*Q7Wp%?#B*KZb+DXKdN zHWwp#vP#PPKK*=lWwpn7kebT{9D%UBFU`mOXkEo)lQXTgXqqc1))S=}t_chCz!7C! 
zSO$;h?5;Zj#}iQw@oGqa9=ybEux`2;{j~0srJ{Ax$G=%`I-e8N`5!L;cB+H-3h~6m zMAbqtBQE0uNoTq?C4&IiTDs|PwvYT2C`p}wpY3BK8ao5dShqpG{&jcIxyEtzw7G2- zR6ck&S`)KAZ5oeepm{+r2Bg?Cxj^Vo8qHIDFw4&=fh*SydKgShP#q&T=gT7MI+z-w zKAvfy(0SfSPBd9AK3xX?w(*hEb&p`&Z9p=}W?Bm_wMP-A0TDw5!?6B{f+0T^EpVDw z*naAx4%Bp3d2{|~#0!*0JYcq1(6&TSvJU7-abqaU(B((fn&@Lnz5{wLoh*@UrF5}1 z{_9v-h=LbIz!4CWnEU{> zTX$s2sSq7KPbdP50BQRZkJRU&K!;}MeAV$Tb)-Mi;ieS#TbP+RJOC5o>vMU*cbFFb zfRkZvpe`x8hjSz8PIL>kfisDOF|IOb!Wu4F=yWFy1J3JI(3ff{g&$mjYNwr|ySQ*1 z^xba33j2U}9_8eJ)33N#rKHc@v*ZpX61m9BM zgIPv_?eUHk*s{BVcw}6hIr$H+Idhw-bXHf;bC>{5p!y3eyUPKsM{|S1VMrS}*7{6v zh=+A9_Z~K^0o$p%O)Iz;5<;IpIq%`{>);&NW7&X-w1fQId7^J6^LUYT7@g5_F?Ypl1?1SFjH6zj9hly8KFQRB1EQ50t>YV1c~pDaL)Ld{AAIo|-y1ywHm6 z(tAr^R0lgRXq!8wCw=dIH#h+zHKRFt&Sy~8lI5;j!(WhgQdqmg9#9i&GJ<{>ta9{3 z-npu%1Tne#Xq8^CM$r8P^{{9XCSDH6m>Zjpq3}&gdFRi?hkg8T%u$EDnzoltsj)q} zrP36RQD&k4l2I$}py^{#cb?U$vGP7&CZR!h$4Dwu_}nya1~_k=O?@S=gbN`5;FFNV z7;w}-&O{WL>&+HApuh~E23;p~eE1LRCKil7Oz6RK+)sB_%OY)%IAD&pH@Tb81m!9vCtCXN_c~y2XJFp=k^pWoBWI* zP%6d2VXwSK(Efja~0@L9~w^MG1>4KM3sFBim^Lh@>^ z9-w0&;Q)VI-l;zSLxep=6x@|oqF3|HbYuBs8x0p0bo&F^cSH%eEo37jF%bO^=IIrbmWW=eHQaW7$h5!4d)+fyRY-XUGKFn$zps4Hf0;8$1F>7w= z;sY_4B~>|7o;8eJs`FRCs^1T4KY=mB09ih@I6fb=*nEzmdc_*`iFVJVxdwzmHh%g$ z#FRI=a=df8kSTFFFX$_UKgwC=%Y?|-*fsim)4A^L4T0NtPKc>6f#RheM-HXjFwor0 z=a==d&#{16gI3=bx=Sc&ALG+IlEc7J7HhT!$fOsG(EQ9iP9dR33EZ|^b7Og$aNX-H zoNr^dWDa90!x~X~08F}fCM;J$lmsFOldL~84F3=&y$J-~+vPzv!73=Iy3wti`O<+Zr?_%5hV zOW(P>X?}gac7fBy z**23O&Zxxk^%{jX>d)W&TbfLn#!!`7s1IMy-Tq4uNli$|< zM10=f4eAg*t0sdg>Idz9WlOy}hY`g@9=72M*fB8&xJ27swN?=MA6|g>RGuE%bqMzS zOp=n)&*I|aJ(;R~sjswUH?_ML<*b|x{HPW8NPU80Ni+QE_@xVzfCN7%bgBrxX-ASqfbMc&;W`(bG`Waqxrw5S4fn!EZGmHcR z$_VlCF{Bik6@T15)d+5*#!K)%__&AH#~azguBEu*2y&y#J4#yedoyiHQ0PBW4<`N@ zm~COT7^W*5{Ljb%X1=yOlPsbS{8HEs`an!h@LqiT=MViqggOrb@Ha=PjFx01G;;hd z9(zFd)N`e^2*T9s3}f z0%c!Q4^2XXuF+g9Stc7cJBMLZ=ySbe)#1a$!ePs+)SZ=>;4-}fS?51loV=K8U*| zzE1D~DuFmGRj?)E@1Mm#g2;;GWne}uE)vLY=t!lRe>f(UX9z~XdW2z%oPV?m|Do5u zB)#pcA&iH_hin*|Nqa2g%}zEL6C-BQn(xfh z&J;C21Rxj)R#OD^h6Z>HV_5WpEk<&&BL2C-SOP>4zkli!{`mj!_0~aAfNk{XvXp{= z%94U2-AbphlyoiKNOvP3wJV~8(j5}g-5ra9NQVN_NQVf5fb@OW@B8i__x|pkamH~5 zNB7d)MmJ(p!g0C-giSw^8pPLTK1UCnVPAT@BGw5uK zWn>d$<^$#lhR|9N4S?)M0H)5>G}-35D6#LO8W123Mh&3=e9Qr5sZA{KAOfVnr$A-_ zAU?~hm`{@1+U1610ALWxBBQ^pf;0DW8?ax(}E<0(U7 z7wdeA$Z2N~;Q+g+3oz2d0h!DkNFJVwt12md1d1godGSv{QE%VMYHNRcR;gvY=6mZr zwGxoP@If~z+y3H6159WSpGYn*FAw6It@vMLQat}g#(N2}9lS3!TBb;d0iXZ<+Rp-)>e?VwYXEfgmeV!X z`W*oxtaQ7eN&yk62jF$FNlC8(6*sT2&|g;bWMM!jOv*}@v8I-HZ~!L4@FCY-c9yit zeT`Q;oDDys1g3hgVH^3kejqk8H411((qWADRH+J2Qk>2Z8rJ$elO3O@P67Qk78I`? 
zQSaXM@buDzfG;TrGy!1fC88$$1Bao39e9mkigV%!>i-b%(lG@&_F_QDlX&J-@zi%V zgEw604NcHDoxpg_;QT~cgGks<^?xJi9tM~7zIi2-WgfCBx?W9|zqY`hdN zD8U`pTOeA6<`hW$d+LTbQ2L?*F0-gi5^-c%w=DnJ@$4vl-b9ixYIn*kx~$z_^ZFBs zasapLzXyW(I~o_@A(Kmf9j9~$jYNv5cL|&D35d*=0Ly+VT&|Jxvd^eLdUe%`W<>#ogMAZ63<`gA=IznSt1OI!|$Z3(zo$sl)yvhg}3$X{Di(CrrVMj&aF0*3= z??Nnck>6C7{t6@CPw4m~Gx1Ss-l$Cl+Y4HJRK6HWupU{?arF`SIiWxW0Lrkk$80xv zd1-n%Lz)fL#di5KfP4|tXO01O;kYpQ5Y_(y?LC#WB6;$v%%pYHZha8StvU256M08F zK29UJKf!IWiEASEJ zMM=Sq`_&mAIVpfDS)=_?>mo=$_TTpm)6ApV|==T(CLQ}o6~?Y$zs={q{n zr7Jhe2Yj$qOGPawi#NlnGu}AnQVwXB>Ap2-q_ESVDl)rSgt)JugiF4QlctRuWcxxXZJ9~ zpSPtqI2W+A%OQETqRXsu;{uB4q)YfNrMXYZ3bSe-PLhUiIgZm42n`kCw=&2@>QaD2 z%GRK}N*P1T{V#tq>Di(l>8p4LU02)XI6&F%bR@Y+Zq^>AVn?@*@j=_A;JOfa|M!ieTyb zjWREkZW#194~y18%wngXQU1&7QLw1&fSjn_*_N)VlIL96U~LN>R#-k|`r6w8$4jS1 zN!o1ver*WLH}CGTd@&jx?*XiA9Zro_`YKkeBHb=~+mpJWM^%2EwO1x=@Qs#XiOrYJ z#<<_>9@y#K`(E$ICGo!Ve&^5`fn#u1_>WJn-9jKrn*npIF6?uR!{8jKHM_2F+l69b zW)J2?0sJZiblZ)8(_X{_rFpp$Nr7ph1VaT^|Aurp1a(C zbuczYDgMhn!}p-rcWvQbwbgG6D{Pm|V>&fJyEYkDe~S zx~Z_(bmyJz2zULE!}1bijN8h)FW;L^lYnb!|+z9Yjaef`at)xQK}8^)vCx&IkQMGaZ6ekLK@4q=qCc(^u~C zvGVfnu^r~}3bD=SdsTag7trJv^hsSY{ZP5OIhNPbT`) z;M?0YFa#l1t%9^7a-0S-ieQ$hwu$tQA~mWdH-2Oov&`4ROxT9UGy5H{V0K$-)~31z zI7|0|mxpKKN?rL@Sbgw={^|ZBgfcek!$;25O0e3eH ze37w{`;Wqpsn0t>vOH5}ui6F3z-yq9OjX=cYv2D!_~U8se1`f4&1)UdgRbK@>1o_?VDCMSi3tAu{c_( z#7p1y_kOykJU;g{B*xtn5Ulz8iq&4~B@D&|Hp1N65jmW^6M1PXG0sKVZEZeA8AyM#wk2+pC06OKD7hc21wTM!bYSfimDx!{> zP3rp1y8K>$dOea%S>YjJ78?U3?e zG>pMKN0=DqIR;;bcH3sjoz2wS0g-l3$4w&284TWad}@F6-%ACauy7Wtw6cR5WKIzT zLd$)jd1D#qg1atuqm~q7V;(p=h)it={q&U8ib!}@BC5AFo#gy>)9{mfCPF3eYptKa z##vBp2^=9>#W&qdo|Ul$E662GaNatYOZvGgo!^?+ter}aJk|xJq=X1@ua!pAPyMN8 z4|UYZrVJZ)iUX1jpLMVf4)8}K!G6VmK`U2L^yVDUKU&pf<&01-$WLG`Qs3Ed8CP_ zI$KKl!hIYkoHu$nE@66&545!q=>-%+O6^+J%iaYt0ok;?s8 z3x~(cgy#QE<<wY}wLL{Ydo2Vi+hu%NA2lHY5Lqz!)BD8Sb zc873FCEqgsBlzF|6x9D-@|vN5mlkd;*Bh#MCq7EU6jf-HsCQm0c@QY7=-4ex&p=xQ zlT1BHptmK@c)wfo#o928WeD!^S}Sk+3w4ynz{#dZewEU1 zL4kqPkUgzipvWjm_NiF@I^CbrNL2L6yWlceIq~$FL0gGZofcgr{Z`47D8fWPbGDKh zeyo6Z!$t&6FkiFk?aPxPNU5OeN8)2?nPr)PF8gNHzHv0Ey_!lu{*qMNxadl ziyu4_UNZJ*6QtN1MEQHJEXZ(wbL!>kH_qcAUehb5-VOWURZt^olJ$8yjhKdXM_3xf zJG~>e&xpL$b7db9+>#VtBs~l52bda_3K{%&f{2j;1e~Zzu8ILVqkLlXt*RT2MT2z& zlzd=0#cKp%mA|h9ZVl{lCr9PwS2<;rAk#qKgrV5yJHBjZ9$9l@a&{4|WA=)^YdN#~ zzgqWlfl&aZ@8u2SpdE%yO8h$KiI4m5qN7QM@uf=%C_${kk|__zoJ01v8{oJuS4bG! 
z&)5;kTr%-~hf2=1X~M!c8Rxu6d6!q#G>C>*#!CKIH<*E>a44tEPW1FGB?=IYR)E%} z`=oPNs3&E?*B6#FoD~8_)@cF)j_$DGrX@ti*Ik@%k}V9!z2AhUc2fBS8eTFuaH~K{ zblh-y5^l*mnKX_7gWQv?C95B7q7se2&m>KXs6f4NZIWnfRzPiM+~b!;4;~&&V(YDS zuxYWY{xq=nd$p@QrAP|Dlce_tWVB(!hz1ohbK^XJ+8=xsUN2s+@!ht2L(HhR5X?HRkMC2;aucG)FB4{NhXvVSG#oc__IztxgYm`Klk zSn6Ras=sAu{r-FNZ#Aq6XQM{*-ZbkJF-!I!Fy|>VY9t1ZTSc=~Jj0?Z{I_UDVL)AY z_cjl$4XB2A)7meL-%C&*sf@{ z>g;p7(F?o7E{EfZ5ERFTf2b4oOA96?1N$Flp+z_mg7XKE8!z^1*WAlwd z=l1?#f#8ZaDC4S?oD3Q!e$$sq@R=Xb=uscaCk#Z>-Ljv zBYUGqZzb<3*<0l#sgs?oH?IsUdOp8fx!=92sLo)oZo!t(P;iSGbHTRw>&X9=wp5tX zHu#)K(Y5p4w@|{d)ssbK8y0E-tDB5(X~N$+&w8y%TB&Og{>T53bq51eZje6q-8biV z_;$lA;P46gP}a^J9KX!>Q-2M*!%wBR&Q>R0aJ(qU&;JoEdG**HE9laaXMN-x_qUNC z13UZOHFjCSSzw-EK0wzbOD^)5n-n4XUTJ2nj zEz7d^Q{^WnLmf+7*N?OsE@#v9N2mJ#)mRs3cyeAHW^uH zQIVZ?vF?1pw^G_jJR)jfXu$Bzahyh;#*hC+#&`0ebbkJwJj#Bk;o!7GqdGpPNgM5s z@XIQqbw6tdWJCRx|2wuDDPNC1^}6=ltL%IYXcu@J*f~157*;7VXHxbb#^!*5g@IZ& zFrH;)>EhJxtoK2FgMlhd1`g1s%N0wUu?kZZ{ohT-ja3ZdL)8!BKj zzsv@!L23O89w`@?Vf365dY4AOKYHo|ydbF!=YE#zP zl(vW_(;25H>w4jpj^h-As6FBF&zY$1Zdt~kO5Fvt{&!*<9;6ocDJUv-T_y1UT&N#d zb$J&a^%%;cMPwx}n`05`p`fn#6wG@yE2M7;_U^$Cb)CoZo|9>p8sY-ULs*F+{vDl_ z(Z?0cGlFHW0hFBs5T5tnpo2Y1K5Us)UcEJRiSr~=>OPJibD;W@q5bW(XmZb1-ony% zOk$bzy1i4Ud-W5F=wod78)5VAqbYMhP4EDZHd+R*$0Q|?-6W9uFB=hHvXSSy`6A%d zF70(HYj7o668}xFr@oOBLn&#ZtKZ;A5tPA5 zQs00{np4$n*kMM^ruJq3us=+7YS+7BmCuJQq(UW1DmE@O2UIX!rI#?n`IYgte#Z-U zvP|O=LErMAbE&0O&_^Z}LzYP(^&D41B(pO+CxfQ|6uCv7bLO9{RTsgATSnLK?J|e& z72!FT->Q|dtZU4oh5xvhBgFap@hjo%gNGFCa(YpHUQpP3=h4vqZ#P&b&qsP%o;|8s zLoz(odqgt#eAXG$+Q(#M&}5Lj(k1(UcdMYL`z~Zn;Cs}$^%D2Lmx;B=#%1>P873bi z>)8K`+JAY20iyzBS&P7q1sJf{;W9lt5b`X~(wks2W-fqu=^-BHQ#ypaq+cBPd_65j zvR_p4=UwpDLmX)+V~%os?s$*R)XVlgi}Ts)la7cTi=Ks6Zbyn%eOZ0BT|7h zuVjYnq4^9-_@^z(7U^qVM0%g)-dO=og~@;x-zDto&F_K>D-!&5UfHULtBv}c12;!* zKk8JtO5s5W+sApO@qBP+Ep08UhT;_!6e_1jOO*T;d6CVYO;7|aL4x0KqM-!ZAseQ? 
[GIT binary patch payload: base85-encoded literal data for a binary file in this patch series; encoded content not reproduced here]
z_Jnx4ow`?EwG`Hr@cE;}e!aNF#3v&o`dFQC`fQV1>{Y_wfrIB0tH_vV8)Q+*pf?q0 z&z`i!KcSkUs^Yv>t6SloZl>Ej#5L z10Bzxb5)QO4z`d3|#)6F-)S__;UWJyd8DazeL!Kg`>= z`L{{pb7TsCM1yT7eu-%fzItUSXyWm;!Ul7)?*2;v+RgS|8`InsU}L(@r5m6A8VwW{ zV}tvyM#&ylJLdbK4ZoTGo1cI9*rbVx1X6d8u0((YbbU|)ewtsNgSCSa zbM)0+s)jcJVGhXIu+hgNsbWk@kLd&$JJ?Lx#VP_4Wbeja^C$!D41E))^a7X3lWu3_ z-2h#tHBB&7buLJ6-W2U-l?8~{w4Ha0vK}54d)5IVva;yx?CewxhycH>S!&6^pB^22 z!}+Ekwxlnw3E1a>*grehTcMpABjEBv_~HD5&d!W~l-RkCLWajfaq8kEW?8!P`(g4P zQ?*ePB1B)1X$z}qzqMT_bNq)HypI_aIfTt^cLgLRXG~58z0gQs1q1}Ja&r1CGAS}I zF)cAdS>a4-Sldru`)+9kal#cL_O8PF7HrBc&dyXO&eE6|7*tMb&)&qveGlF>r9rZU zv8sw1B6Kf27`SN`w0wN3_o$&2oz83RM17Z=YWG_Lckn zyl5&mP55X#nOL=q9HC?mIT9XB3w z9w&1#ti=Er6u6^t48)#`aTigt-WS*y;!g>cJ&FyZ#P;5&9gs#_H6dat`3;i&UTrY@GLJ`rH2G@9XSr)I~L&I6pevSeug(IF$qh zsWnb4IQ%vcN*G^TQ!QA9{*Bd7{PhmV6`elTB7Yu^D}a)i&{&Uf7L(G*0<>fuBjc#u z`>iM}7Vg^2RDO5ydun40alpiS-_sM-a%8JO6;fR+ob+4FW7wcMKhXX-f`V%Xilej# zSc8wDBa6##gFEpVXZ#>?P5#mz!sF?|8SSw)ABljn$`PplAGJr6#Txz2xD{PWdiNWH z-~`A!gyRC5G=TK##K%SKF7?Lw9L}JiPvlSFyhpeZ)M4iwVA&IgEyR>1MFPtj_q4?3 zzj_cdc1!?>Cz4_A>nr#$G7{2lEvQQD*ntJpcIzP2lE3eR6>S+tZD3aSHh6-MQ5u|K zVm^*O-k-xSB{0M_rKux$dG9u#`#suUU0RL#{l2;2Zv!feJa%2z%t7Bq5kSiI08Z^~ zc6P)e&IOK=v7TNepwf19bzMXh`#!^6mj=uG+@XL*AuAgzdN!EEK>GaAE?g~6!s!M8 z;=28SiwzRuu2YDSqU-25`pt#lZcL7jQmXy#+yoRs&4@n8k|OA;$?QG8wR{CSNl`r9(03b z%5;OLrb5nz9$Xmm}|KG&m35xe={ZFqSbtfBf0Uto*p7Zw0ru%vbS z!&T$9(DO|dbFUtzSmT)3Sa)-hy%%Hnqo9#zr^<}`WHsK($_g$Xo~)c4hI`Y7Py&V{ zOsjV1nyFmPjs!7GI=@F``+=S&y0godvlwBb zMK4M^JOkm3S0{gw1c=b3fnk`er;$p!WMN?ukF>0Dz8Sc``WUFB+JPQ-nj1Q`vn@dj z=ZAC3kffyU28|*>=R$QCWl*mJzRLa9<1Wg>@|wfhAi}WoHLDq#=o1nks{)ASd?;eM zcBM5sLhAGy)G9b4ac}u{MchSqJEn49+h+CoV!V!#iPLOMGKCS#o!?ckYdj_Qnwup6 zKTj+&PiAAhJaaEdL*qIMn}HH}(G(3&PNv{Asq7#w!zl)h8f|8q1C9p6E+V&DPYKRW zOR5GwbUY(T4=e}KdpDrJpA>&oQ&D-ZqfCvRC?ImS{qgFFez@o4)Z{kanPJ`#sP zK;UOsSt($~G$1pYf(Q<c>AMp4nR$)`|HZM2^`dzD#ZpXpC= z1v-q-yTHBol=MMJBJK)BXGUP+Tnu8d8nB%v>%$pCkm>$JKaRVt7{P-!3;Sza^*``51Q!df1FPS%4%y*RrxkJUQ7XSM7XOLz>vE~O3 zrO$tobj4DyQ7wq1$Sef|xdSAlkqN`W_$}n|%dj&{5O01()*sDK`%gzyR8-8&fu2QS z-Jlah=)0-scUmd5B=gBrCd?CGEx>zMv#bebd8g(hMaN*(VvT@S18tlydAaL5m>QhID|AoH`Ls6T|2U83C!0p5$FG{b!xpIx-+lYze zP6tW)(yDq0Ux@Qm6fr|uWAT?SUt+I?fXr|j)0rV`x2mMXwXS{17xqm3U1coWa0+SO zy2DtBiDq5UxFUQpcri|y$MtK)l~Jh{U{$H#t;`@>Gl-aY;@{Qo(3H^jdg>hTK?6aO zL)41D^mHl7O04%F#Tc4Xy_DQM1@aMVFpeK%rI5)Qm%1h?E+$T-lV1HS9N*c+1tL)1 zwV!x==*XmhetPV3fRiXQk!1B2^hVWw=fyPn;Jmv6QyhP$-w4lDe3g}s5vx@Cv~T}5 zum!(;`5>^r2bfh|!9APeE6?Efu-W0vit8v=M6SvNcf;Zh<-AE3&Ag zrlOCbMWcj`CoxO#EA)vZWw@dz|S2s!jrRf&z6n-X&TOosv(v?UkR zAbIhC8q#oYjR?{=q3%W5;Qc}j&+atW$gdaOvFNn;Ap1LWQlv%Zf9vYZ4 z55icnnV=i9F@G!o5iXG798Po(0B;i&v~RdgcIpu5v(OcDN_BIs)2mQ=>IEhL zHW0oRhhGa-dsHca(S2+d0}172`@fcT1j>EAfUy;v93!Qf9{-rC8vhacynT$}rGmCC zz3D*usagKT<`$EFm@uEkUm7ythi||=so7~EG$*(OJby9FLrbp#=;<`;v)-8&Vdp>) zE2d)=)@k?m^mqzAn{0Y#buf4ULgH_cgi;=f-Bq4dtlUp8B}?=2uBb=rKB_zmcbh(Y z_UucGOR-mc(K8a33sBEUHD6LPP@g5Bm6}$GJ^=YopeAsVd3&dI{dtY{L~`QN6MYy| zy|)sp{Z0)k>ymkSme1hArqRGV3LvP|ynSz~U@fGbApSd&Wc6`pKzSs0X<$z#7;*JW zOdn5o)5!M^q&wC)>cl6Q+#~=MxEl{1z-`%1qDG6XLzgf}2gF&G7?l{-l!ta5Q z;zs7LFJ4jcvttWYK32|lO!`<3WEu{Y^h&~1J$o9nz%d&uVjrR(EiYttBOHp(O7FI? 
z8;DRJvp9zfc`vIkB9Y>7G?6Pp+&TG+&|@?XLx4u^(b?+iFE#E?7=K3yc<}=RDE+9k z23{yatXRF+67byz;IPzjbMNaw5d=L?x1l)<^a*H6TKCpWvJ5PYXNJxgxZh1kNjZML zP`5JPqF0%akN^Tjx8OT3l+K!fO}pSW7c;>+crF7H@t;DrWk=R09~)jJTQ{<5tY0Di zD&|NeH(4XU2a;GJUcX)2A(sLCf}kmDM#iQqO{YfEdY8`WElDV2CzGP1tLr*V{)2q8 zFsJ1bD}-sz`}glz@7(DI2V45m{%j1y;@vfwOsSBx{!~_pjT@n6eBpRnV;b>*hSMD>7H%raxW@iMUoQfO}k~~;0tV@U*5#X%}D=v9_;zISc8(?yme}3#zVC&?c z0>;%Z3G-`ez zZ3~Fg+=gI~7G(oTjE$uvi_EG=MQe9Qv;VT*xJTq5lQI`J3wp%;pn*tiNZK-Uo9xA_ zO|S_cgRR;y_)JHE5BmfqV+Z#3YzP627uL@{O*BXoa@9P1Vk%y6)U(RvsQ2IuP}XhUVG1*=^|Ko6#In#bXw0D~ka z$!%s)Bk))#Zh`2*g=c(*H$Ubmzq|r<1pDVeR4I|<+pvw$NQ%elM-`v5Be$Q6Rh%E~nF(32Gw7I^X45uR~20x1a+Ob4&0W?0Z*lJo z905gPlJ|ujdKcLh=bv1MdG#ECkp;6vUqron)_dPb0WX6?UTw-h(3y&R0iD1;sZCsxK+=?b4xtis)?+Pfj& zA7lF4%@pboeVr_)eLElpQK_5TJb3`HILbu?f2*pup@Qr2y)jqtI}nT!M1_|V74!^r z4R{aw`gv(N$@Dv0QPy>NTH|ULC}oC8-RO?-K7aCCvv$4~;8F{YagdO~QD`^91<4p& zB&7pg@Mk(kt>kYUABRMME?r{iIgx)4Q&ii5>9{gu_Nth!FYA8;fD4ck8*R&|yT<{3 z_a7;{$1-lR=(U8@(Vs}P6smxhOoELQm6V)y@Asz+7YGhw;lS!O22gAJ%?`90G^+wZ zH5edqI*eAiEHQo(${eNJyN^g-9hHY2BPs+4Xm878zu5+`--EZ$&1vKQe?tw{^DH{? z_Du|5IQ*;M2fV1`skv7?ekJAC)E2ghG0aX56z5Cp=uwHV|MA5v6tQcmy|3AtuaIWtSw4vZ*`NPmwu*~Bj4{M|*gH={lm2?A$@T1~M1l`0{->dl zlF!P!Vs_=4%zV9P6s72p!xMPK5n4o}Sj7`(u!*81Hbus9{+GFW60ZD)X(oW>?Vwoi ze=dSP*c5j%qK8W>@M$0Yw@$%+IkI#D`29cvlbQ(KioTuH7R<8z$mi@<0nV5uYm$P} zPOxZR35AG#IQi)rBJe*fz9d|c-bPT0U+iHy9jXvWjMQk?BY1ncz2Oy3aeX*U0xM<+64z&FUhI^-XSDtT$$mYOSc_=6yjcwEms9Ik@=|$#zuosnP%Rh;<6YQ;vr{v@4;}MjQM~Z9OCbxWADQ7?lj#lEBbsh* zSd;(Fkz)NA*vX7&r{TQg@kK#1)M4}Wc{|8l@H!1Kf~o~2y+=5%h}?bylIVhEq(?$V z@3Y$l7KUL#?a7fH787HAC=Q;%cwCYEU&j>j_8stNRFTL|1D744Udr6jZjxT9&e#ACv*wJYl89W9j;M@M!Ot?`|9wDE8Gbbh5OC`Xcn74`_ zM;R9d>pK_+m(0#RZI;)GMR>4S^%s<1Drr9_19CtZ4jP$+lw? zqo<867k;fsPlCC*TSAeak_`O!N_Gu7s zVR6+pXV1^py?ruX9`f&>wVqP$nHU+;AAW!O#jg{eUn?o~QBkkVX!_rL8%PA|U;?Qk zue|<8?D0j;sOaTeITlE!uBltdjatp2^`FX+umf!^sm}i3V)nwFWtl6vrF>87{+8&LyqNfz+iB_C zVdJbNBmlv77q~#uO9?QLWvnRD``hf(V$@pFIY{`f>R+Rg_!`Dsp7JaHCXd*dszVj8 zy<)+p90f%IFyT7ib8n{8=Fv)K9^6M78zk(d?|rP!MGlK|HHgdhY`Hl11a|5UtzwAK*a&l08b2WIIzxu?DlT|XzB z9FCW-95yuAjISEkt*YEUl(s1Sf-U}K>6YT~aHy~s2Yzi5H+*iM0l$aEiv01iT2gC> z^zIh&yw56A3NC$dwl^1W#k<=)=}y>l607bsau3!5%XW<_dkidGB08Dyc22w(WcuRi znyY?IRn-zMw`Ia*pcv&=ZJKLqYxqS8&#o$jqeTQ_k?OB`k|&^{|N7din080>>>2lb z;rp}p@u>>nO4|$=Im%hCuCBTL{r$AZzS3&p;KSq!GP3&*U1880;8v(F-!riONj7er z{$_p@T#~gxzz2FeH5@ zh*GL8hA`18(c^K^F(&Y+sHx#VhQ`M7ODV0z=@ryB7Fn{g-ppknqW*YT?S^)cArzC4 z;GA#VX!8n1$0JHckNC2eYbQ!89hw$lV1j}A8tisfmg$osQ9RVBfUjOBN#mK7MJ{*Fz{#zSJ&6qb6Z+kY^F8y;~Qt(m)3}0f!`b8 ztNZ2GnwI`N*qCBZqWkq=`kdgkcGuvnp=Ge^7@#FHjN6r1sEU|E}VKF!9p@tzb zF%gXzb+ayTGs^pABY)n^;A%-}DLgVV(#F`L2o#MTVBzYs@f<4+O;4MDNe^5?B(Nvl z{_U;Oun7nWhd~>22{Bfo3pKC#3dVwTDB1b2;@ z^1}ObbPzYA9y*g6;^cru##*%cm3@3>9o(3sqNyo01K@GUMtE69HzORuYZa)0f#nYH z{Bz<`@H@CFKu%DtySw}2wFRf~oe`HvFgl{R>swp-I6y)kzPtVXV|8QW8~H#l)NDj} zgXL{O|HlxaW9X5KY>Zu^mRd`Tb8{+=Jk$iOj&gLrS!_G! 
z1D9^smXRIp{Ll6PgRcFieQeBdbwAK&mI{v)AMb$ zwTbi7BVUN?j-`VRI6K#pWl>wDpF~^RZ8oDf8T^d8toCymQs&LMwoIwuBc2zu)J0#u zkFNMQo5W{9p7Kq07FiV(bUxB z6c#2Oa&Zwf#VcS$D{c&5kDk zn9c8fTQIWI+B?koxKYAg=hVe(RD+@Oc@SP8^gK*>c(R{0vCE~$`dVw);ikoo|FL9P z>JGD}Pi_}PtYQ7QI=EjRb^STiBF00F!LK*x2YuG`RbQ1<{u;2e==Mdo57PYOr-xhJ?t&L7 zqME1aTP+#^raP-?$jj*YFDew&k2Gj}r;BKnsm)sgYXtnLv*BOf{8~h+q}tkC#N#7> zz0Kc#SY!hBeLNKL;!eYFlj!9QQ+@1F?($I5a|W+gb{{{HJu3!&s3NU(7-0nqN8}Etf6!ZR zQ5?q$2sbzNhZPF1>5N{_7Lws?eV|<`6d^0w9@d< zvcR?fUw_F@<~MrD2OK5##^!IM4K96@(+PwCzNSj+uN z9>ZJ+8T;M6qsj0fmhcW*=SE>zHHz)cjUOv{BylR9iQl^1_3_Mq?CzTo*aMSD%e%fi zJOA-~Os4hEAM?D14K#cy9Ye{=X{P=Rd;ui>>qDmVNV%!ybu_X$ypFY5R%GqdF;>;g z8_~TvCi-a_`S$MnuPO=}n)&Tq>rZ>4Nrm63j1m+K*!j#rM(n|8-K%q26%#z9!Q?cw zQl|DFRaSWXv>nm>v@h2mOL>O~B3EYzZV782cSWB1FXQ||epu57Uq85GXr5lQa+6s3 z*t>-l9V~nC-ofE(iF>XxNTa!Ebpsgq*qJ{ebbDjPsW6|KYVHpo|Mw-u4w39 zbL6*8dOo5)&TFQM(8XmwxP|YC^R=7qxtgG*^i05ZLXOTLBsjRR>~h6)Uw(#(meh&= zI2U5``wwd#Yb`W5zw>Ki4aCgty|CML>%nV+De~N6Jk$vHl0Ax*$r5$hWXOhuNV-24 zZugbb6)~%@&;)Op-E9n`l{?&%qQb%VT0YHwrkB^C?qvx_b+Q|ZEMB5F`AW{oYsPYz zh92ZfYNo@#k6}y>>3(*}ji21kzdyTtgQz*XWY~+MUob?yT7jD`*WTa^Pw>lbDS7`p z(ihA(mL{vG8*d6eU!wi@VcZW-X?=h9W-pxQ)UDW~Vxc5$_+TFulbPLA*A-PF|2y2A zN+QZ5$FJ&uEo@SQnU*v(xH!>{#PpZ;kwx$8a`h3w7!3onx$UY`;&4erFR#v8xBbj5=`!tUK>iY*QEy)WW_OhK0b{%d336FZ8zYq}CqAc&UeR z`i%_@sW#&+n0!l}bjqDO-}!C0L(l7Urwf`Esc*0D+^#2r^e0L^`MOzBQ_exbH6!ip zF~ek)BYo%o?KW_P@0JOO?>-C{a|{ogQESu;CWp1Vtz+-X)6nXtRQbH-y#`%Tk^gap z{U15NoXZ$9b}z9YBZd-UPEAqIO>TZCfd1G}T_Z$)>*=A~2h~0TzE|~YRAcY@FU*#- zpZSlcA>xJ*LA>)p%srBvbp3pbfqvtQ(&4wo#)UG}=LkNnoNoj>~PTKuGj=P9?j z8MwO^Z%9K})%0e1bGg$j>zxA_7V1ZrI%V@oDUPX*w?w8{4wYSDSls==zh0{ed=e59 zTDtqtYDd0fa?J*ca{Z5AJGSkIy_64!4DesP_fgu{DbJ4LXOyk;p@w)`?f84|2x{q*?i*`7^or}DD;JLNA(U&M6 ztBF$#7L5z$;B;9rQ#2Vc?M}vhy#HDX&0(@*pu0yJ?5B`mFq;A~`*uDQQzw=TVSR{Z z>!zvTG3~mXw)y6(FMGi-afF~s8+fT=;{BH#x@)n+!spEsxLQPID9R2rppEiK+zr2BC+l7y}gQo?ax_NoZ^`7N_yQKCEQ0wdBT8E$ez{iP)*=pm+y~z`G$aKpYZeK$=&DH z((NyJh=CB0qE(y+r-pl((cn9+`_@`Qnr2}~2#3z>R;o_pwk}^(8o%LM{QjLHy$dt4dP3o~|UtG}&koOq^tB&dQN7Um9g*F;_?( z&8K^^9y*kchG_J9?qf1$)5yzNY!w|!MkF0Y@^uVi2VLgO4Gj%>O*+n_x)SAkH@ug}7M zJkp`)G&eTw_x|Ji3(?CneSFw~FF*dg_RcG@df3U3=_I4!-|xshBi1(QgpKU|R{z?D z_lZ)9GR_XGWdtrXN$!;M>a4j|v3**b2=lt0|9g#m3wDxtO|v^ZPQp8;^QUWkSq4XC z-M-}L&gRq=dGHBwh8pTADKGnRw`4jkj(Kx!ko%ceco1RU-`dA;-O=iJanlcxv6e`b`{uGr|A<*04yg;BU(`G|{Wi%=gqU_AnbSNm<8I{5^>{yaWn@sJL1 zUubmO&FNl}mgF9{Vbn}jL)WwjjK53#ZVlJY5odDxbVzU_=1D9D?iKU7X<16V`Sc8& zP4G~4%}BN<@nu^JjU1!)O5=Au^BWHG8Mnps&s6v8t}TqNRP3d?ZSOSfxv%$OEhF97 znHO$4SuKk~b0z4?YN9(-uRasCgmGV*vWaosSiuxL9ARCF`F`lLi!k@N->~&9Y?gj) zZ3JC^tIq38QsPkS>TRHwSE&SGN4 z1N}~Mv=d>{wNTsZ*m6zB!tp4VB83n_=9Mq&OUIFS>yUuK;~DnypVs*96qUk^)^B8k zlwa@o9y46vd_N*O&33!TNNK zH@qIWr>W;kp|X_IH49aJ-a3ppPG{9l*DzT6ElGZH>B}=;OpjjcIx~n56JQ_-Whl~i zo{WoUrqp(sD_u3)n8|N#I#BmTThj2I!1=MZ-zN6577{Dy4W~Z(MmxLa@q+0G+ywBt(PR@i0JlOV1!n< z|J}Fp4@UzgoI*JwZ)s+eTS;|?wKSAQEGO`=#9NhoqXN_IoR2UR5byvf+~k_CxkIQw6%QF&tJr$>Y;aF?uDa>1XExG9w)ZaXHP9e@xHh59L{w zt4qn&D((`7S>y5dwS7ty@Ls!Xt)j=Dl4jIHTw%eW{v-T&84b4E+F{ZE0?Mu;&YWE{m|fR`^a+ z&DvA$CFVtw@uOv%*KAaHnRQa;(;)25jJ(wkc!;iWU!T0>0+I6E#;XYAb7V@AduM`d zK1cb_4uY%Qno!@uXOyQx`aj#s+h?X(beLg*zlPJuGr= z+3ndVt)=_AU-#TCEpF#IY@&R1X}OU7v@yg9-}Th)AyIJAqjqdL(RkO`)`QFP=+$Ml z{634~!q$UY8qWu*r*5l84=~@)D07^duRY~}O~IxY&kFXLrljcwX!u-;ezZU)>bs0f z8?uoV-OLf9TOWwS4{el4U&B0NGBT#yD|B-wo6cs1RL0Qi&`!YG|M6U=LH$uwcXDud zx>g^u~#pw3q%Ml&#OWPYm+j`D;pK`Be zT_?5e8omMB*LJi0IA)dflevMf&WZPW5bf;{30xg;h-P%?e<+_({`Ccm+1F3cUzPi+ zj46*kRr2XUO~qoomwSiYx6PNQx_xncX&Xsm2>ozgbM+>3i?TJO5Ufh9n?0C`cvVN~ z>R=y}<1mrpz2mLvDjC}L^PlNz*;bvGyH)ynvh=Cful!hjTA9VGocOg|ZM*FjRpGE? 
zSys5$v+b?ba`uAfSej(p$&L#ana*Fl>C_{5&85)pr7IJ$fkhR(ozk8!Bra@J*W9i} zv?ZguQ75o086`pXeB(`FBVH^SCZ*b1ik0!X9<+N%GnBUN$rF62PR-t=&JbSv_B!6G z2IV~GOq95tTQo~^*=Z&*Hh?Q~!*K?ytZUqZ9+8MS`B2ttZK(aYO?PnL0vb2q{a1~UcsWt4BNlSPn;7B7pm z8>KK^e%4)NKVu5F5#tJ?ACrF)VsIj zA<4#2g6t`}7vHi?R$hBu7_&DQ?s*!Si>8*P%OMeAlRVqamd&YM8;fu_r3$D>)txzN zO~c69yIXG)g{&*2D6`S^DkmuzK2A45_JX&G3yV3SyDUlOu?6Rb6Wq3cuC^iX-Zi*)=T z{y_k@wpG%1*}2692}71Ts+O)!p7>RZ4>sctozudW_C{@skkoKEtb7#F-}H3O^O9_i zS8|M)_(E%Wot$hr^3$n)7f}j4TBx%!PNbN20WT!>A^xfRUH=ogHyp!SN2Jzoy`tRB zt*#SFX*BgshuvyPmH6{f%_*Px$5bK(m4}tKAVh?Qm2r_b^9K(r#7z?tu>J3mwI-OC z#uQ189+ojG= zoLF!#o9rh!b@Lgm&y--Nl%ep+qmKfOPccW9wBKd8b2Y`ZD$FQ~e%ZmhGh=fG`9myU2~MQpXEZ?5#U3OgQ6nRG52 z3rF$(kS}Sq(4t_@NNuHXwNLTOE;r0(7Q0wOPwid43+0fD0d}!R=BU=6@30|!?P_de z<@qKbOY_}Us^Kc|QO|32eoudiL33-R&(dPxy1V0tqtA;3W_Y}v)e~CXN3^&tu2Cza zo+MqFmXk|T3)AkD)(#HwcbLuDiYPDnXmKnhN0*r`HDahz-;`2{<6Q^;QaO*1>C!~| zE4izx9caVG5cb;(>fgumx3syFa-tHAJ}jT+k!*BF>v$$2jV^Z5E9U@>m@*~fAVl}W&w$P93E^eEH?O$rpJn~UD06oqDK?gZ&X197M96yUvHXk<;S?( zxDk8v&fNO*<&FC&9p^dy<~&TQe#Yc#h=}+m{(;-(GIrK=nCgiVMu5MV`?!ZGGOl6U z86C+Zq>0s?8!U}YF9c`e9CIrgwH43RG=&aNSMq)Xk=v?^H$06|Gn!v9dLUHW*#wcK zEo>Xhqo!z*B~qMGx}uK9q-lJ6he|ElDt&r5$KAV~`F@%Q4z+f@1tHaQY`50E1gYsG zL53yLop+v{lQVv=p8CMJIa3uiJy{b|6{VSflY3=fsJaH{@%`!QxeX#4)7+iCk(Fxa z)Qwt2M`XDrX)K(jiK4t-4@;fd)bxDnMU^pm*Mal;g4?mXZ(k)vRBa6_Yz99~C*nx# z9=g(|)Hd!NP&}P2*%fYC88I}Ep+Q`R^vvOQ6Tm3nB(7fUtRiRUd2IuUpTFQYNGdf| z@&2+8{E#kd5jR!0{Wlz;EwDgh)QJ=<=rWUa_n1mM6Xu8dV9o_mr-1w(d*QdBM0S$u z+uMR6uxphl^~j0#*rybI`aB$UAk1#-80*lKj;sor zd=o!JR_$Ub>U)gsg?gyMC{oEDgCb8`#*Of3p~(k-a@&_iPnIM`Yd7AtqCcplw1q1# zOU8!Ah^MOk3|pUWxot&1ht~KKC!+ro{V>{fMXi%nHe+;=pJiRG4!`SeH31eLlZYBQ zyMshr*-xXMceRr!cavx-niSqMuC%lWZd#JH_p03z( z-MPTq&4LSae14qg6#2X#SH@vxJ5RV(+W(xd6-UaEJ}%X@qHjIRufZZOM|0inc%|Wo zid&w!}iwQx7~;*?x%K=k=1@=I>4zE-KR&eLV7v_NU<# zT8J8MaY#R$C*`Sh39nfXr_gej;<;^6$OTrOp61wfN!EnqaD=+z%kEklkPt9msBE*L z3D@IC&mH7+f6#0ZlJlarj&kHXtbq4H6>TR&?WJytIYe)l#x^U5;IRbiNs>@zTIa9N zFIi-Yd!H|&$LD%e(B1V`*`hNZxSKXP#uFT{oIy7+#JjV^qmaX;siBb!JwLF@icS>` z!7So=2gpJP*WD{4N%rm!evMFTF;G8V8Ar?+IifmKve?2M{ApR;iZ^{*&^LEB+C7?C z7|(GgY!Z@zLs8-xqTxs4!W6>glNE}yFf{(-(e=Yz$^zQUx46$J1>jHsT7UtQOK+lG zskIpDHKPZ4ACzFLPbQSlRSt6vvbqNwcSjEkw9RG0$+^9youT4*EaS?A0=-)9FzfJe zZrstldCaC(p>-?xC2Y#b7%C@(%#~SxwH$#mxbivfL+QtHUx;%$cZGH9Augk8-#F7_rdft$5S2y z%5UR2&@O&TYgVAez$!`MojNyXFd-Cs44w*qy8Qj#@wsKr+R0T)vd}C{q?E0QUhoZ> z(VuIl$O;x)ga*{n+MR+C(h&BH)3Ii*$kaC?Y%<)FUateV0LPKi6s+v;%5uwG^6>i4-IN3TeOFfib zYdTm(`@XNJJg$?o%1>&5<`7wExM8^IGp2*=kI}TkZJPT+i`p~PZf2=N5(_0L`WTz~ zZSDB%EOnQw+5{vKaXHj~Six2PfUMTs^YyW1HKHe8vA5%7x#=}ceG03p5bD{UgRP?5 z(-?zg_0)w(>!P)>h;x0HKXp~_qc#7--rTPf>}~zc{6PTv7;NI5vyot%l{{VvQej7V zBDf1=-TB{wef>Br=0wFz(%8VRnppwGzi-`Pu4P>0bNI4R{_5I?EN8YK0&gMpTsdJV zv;&=<9s;OlamKIh)b2Y0VhJ*lQh3e)dhVKQ)Muk3kS~mp^-x_gpd*SNrZ0bANTLnO z=>N&ht{?1j(8rKjEiuSpi=i>9yrDE+gGmLCHi^trWrK5zi>L~hpW+aDm#8y^;w+Fd zx@eQNFSN4bmH5erPfXYf9NAPV@;C`;dZ%cVu9bk(f3CiTJ*s z(QK93{m+qAct;%U6+ZeRGi6$K4Ko!!SNFc=rfa-NF&ga6=)=^5K%%{5w`-zcT~@Y+T9sMRu{nj~-QL25gz2KiqY~@P65KRK(`i#+?0t9riOTRf-IW<+@>- z{^-ejap!sPdL19{%cC4dCAYmcFLM-htr$f)#Zx>i!4x}K;(K%PWVf85aGvf(W%&Pe zP38{zO79BQs(hS4BZZ&d%q3X3$~M;EVJ?jHszHkjEE?B$9V3_7LZhm%!ooxBkH@7@ zBvTKl(u$MX28WI|XHt?GldsTxkL#of(sKOb{VFtBDpf){z9Wco$%~5R6NL1QxgPyM z|C8F|h;kL)UgKCqQBGyvZ-@4{a#&#mz!x`(k>zr#9JgfRTHA1@>kF-J7?*fBq7NN{ zo1y9$00@+B(pQ9zr?0DgFnP6F_a0N@c}RjPH!rG3b1|7^LmFHva%)NVHP!;)yiyRj8O)ei9VD%stUp~IP{``7B)~i5pGu&|nv-a^`t#CFyoPC{WQJi1!psT# zkvMEG)%CXEH_PfD)Eoez^l_=ELaj@6+P#&bI9KqF|KW#-(y?3-*Y&sQtGb=*v0Guf zSze99tm-rRJ(&*(SV%l~NgaxihYHAS>sIY&FQs1_bgKhR;~dFC|i7_O^d)Q 
ztjU0qupdoIaBysgwBg)BiCtBXZ$RG-x8lt)80x$W#WCj=MrrCjZ|$3?fFCy#vx z$RaF#8KKTu&^6c4n~SRL_bx*xe#*uog49R#N<+YeKR+?wjm(dZLQ$E3zM->QRFV*@ zQ%K6LrAVbB&A(}MQsQO@ip9P6ZiyscOU0-))wXp+$iH9k1g_Ql3)GsXCoe_X2ARxo zWsn;APR!8?o^+mgFF!8BH)UBolSlqEw9>9kff7xCT(DFbpUGc|tbL;vnk^XlAj42; z$X{v5xzt|0FP!JN8Tluf>+`oaelE726O~xjG@5cvD9Z(acWT`Yz!`G@-kv$t>6+Xl z-6S`IcGoLwy6XMxkGZeV>YDiLZcFi}SvvJRczVbGsr0l#A#`(Z&BXTO2fDM%KC}&*Z z@Amr%Wyswv3m_5MFVtTW*Ft3>FBnasHD=W#x>3%TZ@8-zT8|pYc=la9u4&r8^^~@R zF-TO8Rsn|9RBFuzRTZSa$v;`FN}uj1Ta%ESYqPn{5AsR0r1FYiXwgvP+2=P z3H8EBm!Rq@UfvtQgF+Ip&zl?r2QdeE-8&D8d9(H=U~Xxlk^YlDTNn*Xb2jyyXT7}2 zVH6o9 ztDak10Pa37LA-zCe(a+J558zAT%>hba=@QPX29GMpN7GAAyDqK!Y zp;Q)eqT9vz{P8B|jqYhP;fk11m$CkOeq$nl?OrB`Zjpey&N11pk zOXA=@qlrnob;jh?ymT!sMUJu`AByuf37J&C7g1h%t|WWmr_%Ck>{_TTTq6`_z8Z?W zKb+_>Mh@Q}k%-DJ+u#zd|fI9vSjt&L~*oy%?6hLR3@1#~bEx}=?&hbaa* zSJ%@b`k7PW=_%Iw$qhH<${%D`#x)q^Squm=7qp-FNT>5?o-E-ux3e8JFE0M5b9iZR z;uZdd&~C|`v~=p`p9_4XM9fOZ=T0oEjVmN0+0NxI9H-Bu>ZWJ|3Hib3o5 zkFwbY4`PP#uM6F11!bZ|gvZ{~Rv|L9g%{8Op*H#)qz^LV8?fgnT?*o{=;xnBF|TW5 z1FDZFAUn0~x|Qw+N>|1+J-M6_$0ahuGT2*4zKsYF3X6AjL~mvv(M*0jb`CRajl6oKlWwox6I?NR|U*k;mZIS;qJ@x;BMIVwluS}b$Yp@KSFv59t9z4~2 zR)WSl>0=sZv}@Y?k!ARuIGiIlvdN0u2>dI9rYUP>?uae7CSHBLaoa?wSy!>`-Ly2S znaP)@@9IS8w7*qz|8v?B670!?vpPhv4a=X&Pwi)K_O>O*>p_W82(&Cq&J-E$x@!zq z_-!!dqFRdNM>e_pq8=^W@2ox+W;&`xmlcpgZ>=7k0lmos#c}lZ;hx7WOSQLsX$M(8 zsWc+2lIW||-P?4sx%l@42X+^(v>sk0)Kn*i^aF*F;gr#43|IA@xa>bGpEBVh?Dr7K zT+mf8X7?T^QVKnq`Fy5O!Jlr`%G;&o#M*Sw3^J%I?ZGw=N7`# zUD9y$bDM}kmuq3Iv#PFbth%M4o7|!!%RB`aN7k$1;QW9i9o^}n-rNV}B*4!#fq=~X9iq9THq}O~6gI4aZ7DbB1 z$48pm&eo&Ny$bi10?hme%Xt(U+*V95)bk@3~%vcNf=Rj5u$1xzzGnRC}zrrJO0Ie6P2Y zc3rI41=U&p_R1j2?Odut{0*YuBg=e|?!j|iJ=cl_elWG?RStK!lnxG&Cjr+q_K7+5CQMUmo+`YhQSR0|E1vuJGASu%ied4DP^t zq1|_!h{=s|?3LM#_Jz=CUR2-5*8T&bzTmO<(B;6>UD4x>tQfg{v|n3tS|S^AOoc@U zGF3S9XY%@r!g8{17A+h~1_@s)zFx?96Wb_4g|DPw>10w%bt*_1Se*)t_m0e!7I zM>&4I5SsOOw=TzYY_A3=Y7hNzvL+@!fQVN@1(~wln|o*C^Fj2dPJz{X^Rl(E`k8CZ zX|{~US|Vx1$#!hHypc?A<)U+r#cd@B^GL{ccx!et;yGHMz51cZYg_AW_dVGhz0R=gG^#Ao4Vz?+z^oYt&xT$h&FSd$;y@ zej`By*h1b%;F0Gaijt^Zk6_9KQpyc$A$=CY-TOj;qV{P_Zr3nRz#Aqp^SiXT(^%r) z980yQhZr@DU(Vz~$2S5~NKtHvMx?6rD&TcYw90_q=Pi74<*Atbe$9uUX)QJj?C_XE zJKEe9{ST|#(81A%;~^>U7kT5fhuA!UsyZZKc>DLS%4x!nezp^#-yzq&dFD~Ix4DQF zaPLt)CEk#baF2Xtw*3yE`=%q z(jrVKMUWcE!SHi5WgbjHZGMf-ynRiO)qdSd5$&@qFNOHslV(^5NC@4Q7(Eg3{x?r5 zG%QOE8$&r9^ns(79rKl`pQVLG_kV4C4{^91F3(&;qsTUi^2D_|LHp_BD`#F<5STn% z!7pY8ON*BQ-kzwb_1H1=9V{Qn!Z&HbHoLgR%Xdq%ZMx%N=FrP-gz94qgX(fO$s#SG zPX=;L9snR!PyQre?Y?>`4y){B%QZp1yIJWT64A*d%op@WC|ZCO3#H&Vubi5eTKm6m zf{0{69FQ^h7E<9IWQ?Tam|T5N2%Cnwnp)WMWM^%W?PSnO*^7Da1$pE9&wKQ)7yBLf z^D%zEdT|GqSGDE*$PULpi0&=W8o;8P1*F2S)t}%oMJ!xo^3eR&38eps>rPzkJJz=Q zC;+!R5xU7wFL|@?#`8(?rMaOYXpDY#4iyoz)FLcRQCl`(J(CLy|3UHF<9U!k7Cs!R zwVNaq9{!Fg?(&mN8&!pk(X>6s5g-6LAix!&cnZAw*yk(nssDX5VwwcO&7mDTh^ZsV zi5i(qh_;|O#%eXHC&^Q?^fcr#8D(|=qtG#27?S@#F7c3XYO!ku*LFVhpq6v|Z80YI zeL}a|vj3oe(xo5V`Jq1z?!WltwxnaN(Q`;`5J?0h94D#Rgn+;DcpsHa=u7PGn(h1= ztgL}NY_D(6N%EZ?tIPAKYD`HlVpia_|McvLGFZHs!#Pwf1CQyOWmsv(jNjC^RK|$C z|8+%#6zsH~Tgi#tE4mPEhlc_ah1)x{IfAcs!!4oXhX2C~52AQnh-s=x*bAn<-B!w< zx3+c$w))h#r}i$DfSy(^6N@&ICD4_H0+44-@9JH>*38`9vou85^o~qGWv=+Vze@J+ zrHc7Tpp;G7a;yIS?6TbsmG9&&;JEjL$by{_&q3J-*2lnQ@Qi`(KUEIj;W89+@7ncu zej?^3F{n!H@!IUbltKvRNrtkM@@*)fS#m?-YCd2JZH)MQ5hL&aYoTc3!QDNG;&b-j zv2et_)2f2XXm%w%o*`@*qMS$H_FLU6i=S_N(%K9we0Ti7&ccF^C?S9}DV;Zrc8;#+ zG`Vq&Ub)+*S)~{0Yy}{9Hdlv;rAR}VDN-sFwl$X;eG=3D>)P9IoOepkzz4bMZP3PjLA~xFB-w z((N=R(_^C@agux!seI=Y(cCe?H`|>+^p;hsPcTPtNTt` zd2|CO-3>_j8rcTVU4c7C5CXUWtuzg~toBV;YkLk{dZ$Ye&gk9u`FrB>uY%%_956GQzxnh(K^B{RLtDS)~0-Qnph7zaa&#`#xxL 
zN92MqG}s0hF)6|;J^HiT34H(^GSGP?H>LJ|xZ`UEU=w=}qQY^hUA)hWl%jtKg&6~4 z^EC27vMXpMctI8KvNjqzaInRnZ?MPveRxt*SPZ$?YA=d|E*#`SxaR&Y)^^eDpPk+j z%;Fl*3`DiXcv5Hv3Qk?_bK98dWF*h{d-`g97HThQ`$I4Oo+-YR^aSB*0aqrF%CmlD z))~OP7q2P$h^cxe9L8ecq+^hl3fKLZybIl{5e+(Ry zY5v~j*F4}bG%#DTDC2N)SO4^@GZRnVHF@&xtYkGoh0arlEDpQvSfZtP?H){p>) zqRgUMJXqSl8+2Mmu$SDVZ<)WWgUIHhYicH6$<{iapgFZz8NYui@>|1aaph_ z?iGPXh`_@dm%#pA5$gG`zWeX2Kj^3?`{U;inyj&5PuFA6)(Fw}5L&Z7L3>(Z+M z#}WTLt1mYRnpO(VzFbsjyMDIK-`v1yH7G+2;(8A4R3U#D^63O(P=`E^P!rIvIoJo6 znaQj^gobLkV2_NU3ZW?*u9kROQMAvtQ+j3@tCJ6!8@1+e9&=zk@IL%{TP4a4kO2GH z!tpWN>w*PUilnK8GkoJQnPHkJ@EwpJrDlP3@%8#&Wsv}Q{)=GqM^PuH=?O%w>Y4yX zJaIii?&JLmr~=j(WJ~Eopyl$Kg+im;%@x%d@6Tvmcjnb54EtzMPE~yXSH& z;O@`_PB2H81Gujgg}4_KO5Lkwc_1ViAjqXPN*uFHjwMUf|6}Jg0azMSh}Pc)mi{o+ zPqc4=KtRcKy(idpzzp4-3W`^I0%tZ4JAFURXwo|@%l?|pm{jq%AW6-&9`ih04Z!63 zpf1e2MJ0S~e4$ljBbC4~%OmJR!`>GfX9M9mZS#9I6|dbCLdE(Y8t88lP`s%uoTU55 zM#Xy&H^SV7K5!{g=qrHd7;J8==7SJ7{z81FHFR0+7n}Jsu<2ShW$AHz&w$NV(IT>8 z#+L^t9pG?I?Q2%UVk$RR!Rqa~{~&&78I7eIzf?c%Gm3UShGFmQ4MfH;cXg+>0x$Gq zE|vD*vCSs{4$=FZ-T3WHcb+uyWd(ue2d*Y*6X+rqqNUEu+0?Svvu@T#r)n|0a!S;LQJ`MN_GdQBO14`c7?!fvX2twihOlW(lICgc>zp8O&=q0Mj-_ZVt zN?h)YCoPL|!AolhH?!sH*lcK)()JpC|C|u*Rs^c)rg+(CrYSd;Y;=u3Lmv-TQy=ux z+IQ>~zhF&EWrLqF3fdZbuYNxCmx>>Nofk0ML-}?f_8-6)LUkClcUIhNxrgGAr;#77 zyzNvE4Yv!UG6y*!c^7zg&{aUq`1S9~^ z@xNvL({INpc;fYKyy|eNX-dyT@pZX3fYa)nw3x_fpH?7E!fH8aO>D^6KMkLReNklK)b*gG=lT>pRr+(G(HwqK z)}GF6S8n#}ah_ZR^Eh$gpK!{(JHtVe>{%d+EPg3PguK`XasJ$yB&5(Fcw!6)wxyV| z`(01jG+k9Lb+%&=b$e9UhtzuvjE%xf>)HN7doY+j_%ky=<#?T!j8Sk5O|;qo3rBu! zo2LHT@;-ynG40Qxx&$P(zqDKg_8~%K1z3R&Ummdc zLUb}G4D!GC}GtIM|R1kvDk_M{sUy4M~ zKW2!exu4!?-S_4Df^EbOt6ESfkmw zyJ^Hs*`$N(1|TP$`RdCdBjDII-9*UtK)*h+p0fE4j*5lwKL!dNCd_J54VvEfx6Kth zeZdoSb1i8JiD8e(sR*5d4t#b0k6f&_4fm10924p>i`TuONa19}Z2hI^cU>z<7Pc5Q zBOD~>CIsaw^i3{xhgxQCLi1(Di-O%M&LCH0k}w7Vf?vjWH{9P3CYTb>UqDNOQN6VD zgsi^@+4S}S$nlEU2&7#|yoG*#uO?72)#fDAsS<}o z(J*{$F!+sfzt2z+tU>3GlUoF&RJ;52HTf@@nN{h2m_J`3>aRA zSxum#g)Gkw^ab#LeI^%J+45JH6_Z0i*zu3J)&HI~Tp}*tT!|?C@zr-_8r|9peLy3^ ztPH$3L|#p~gzwT{u;8Fyz1|v~icyzUFw~>{<_5<<&$#`p25``nGPDOkpbOI`V22@D zOgH2D2|k=Kdx?FbTdEdj&742TPU~ZO7Z0svF5XYzqk0i5D&yZlc-_$897)e`Q0T9I z-18kFO23JI{8ua6L;RT|#_l{Ax&>bw0K3IOad3OqXRwgWPH%0&C<)Hy;m+{qggGJ3 zG~NZU@&$xe2C#Qk_*@o%R&Is9l8cFN7@G>*Dg%GkAamGbq&XAWC z;c}QC3R!=x(C~?WtnkX06v3{bqn9PCu#0?ywF=qz7b~=8($8 zPP(I9@1+TcCB>gcpszx@-Xg_#cFc`uQGuJ2*1(if#v8L!#@>?_Q=+EPtTEkp>QN?) 
zcVf?F%;q7i*=blaQtJ;B7ylOSP*!fE6?UQL+M0uweuc`OI})9j>KD2cSFpb!r@m*cGd2^=bK37>a!fk?-00Z+xnB=n&P6<7(nE9#t_aeZIv31C;q5<-!1vZBBVG^6 zQ{a_$n&}tVzJt#x$W|;`yYXjPS7%9-Hk4?0h$oh?2~tI9=z3YBxXP9>8iXF-N1f0) zhCy(R=?1efIAjI}FSR;4fM;=zfaH};vw(A?_JxCohDG7$8b5mdDc>s8PR(@wQVi}5 zGus@G(uV>#L1v*^_V+FJnJFQN$&;WosIPl#QM#?fF# z@Y04T?NujaJJ>*AZ{pG-Z~f&tbvz*+7ah&wDEWS~A+jsA5!_8JL3X}?TAU}YJlKL{ zQteK&LOspx;ztMaURD56-U5hv?j*OAq;uR#m=s_>(EV?7py)Rwj8A-i?A^orR$;{` zN$x93>=+b*q8N;Xp%FYn5LM2(XIQD9vAe?S)u?bC??>-ROw-n!0)PSK?gu*MoBu=!*$tLbt6oqYbFt23Xb7@;}2j57G+;fvR@`HvQ@$8EGRcznXmUz=HcWr{L;wbaW^xqe z{a6N1+oEsD#wMCW;xtQkV8_0IIU-7W?OY7Bj%nF_lVc=NS|F9qSPim_#hkdcF2aO= zJ^{S=>L}-_E1ludgo-skJ)u9E0P?YyE-B>OBY}xg{M@-qUK|ImW+LhbR$Y>uBfm$F zf==66`b%0%9Hz9Z`NYmPf1=x;Zc^vh*iohq?wzR^^i=2eI~UBstb#m13>&4te-AGU zos4>zdeP^b$#i8&8kyAhpR`l> zg!v{y%2LYLr&3k`VJNmv@*YI2C^zyOUwl^cZu)IbK;RZ-te6c9qa!5wHxpvE|1bap zsbTPiIRM45W^Wsmt0d`#Z9i}q){L9GlI#_eTfOWdAIz-OKldQbGSPbWc{nq(agEAo zL-0BGrDDz3J zMU+g6)Oxn3w1&i&hX|QdvlYTgnY5xl182Gp|^;GQ3epa>YPfMGK^4iYA`!uMJ1OZFZ`49`539|65 ziJH1!1b(yZ=Nm&sR}$K6Au+Jyqe3XKC^oa`r|9E-!o3O{XL34axCf+Id|sT{-kftE zaFR>nN{bFN?UW)Kc-BQ3B^8n-ovUN^M_Ddbc7Wt@Bgmy{VC0R5busuI`i-WwgN(64wn2E+G3A)GF5{&v}n}=WH=foPqfeq~zG80d6{TP{M;qYp(9S zQu;Qa^!D32o;ZjoU$-0<$C_g?cb8Xrs4L^#^`u$pmgTY8Hy3WRP2({_AzsdfSI*0J zT~kyVvZ40wgjtv8r+S}ZfiX65kuI37N87soLj2+v9`AK>x5wDXK@~yduD=;3^*W{Z z-0ceOPu*?zJzHYF3R&$&O0owgpPzKS(G^;L(_;N>vKp;+XA?=x3^k&+?u(7S51`3wMc3xP)4?rGknj z4VO5_wgn=`L-7i7&P7my4;{+ozzyO+X$KR14%zTg5dweCAr4VU^5z!R$-cr!2nm#E z$sgkbJ@x41p8tGKed(@`fc>;806=Pm)rZK$!f|n8obsIx;V^#kbN_i|4?0VoEj5oy zbFVl{{h3>l{aM4C`mIKDTt5+%(k z;I@j6tDV8UBHj{Cxyo!}B#$?Gp6O8IZL7)XnXriyibEVwi5EmXaUGZvD?%P+@&QSP6SeJs$+~0P0}}#(mJC=OWxK|BGQ&bMY5Eh? zGq0CyH*az;PJO_WMIN)kxKrc`L#jv^9E#nPBIzp10`-uDLp3Wd8|iI&|2G4$F4%)_ zM1e>@izoO*=c;6yVbWoau=wSPWh>=)uYQB|!!^9&&5(vty|wtZ7HW0RZm8&f?4{o2 z^F?K=^K;_}QXCVZijCqjp@QY_Hg1{7=*)R zZA_{kejzblWRi$G7sPcvK-tx|zdAjv5G-@8PnjsYWc$ZR;5|`2J26@b!nnA%uHfU8 zw5d5Ju(uq6J^cT$byiVThHJDIqy&{N>F#b=q;z+8cXv07l6DOekd_AN?ymFW z-g}I5#yGcJz~Wn9yfNqVI%>#PQ-S8aWK>kz8Uci%8E#v$Gk*t=CH{bNw8z*9!PMPL zP6W>fp29LTYXfpj8k-|FpHn`d2rcn*4`B<7)e%bojZ7hZmHm9%`FiZjRC*M=I#PE31L)FV5V(2) zZ|icpsAUNh`W|Ds%pR$_bD5 zTu17X^9npxYx*2rxuPtfBxpeLNmT~f17!XNfaJ1P-yZ9uJsJpl9(9J-V~p`Sb9i`o z+2`Q{D!K4OJ{vzk^_g=FD}Bt?tR4QjR%C+YtjDB=*ae&AoMmnpmj2q2`%nSfYuHd7 zG~x~h5-Zo+Z!UC}HcC}gXqkLIo}(!tz!~qz>2~*<_7X@iY;!8HSK5wnv98+Z`S}C; zl&|^47a$-h6v5BCJbS(@ijHWO#Dn>lXWEYd$Qjp;`}JA&E#G_(v?jx}mmRhGb$Z}E z%Q$EL@?irOhd5R9436rDDtZvxX^-sTF*NEfVY00Gnc*b6r{V^XO*>?iD@Q@530Xdb zsRC9cVx2h|H#?t3p)asLbhHp-jWd`Seh6M;V$X^IG~_2wFrPq!ZLDTnaoLm4$j`l7FM>qD zBT|?`T#jqMD7|CvAt!bUQ`N4<<~1cSkbA-TjUPg32*IoIgY6x<`y{-UK;6aFDe)=6 zIy!c=E#zL25D;MI4U?-F=j?Z=uCcfnQBHo5Jil21aBa834?ry<{$#`HfxB`x|F$^0 zB8S_>GUPE&14462Qm(sv#`b%wGFBkoe>ULRE9A?gE`G#`j=7@%kT=mS#Anu+{^%7e%=y~~eT2)_S!3=jCqFO1ew)XP`#RC4 zuh^1!LFG3c+zu4JNiNh{eVy?efn4BIf9?XUBAxL6v;aLXXZ#9#lwkmQqSa?FK;X2! 
zFU$WvDK@r6!*2rMqL zzW|yrF`$OZw35|NzpKs8Lby62jz;uQh!-dZs2$s+JLX<_1j+2Ly(p4)Ui0!9M&Q@m z(=WOhaH!q#P`dxsO9GydngQ^*HenvD+FKgI1C)*~^Np^EO(32>&nw-vZY0`HyEOv> z$TVL)tD>A?^3wf%Qx%*K`THO2pC4tmC*mzU-${~m zWhhp1C#_?v@!99BKdLxQRlq$z3f9s8w~3cn=KtE|g}tYnuozwkpQgy4=O8|L;!cTaZ6!^M>xyqk{01-%R+j<0n{`vNs zGvTRA%|q4igMWT_ar7ND(PYgLS9_|pb?Hg>ZP%VQ+n~*v9>*7rq?+-cG&aQ% zR6y0veT&SKVV@Vh+=9GTa>Fyh_sS0D)kV#6UQ1*k<9uBa5K=BF%0W=dpUKs!>^^fW zqw8ZVeu96Urg{ir1_TVl%gR?}Th||;uL|VPBqD{Cj;A-SCZwgduc>vuU!cOnUkA&j z%CI!Djp@nA8B%3lX`Yp`_<11AVype$XG?MyFRB+!^_jTpcoJ+h)w3ManN6Ah)bZ9% z!jXRPp2Fn!p}I01F>BS^Khi6dbbc=-BR711cuP^op9FnV6-|hkq@y9i0y1zi8?B()b3`4hK(cBh6NqZ2C$^jpf~yPM;Seb=tfI4s^{!cK$p0H5G(X zi=x8hpys36k>%)AAVZ_7KRNuFJ?yf-Q($YozsC08ikzf^z-_9^{ivSntdo30m~n~; zJd6O4Et@T$%Dg;UoR^V#`uuJu8w*t?`=~AK==8Jm;pe8>;PBw|%))+7lP!aasDoKw$_ zz_I2v@(W26J`K1Se+IxZ%gkZAI6SV_%P!mv(LY#p>?}jnb?QOitY1Kd>xxR9*AQh0 z@*I9_m1;!(eyvrq1&wbWu;I%4iVMaUXd9K)syr(bV^q0;DSl@*wya2|rHewZHuX9p z^t;F6xUlc}6w4}VxYd;j)H+&nau0>%&A|yE+RoY;3Y2#`;C^qFR>3aSerFGJO|&(#Zu*&X`OPKI1&RM zfUSMywp3VVjX-%pXo*U?9S}1p;bmt5+5Cy=7Rv-EAue8yda@ix6lf#Mwb+I0=02B6 zrmbt*<{!V(LF8bnX*%8sqOF zzzBpVJ4uo^&%8hPB-HjB)}kIitir#Tdaw6_;cCC{$H+T7UDiN-ty4Rr`KPNe8FA*> zT&HT-Bi>?I@kucEHl49*=}nn(x}+P)0mL3JFlLA3mtX8MF7RQ*P4o(zZ6v!)Xmj;> zkHYXhDJ@-Nt zJ&aCbYCPM$3LOmL)8_POy&fs}SWLqGZbeVN7>{=V! z^vGe&i%zM4wAF|GqSWlbdAE-B2Ub8nz`i0N-=Yp}L5->!S2Sf+Hon0d^~Fx_AV+fe z2I%5kOVzzsYR9<7lO{8|KKs%At2(1wC8X^kCf`!bfDQUb>q1Ew*7d1g66EK)Xk4zT zHvp@ekXqf337|y7=r-|RHajIJL{jF=#fO)L6rki9$YwEBuv`<2{jM}VrNfT`N75q; zfNiB6rRUWAa_*GB{UlWx0?=#jK`Tm^a^_O~EP$20Gd<&>;I!IPoIdLYNSf2<)MScP z3Z5Lf2WT4Sl{uSd3=19x;^q}wh)(=$h{%6;i6pf51w|ec8-VV~aw&d)16>T3$o7@1 z5x7psac!eD(SOryoL5vvDvUR<)ZF7#4X1-vd~hr%?qdDA2{d#5ycw$)C^Ro=R$^rk zL-$51)rQsdd@pPEMGHH7t8G0k*=@LG=KXCcsb}V-eP?sm%=4p{KREy-7F6}2RcV=G z#5S0>REm%b!ly~$p3tXh?i7+OMDd}+2@q-hT)0?`{-EOVdknRbB`@PsVKcjoW7)!r zB%;zjOu$^i+!z8cXOV!y2+kglmWRnf*hlQYLYD2a+x@H=3frceY*}6_S8q-?gtovD z$^zyG0LR^OxDT}Vr0$rb71jh*9;8{myrm}7(+{QWl`4e`k?Cn)Zqm;VBA;A7LKhs^ zi7Wxqz*Fn1Q*Y>0&0;aDn9!B_WFJ7-vJwUz)2BkUt;#8Sel;P^NfabFR~A%EFUVG2zL4{{!%Se$+kl z>{hi>CY^o)zWggA=BlwU8<@|y=71hB=P=@>ro7ls>U(MMH1I^5dp?Mf1zZ<6wQ@wi zp~3*Fwrl6!)kcw?XMHc^L-jm^nV*#M4Ux25($q!v%~bc2WkQ^_wZLv+a#K_OiRg+JbmYB8H9_T6#L5bFZFagL6%c$qnW<@om zk1d%|7DbJd4{)B~KC7BJ;gY?YXFcn;2_#XotVV(y2g*CL978Uf3~3ZNY&fc?aex-< z=Y;5rd-1e+=?BtmG57wZ3kb8)eWK%mRdv$LaPJCP^)52BXV$^J;V|y>63r@Y+c#5=0e3i5(A4w2fmHWk#J(TP9W?7pA!$GoN2Nba!1&cptInJozVl zvm54idF-Y0F5H_o6NTYlMMbZO{*ohSs7Qv%Tg8b>?~q1GFewVO1BFxr0IOk9DwXN< zqh+UzHKj)>qvH%`S7#V8cCit?XEQ^ZS7M87xO%Do7HXHSzac9|1j~s*f2XO&Nlsy(mW6xZCx7xDtl@%2F$N? 
zhqeQIoUEuX9sMq?LYGm->A`lAIv^%Qs+W6eTfed>rnj8EFpX%;2)|{)QY_TVi+l%6 zFq8p>T(?B!w1>L8VIUy5Wc$U!)}u6*AZv3oZpm5!9}%ZFKarZXAuJ44_hKfa)bfq$ zmAQr4GlZ;p=D!C$VrmogL3AbA_Dh44;txA0U11Kz?+sRcuzUmeJp?aEN)FeH7pxyE z3qnpre>#dmfXvE@VlBzFFXFfRqI1){-c9a^c4Ooq?)r?nCBk)^&PiNxNBRK655nUR zXz_bsCfqQ#&q_#K^pZT`#}?I3FaPGICR)>yx=w%jkW$d1Z$B1jnC+T+$su(r5KJ*K zU>KoksULS?4ICYUWX)er<8bb3yN#%0QyT`a_X2mOu>(bZM&8Mly7`gX`RQ z>EE_RTBj@k1Zt*NHhlxQ#JVJIAkjx6`z5Ayx5$*s?4$$K7Y90b_Q{-dPSdRWu4eD3URYKu9-nz z!t%@qXoORDzgT*(Gx%6`D*Zd1F9h%>8-I9YGb{A;B!+q8o60ScDw1z7$@k^sP9|J| z-T5vdCAtkCcp@){kVpoT#)-Yswjo`3+1(*G!j}fmGR5}BCv+NlshGHQlR_d*=gTH{ z;Yi$NSg0o2h0ln|4L{A%8ndz-$QCKf3ELNjap>+81Pr%NAm0H>s;5>V5`_0)@+Z-c z8|ft8j1Fe!0bVu$>+}a(>FlXaUB}F0<|$1`Rl;Py?381};^&hLv{`9xrm0?)8s?4o z5Zo#C*a&KdIc?A?0P+4ZZGdh=dSaA~!0ASKLL|$q-b<6U3t1ZIp{rloXok*ifs-e5 z|AJZ*+W~uhv&&~Ja1r;-yC6wPdC`?~$}Z(Y3S(g?^{raF&}B4~0nF*Wkd20!#%Q6h z($C5Lq<5`xP^5$bOp^Y$^u&mpcE0w1EKS{mDUj2>7#RJS&L*W5|$0IJL8l7#}sLHxKx#oVbnXhqr6De9utn=81Z-iz(uO`3WF}O zrC4^P8fG%Ga#)K-3BY%FfGSIWGRi@j<_2gJ_IWVgvPZ_wTpl$#Xk)}8i7iQ3j0B=J z3?QQ-o2N}D>TKFJ;e2x?E-@J96$&Nz-;M)AVa-?DCVixEnzCoFv;B~g$uQyCe;39i zIH|5hSG4iAnH+FqtXsY-G4dTI*U0v<>D#2TwG^i4wrYjp$89=GH5<2cS7A%oOK9MU zHyS;&SIu(cc5ee)7;6{y{tJQC^>PoCS-oW+t$W4s=9RdlcBLB>HwOKArio(kF`z3q z0HP@C(uQRL;g&>^Tn8tTl5J!@TGeXts6Bxd9P%DJzsIFbuC%-FMa5sdLDCt8WR|5h z3;iCP+zI2<_^m4AF2H~o*2!hQymKA)~yrpv8~}2K`PTmnoZi z@)z@N_auD-%K{WEyrRfI6>j>$P`y36Sbyn={`T#a{B3a;RLUvVKr^L3A`b|1vIfd+ zh0k=6Tbo`2G9UspdOksHjEeLSST)wTI^KJ;#wSfLhpoSSb}4zF9d_(t!1^_O4! zoLly>1r{qo?N$oS+JF?h=S{!5RhZVqFY;U~uS7l`z2!67P54YB%DXdJQYoq(zly`a zhw-S%1k+u8RD|BXjTMa2)y1|I769u)?V-1>{hRV30nU<`H-bvqaRWuQjo%)Il111c zSQBI7Pva5SFCH*-J9U3ufkM4Rl2LOsuoodd1ar=r$;S4Mno5hSNNT|De16v4ZP6fj zICsL}L;8fAY`?DIJSifTAyM$|v$?UtI z*eDk38JqI%`+}G@(J~|RGcN#+EqZ_>prY19qIzri+ zyizY^16BtGbrXESL^E^}9Kp6hx$ub%cMu zw$h1xe>&c#5N@ivn5=5Dk4<;*he-(#$GFsiN1!iOF|%%u#m6u%-1(sc`9fB~8l6m| z`r>>#_>`#P@)!e+oUggERz^@wTit}WFt-Tv=}deT_Ku?>Qdo|aL&MBKyRy$el@)ex z&L`sp8dBw8CF3e>PiiqDyZ3=eP@>{K+2k%i3c^!*aeL0RC3jpX<)%75{Dd$X#E-k*hJhlSoO-{OQ|}63EzL?^>;S^q zau1|JRT69I^5Vtmscau8TGGuP2p zv4uGA3}q-^2>JcU!EiO5-j3me2(h5Y-UfCwI5n?d|CizWkdmJ^^&zloyc6c2?#mJu zU3d3+Rul=xuC(XU$a(SK>*@uKlE_`r!sBhP1s3bjG4|Qk&G3?&I@y+*iZsRQnLn52 zOWi@B1MC6=_h?j+IGpPw_m;CM(j@fAM0JJcih0=|INc6ugy}c?#gCk)u%Ew_b||*K zmT7;ht?#kmKT!Z3PM_;*vSJ)v2pT6ju`d>Jf^A$u!OnT#BIlz{83IWRc@XqkV0CBz z9t-QlW;ex63+cMCH6%ehWV?I}^yp_ilkEQqc8e$f7fP*=_}hzM2VE&KIPzu1eW0GO zB+sUOawgf7y7gr7jOq0TM-kG()SUgMJ%l$h!a%ggFa-5MVnm9=g>gnUSi()>hTU2( z7$RJ~B1Fl~s-VwseAs3bHhdcq^`F(O$3$*FJo3ivOL6Xeb$Tu?jX z&LG@#loeLKGShJQFK`IypL1RVZBA_%Om`HvfP|vboUq8Y-8g?J(fD_VX>cM#Na0xJ zXVc}@jwB0u6DyelHT`5(2_}Cf&c#zkAX0&gBqg=h zdNH5miHAV?nn|)okt?lk%$pMYXG*Cv|G41m#bP?uEIET86cgnjz7F?; z>Kc7MqNL1pwX0=a-J8?N8#I(w;N(|m&WRrn?s11Bvr07T$T+)r%!ZIS-(DGWO2R`> zVAg4>4czj0ybu~13zqtU8TS{rGp*RKb9q37uP?={J;mCApO_HbNc#m6$6{qC$M0oP zOqjbn5%ZH<@%pS5Y*`z|QKi&RYFhU8 z!ckxK%gux-2{jE9bM#-j-9w@2F(&LmCQSJ&SGS`y>gERH-5_FyZTY14pIZRQnF*R* zc+61VxmM1z>It>WTFZEoaJX;RT>RC{xWk1D=P0M^1*Mq{Tl{8}Uv^ zRAD`{06kH6-%v$I%p@vfII!@2N~M@8w?&RM06vc2MK>BX&135N8YQM-MytsXA~oKZ z&S>aFV}hDWkZb}&W*+m4tUk$fyf!lfyao~|m$MB}n2m}FmLj+E0CS@bh$>iA#twBxrgS;S8!;kn#F5|UxVb4o4$CQ zbbp-C>ixZioDlilLq_nAV>ZA7rGWQpiOb;jK&s}HkJ_W%X*bmq7Qld4RL~YGK~}wF zV2#TFJFnCm!b3bnaUN3IB?h=#PW`OqI}oW)J~QImuZ=k&O4e@7S{# zU*ponKkhSpZ)m6B9cpqevM6J^uinxev+Fv`EwEyV@SY4tT0PB8`Q@4L_?<<~l$2#@ zVKTK1OgA4x6{x|w{pM!cN86eNLFeABu*^OU;bcG6n*xdAkay%fJx^Bv|9OnC{!ZI` zw{`RG_n=0=p~w~&GD@Et1l581U612Nyzf?IdUWu}+RIIo%i+_`vq5#YTbeJ$=6&jc zc${Tm7xLO8=o)9jdHlNskW$79oPLPifrYpP8kFB4=K*0H`-%ki>&9Im2kpL3y(N_R 
zWrV(1tIH0=%5byN@zLu~^i970>u-Dy<~=LffVQalSM2HnII*6?WXF6vv(IKoRFDU& zsyI>Y826iqcrB}aXFPK`W`A>PeSOq?YE{x~@j;&l4;T5GRP8A#g$b@U+goaD6!&gY zpbf#@Ooc623Q9UyT9S5_W-ZfgIC(u zFA(VbPaBBEBY6Y&L!-YQ$OEV<5)!$f$v0}7$1@fC6g&ACL}nz*{$xUI`p&rznNEtU zmwjOyVjAR3?T>4xmEh2J(^m*Gzi#vBC40PBHvCjW+Z-}Df=1b$?xq=)mU41#`wZU{WPC}(_tq-L?pS=xYz2M`{VEp-iH+0m}2K>;Yx=~ z*O7=xU$|IWQOxKt%NlkZ4viOo!|!<-(cNXo{WR=3d&mJZ5|y|Ivo%;bO-;Hr+uF?% z$0oV)yF3iORELGM#O)-gg1XKgpGLMm2^^3?ZOqVjSc65tb|OYNAcJt#O_9DlAKSKH zNQNU7x^P@ewQ|`9jR$OHO*%C@6o^E5!I61nx_?aAY`@Ifi!_^T6ph|Rg>xk9eFX** z*z}skWJL*zYXGbo$FBUli{l%&D*R4u<-ZVFyE&&c9n3+@S!?mW9F4pr5w+`4e7$f6HEe0JUdpu{_fbsnI~Xzrg@s2)-_^mMle^d5->A#b?m;x!VB= zBC+pN|5cRAB2%y0PRi(zr$$qDNE^TeB^4!OwA*((M}cJ&cEr^&=q1j7iy8DQPzb+- zdrOsf9+>97C#~p+AS0ghW`y=YL$7Xzj12m9a<) z+GLaYGU81vTA{a~VB3*2=n#Dp{449S`7!&WFeJSZ&@ zzvJ4Ru#lMUx)hxinOZC2%;)!7*F7f3NwZ7F5*JH{U8-tj=;^2A$WKgyxDJ+MW4@0b ztVn@H8R;}@cFeMO0>srYkicJLui3*F(?K4NrMt?LEL8lMLW)(eMc(sDx@U3X(Xj|A zw0szS;=PwZ%_T3C>=W(X*BX82@gI`h`@XH;MovcZ?1vDV_XOS{9{g}TtyAw?&!yD< zJ30RRXEdULX~Z&yokw(Gx6czcn%HZ^l5V}XTOdF66Je(wG9q%z$j5z-=*2`Q-&OtU9Kw~KkF3n>HZnXk+o0TXGT$-yIi+iNMDSNbP zSjYE{zE>s#&$_&9F3AS7pZmpBuT;I3s$8@5KEjAlhY~A7wq$S_#i1%I(!dDGP6QR) z(mqcqe<@8kktVW_T0J1KHxM zvwzl=44R}AORK@qQ){ue;1ZZ2SBldpM~c2L_w(2#4k0+WY*!(sRFC!-n$pUS54mRf zXhx}YP~j}@|Fi%fiN+wZyFDI4wf$u$D{1QsN;W(p9cn^}=0Ob3QTloekQK95s&-x& z%8IKOWmIfGU2J25oP-aYG+O-)g>A}+G|_6-b)X)x%P>BC)@ts z;lhkF$UB$@S3aIitDb(<3}l2nsli!EVft76>sna>=}F>-`cjS=&N!~7AJV!{sF}@! z;MTv(3j6)y#Flb?#+_jr})XKZ7%JredaQ_wpTDx`B~El zvhB9SAo$jgU-*|oar~9xsM6i7J+_eqvwtjeN(;GBpKO!^{q;y_e&`e6Vn)L=vUet_ zgFanUjXp(SqrA7hp0XeBJC9+a-Ynf{>u*0w8Fb~^xj0mlc0K7Q@=2w81O&SUG?Apm*kXSfzjBJ-TWuvNVv>rMzqUj_Ny5`L z8Q${-re-SgOt}Ul2BY>;26Mv57QFbJdaW%DQ&HPx%vTCm6T;K1q*D?qa5UdBDAr;7}`#~yYb0>!w9v!?C>c%Q46Hwv9*y^ zt-hIY3DTXHFrEo}40)<8nF5CWy7B@%Qgja82Dl#{Hes@9F*na?ndcQWMW|Gdel$KZ z`<9x|(O+d~_$y<4B7DS!zU+=dcR#&G^GOA2(^xO7LgpvETM)8`+X}k{_47p6uo@MB zqPag)fUJHZ7ffuV$3*=)(278IjT zJF1-2%wZPud+*2NmLs#^b5$1on>zvfh8RT7Ez~8mB$&~*)X?}rKYIu_WYMynwf}tC zuI&)Z=#v?Do9<2diV=zMI%DLt4$WrVWTQ>-X0hviYYvns0tE4Hniq4&28UHO)}`4_ zN0ftj@wlhS!*_BmTVqI}!L5}ZO8EdKG95FL#X)47NjYug|CD*PMkS+M_d05FI+$>yg;J_odkRGY5 z_k5tXXZ)`x1 zAcQMdiA+YZHW%QS1ZqV^{cVQhrV1GcpbB`H-U&5>dR3o|@C8MTF-n7wd?WV#_W5&I zYATt(59WWkYzdM#%CyxbLH|laWJkh?Tx)b9 zaQu|UxP6M0eX4Dz_=~wk$eOI1wUdBLjY-GDcTDjC4VU5K6m^2DV?+cNC1${*r85oA zzGEvof<(H*Ty)Z{O>K!S`x0s*T##<Xt?( z-co)b^XU=Ft+fw-7lzfP&!yM=V{w@I`}fHXlWsE9azyX-_X^dUm&Vf6De8vdL}4T* zh}}@C4A54yjk`o);GA0VpN5hWz3Nkh(ZT5XBVQW6Q{oa5DoU|( zU|6ebuV;^1Q4+^9-d9m96E!P{A$}Dn`nwAVZpwoTu&&dKU$=CH^p8Soeut%Mp^ca0 zXFxbLQ5XvsDQ?09@j}{8Izpsb1u^Pe`HO9R4K@^zD{!AgCc;Y)i3hwZye~~TC!4ia zXMp%N?5#C$gRGk@{L2aPBI~QMoN7hDuXc5`fQ95!7W|a!shnE%usvO}^y&kj9@qQ2 zp9jA60gr6o=EieoY2`2RzW$lWpTAs8iMAY^nn0t{LF3W@9Tfaft>L5c8PA_CXU3vg zN-}}RqVcICXeW_JYZwVWQGK0@G!i0g$e+DQJm%T6>BWv3#cLZFkrXfBiQ)Fi+Y{F~zbaT6%k8`lx;*Fk`?ZkE>wVbT;ml6$OMYJx?;`~}uEwzJ&tB|G^7ryg0 zJK}Z;*J%`4sTZ?uy55Ptb|h|RD*=G~~apyRoc;u`bqxBoP*kxyp524ZQg?eFoH zWicQod^h0(NyhrNjsC@lCA8Yi5?$}>RE!L3y5VVhH6tr&?J-Hfj$Pzcl^P76Xji`q zHyW&r-pf$)=WVna)JH;NDsr%vl@GQcQNHf_y_mI94j3+W&(OCiZHyJfhj?O~eNJWBK+0t-4S@CKBq=<4SF@x}7n(&HLF6E0@Tv0(xc3-liEU0j zpH5D9D$HSm=*neDU3qBQuZB}ankYT88-#=8bSYAp2TOFzvAWbkB5djAn7ID>>`H*S z;OKX;LLKQ(XRC#g_E>4>VNDMv3b*n$=fwYuN zNNM}NFLi>Q^{}(H6~Bg)f1Ta5(j(^|McP-v)Uy^b=^+XwD9ppWew9yD@z*|NZ?Fwk zp<2bLV(?G4HRTmL$yDMk^Cn$?QdW_e$*7<(g%dH9dw*VZp70QRu=#Lv3YRWJR1Mld z|Mz%W%u%{jHO6tlnPBKktAmUmtp{ZrfQbK8gr$gw{8Ihln}2xjaB9?je=e3Rr1vSX z0dk!n+a8rxoH9G_+|Jz=)~481_zsJ~vUBtvF`Kvvc{C349>)#ymW)H@y{&kkxho0_ zDx|ZfZ{Ys5kqHCFKPY3+YmAC`TN}ff(8641NZ>K>zUW=eR7_dx-nl0#J|b3`E28Zn 
zYfI#Q$5-c-f~2rO9#))Cu@GBemj;IqN=@n=;o3bi)`W0edg?nYkwi4nyg5^-L&}#8zDDHDC4kQISPhsl9=w4 z`iwk?^72{=)iSsoY9e>w=%5m-7^L4qo5G>eIwsCeZ@SYmzm3KDieZ(?iF%HpMV?G+ z&f2P4sw4c%!7o0~0%$`=>e^hlB_pqiTWBySCV7FqU=eM|uaCXgBHAwCraWoGjkoYO zdB$(Ee1KV@KU_OM%iPiw^_z4RQ{EA?D`p@`bsI?@4>fS_W;$6aB<=^(V%5AGXc&xE zrU@roeSF(}0Yui9+Lp$kp3*ce*6gN&*HA=@&S5BdVm5=3zkpBg+!PbnAidu1I(r4g zzmG6qNZXpmhs4x%3o5iF59Fl#c=1eA4jo_wk$EvVYG|_#oc+e+)imF;woQCXa@~uM z8)>48_T=HxNRd<(T)OSxzow&4+K@q#xtLU)o(|GK}m~zF4 zb|5n%6UHE(9(M2fI5`9!xyi<8^TK9Oa;w(+B=&VjX_bA51Xe>__Q`#q;El5(hf*+1v% z)U;juCGNfyHqW>ev*^vnHN9)AOj zlhMUkHzexBj4F8%tu#^Q?-Xq;Q7rK|MU~Sm(Ip37@wy%tfs)>l3QHMLVJBgHVQzIN zAFT$~{Xu9#(8isw1KqtXQ8OxAl>W@9IqI#)xvyoT9w28BC_Nwa8S91`*nZi5g{$L4 zxBvFJDXt^pF;JWz1UWKg;UFVnlU0}7NL!(l)m`iA#7}7Nz^hR~?df#8>63XIWQG?v zZjwh5gD|6C_7GLnLA~bIksv*q`e(+A{^|jLy%hx5rq0tHC(daF7^|l6GwJVi<0L^` zw!gx6jq_QJuChe9phWY&Nu(rewbxIBcGZ6uM(<~du<}d@VL&)I4&sr6HQfOhYz<-AvJQ?^_1srdecD4n=~aJ>IM*JaoUoqcs&?QX5YG}osXN+CLgFavXJ8py~H#+ zDTSp+AU>niB|lYXlHG=vb_4S&6Hz)_XNbbU5a4ppyhuq z$GT+Su~QjPFUX;g0gbzjWd=^)Cn(`ZjvBX{4`9s`i#9U*n&I}QhP8Y^<{nLJP4Xy7 z;$y@#pzVX?xfZ5U%LB55XA%E1mjl^yGFEi5o&)1)7 z+5TOO@}fX+M#c95?E_=D3DXAy{}S1@jQg=}o;)>_W%U99Bx299Q6wbw0z2iZpovn8 zJPG{*xTw%atgg6{5dd$%(st@|8i~Mj_~WN7+D^j>%CGPs^L@`y_i*qsrDPVId|qHG zsT?p{FCt5rLCr;d-}W0Q{Y@1TM^O!oB2gZW7fSbrlJY$;8OFL_jcUhWY~1~_;r%Cq ziH{3Fjr6>ATbzzIbW$>v9JqUkVz2g%rS?LZP6?l>6!M;IO=oIJkNIrM)Lz?;uqd+K z4QaXj;WYXJVp(s5nb^O4MVkm34BtfWwk)^!iu@jl)*$lztT}714el>-Xlbr*h7#W< z|4=N^0<9^0$gGIkO8Amva5LxqU!;-8#7i;~asO8qiZP{L@?sxJ-lQZ+Y+kj2WT0Mv z68lS?JlCEpxym-@I@-M3)slc%Guv^PDv3Akv0{ncEDHII58nv0KImkS0Xg&l^U;9)pwC2D35^L39_aZ+ z7=dJD6i84KmlEwl%x3FI{kt;>TRxs|)90o6jPZyrp-s@a*(}BJz0bM~0Ldld+bFo6 zo;SeFoI7a`f^3>f7I^TT5dYwj!%mu+>*ChbGd2U#ti(QC9=#S*ZHfHXgdEo$gUZbE zgGSowCPj)YYla!@!Wr1fV$l14aaWM6zqPt?DdDrbu5QSvU}AgMLdcO|n-P<+bY^bN4-f}5#H<)rCP`hy3GWQC8^k9V!I6dig(q5KR*Uz5FH| z?kn?3CeZjXm=wXRY}Hym0mj6DBNxHVXSfz5EO3Pq_UmPq#`(Fc7zpI~gAZgC#3cAi9g^1GpbQQq}c7B}hxdB^PYWzsXR5cs4)8zkA!f zMbW4ttANMQAuv7JU2W4vMn(*SJoV{k34cHR9oBKo?KxI^wT*Kmn$Qf0{2`v3h}KjQ zC(-Q7b%cz`9~fmfppOm{`YsLqVrxw*d+nY8EY~x_&^> zZW~~PHe#h%$_9KHGEH`F?;j>w0AR-)R@`EQ!fTS}HaO*3zredNcn}&>TfUTQhNf7R z>r(Up?1-rNS@5mt%4iOiYD6r1kr1W)*O*zK&L-#+JJ_O+NwUv-UrH|`kZk*95P3VgQhcBoCa1Q}_;iXYyBuTM??$)OCm~ zMHpkIgjn zshB6wqq!ylJmacDf96vvd`geI0&miP{_nTz1Xw7-fJsUH5xS<2e)l7K-2k>!rwKuL zm{>>;n2zT&Jq8Tuq_76a0S3icuJ8a2%~EbT~&6y7*MUL~6Aw>G8yth9FF z*7>MYC&TmbgrIjP85~avf+m>&i2`1#1hFKQ6h?HWQ5rB3RY6j38hNba|nmNi3ZQlk}CoQ?p z1vlJ*u)F^j@zkxKDGIg~8<~|7_qZ6URp#BU(3F?^i7X_;qF`sP&0tD?o$z}> z<#;>`WyOei%XX18Wz~SZVqt<(<)mr;!J)^;=GmdR?fb)O?hf`zZCl zRr+^!P_W)X0;<>#Km@FnSrOk_XAWl~%k!bm<;WDE_ zMge*j2fVS$fqVRmD`4XOOiOz^1^!INOO2deiG>{T`Y_*Vn7-`GWU2i&B-8+SmMao< zQyz~oJMTm)tR!qksSV9KuV!7}64AZjcN0H7$G-smbu*a?ZHvMrZmzM^HOzn}PTlX> zE=-r*sjvpHu%Yja_$|jM^3ia}78r+lV;5Pl$3w64>ZpRQUSHN;%SVoRPd((Eqe${4 zeug%SEQBnZKk}^IkGF=A;=p2q;*ou_c}NPu8lpcp(Cz^3 zZm%sPz8=t9|J&Izr+sNWy7?u^tQoL}bVI=6B8?aV!((+ea$cWykyzV$fT4)HF%DGP zu2H|=|4XLdcHDp0@(~Km*jw%q!?lwtD-Hlm9`nw`oN*f%A1A>=AYfng0DT6WlBU7H z5k$YcodVPL&bz|&IkagKLB|=~301yu!67qu7os;0cJ=O#5*F1GvafWq%=-lp!lrE0P#^-nT=Ydz!Qf5Y|{U&r9J==l-71b~ssy@6Api08o4waq(q)sdp^Z~4dkHjW(fX0By-4?lJ zo^sa8mwr)lW*a!Wn6IOq4pG*|z~Zd}@N5GBYV#Q|Gl@sVm@ac;F#91%3}w<`8`56p zTK#AtC)}dw;kq0o`DVQz2^2$5+=EzT0Wa_ZmEi4Lvhv@v{&s2ut z!TIdN0RMpVK#`KERJ6iaFfasT$`r6R8`bc;<6v5yg6_8xm_pE>R{_<#p)g@2Pdg*= zSAf*ALs{KuC>-!|KjHUz27IK}LzB_RdXnZTs@cl`ykO@>Q2!+Sztl+u%KDYRxrd;u zf|2E;70F^mVA@Zn5g6K#UM_Q9que8OLPm7;_WK72pf=*2rz7ba)jtX2FO0ZzF!27S zqyyB#(vxA{-@pgP_yW}O!R^bR`Jc|J`!KZXoxgLf0{iiX@vx$feZ{UhSnA6K0QrFF 
zf6kh_uwE=RSoU{d(=d%+0Wj7M_+kFxJ^IW$D4ag^hJEI;AtS1LEtUVz!T@XJ9P$p~ z3s_c)q1{=!#S?%ahX8t^MyrOvlFq=p8-P;A7JCp4CpYB_L&Dot!My^}XM= z2bfH~tV&Ohn*^`!6=Yw>h5*|IulK`rj;#mEifj;)WGP*EG1Kb)>m~myYX3I^Jsvc} zC4hH{1@Pxz?>H2nCQXS=#aSZv!kfe0|NKx01B(Q=?4;9F5B_o}|FSD4-=ENt(%+31 zi3ec0tVW6hnDR~mphXXWfFuC_7>$112he>H7LLG)A5Z>v`uX3-W>kl=x3@&=L;R%J zUB@|bDk`dDl385sv6Du@jrc~Q+;~gMr0c*+==&t$)+CY`4khNsws+T6e!N=~El9s! z3i2w+KKBX%GE2P2DTaQkD1x#t`-yhV8=`*X(>gzOKA{PLP79d z#~Ejo`QQ6~yPxhCe+)Y3?6ddUE1vbNwcvBY1h+}UF_-u}vQNutnXEQleF$xc{07Hv zZ#Q?c5WVkmJ1~5RW%xS(UC%T$*|vqXv>$|h>H#0&a)%9-iA%_9dAE7o^3ArKtQY+0 z=TY7>eVk#>qtq!O!*rjt=p?M=@MR<(VIk=0dL#MaPU$CW#%7x4Rq)iljOVP9Cf#c> zH6e=9GSA^a81ccIi%g|GIE{JLr%WTu>f7EA;J+a146d?t zG&$7y+Z+Bz?Yn~Q+>X+=CL&0M1VT@!QjzS7>wF3i{qRzma59M_pgE+Bj=!PO8|p+n z6(+fzagF6yPZI8RLy-{`dRga_#5Uf=`9D3h-|-lQ^gRmK>d2@Pcj0z=MlA#6_6KGrd1tIeb&hI9blDc9=<(YGoFfP;YCj=IhWh>h9X1 zFlw>wv$)HRyCnn`LI-jolCI$59%$frxx+&A02;-Rl8zG(T^BDyI>(=SA1UVREW_{v zv{xC9n0JE8+X&LHPI)UVUvS@DygU{+CjV%EAcmBR2;~i;qD^ae*1udYAqbCQV^XuE zeXrB5X`-fD)oSo~Q-Zy9wq$?P|0u-Ubyt2*kLp?hu&wV_U+dm`vH9p-!>16N^8v)n z0)i+`EUwX3&<<#?l^wS;MsO0EMBZKvTs)xAV?VOOGYdR`qLY;!XYQ%K`ZMIHB1c!c7t`1vW5Zz|Ic0=^63OLDFs?@m4WPQva*Y zz^NdiV#6ZS^w+oh0!@+C@i@QD%@DrB>a2|;PVtZWew8104wuyhU(@u3%Oc@M6Xit= zxgGqdF5c2UXDFQ$$1aO*r(}6C#Pp`ZNQ4b5+;dBI(Bw+7G#M7BCZ}gD5)#5X?};R< zH`7p(2&*)V-WkSBuq5Q6UdynRB!0-QqEeh16TO>5tMi#J)C|qB8cp%Kx~7yuJ8xYh zwnZ+X;S5Z^RH&hUPnBvE#DzyfT1mHy8Et#+y`jKM&}~xh;#7IdYh+o-3re~e%KJ~E zMf%%!+FqxkEbYlfk478y@9sqU<fwjZjMk!D1mHb`P^>kqvX$$nH8(KARg%Y<+ zEII5qSY(oCyPJ)v-Y!EM39kFM`e=NJumk+VzsJ*|UXPbl?6C3H#Q%mxCm9lSubYx0 z99u-Q&RXJ((ToS~-44Hwq-MrtzRFT1c7mcSP$uzR0 z7y{0LOZ2%K61V$k+@pUTiP+=UFOjaP4cZS{s_6)3wL+>sXoz@GT-hR_C22?XN3Z@w z6pQjT#1 zD9sXx5{k${R1`w|F#QfQF{X)#YrlHCvW^YA!PAE5<^H;l+|5HUyH!~Lb>f}xB(d(P z5{p@xksc&?i(#hX=^t!Z)7><$u#ms`R@w0x-v^jB5xjIOyGK;CKQ1bR&ve4InT8$5 zh!>UBTVXYl_Py4F-mc4-)lRSTmCCNWTUJuB1iABmczS#Oh7#vT?DntKc6S!`qy(^| zSE`%r?7Uhf!-4MMcY-pldM3!%_DX(0dR-PxRUm2bVx<7cIT2_%!EaF6t ziKY-`;w9jKH>)#ttnO5#OSjyP54ag*$i!Q;rBv~Hc&$R^#u2z#IH&c!XXjB2hm1HU zFe#!$Rq<*3qx8emHJJ;)#|-Wpr1+Paqz@UsGld4kcDN#ns{wN>Q%nUKrH=d6KNoSH z+cVYqU8wE3TkI#ctYug91G1@`*$L()E{-qj#$;`Yy#Bgj^Jt>K?G|UVA{_w-VOpPf z|2w0)By5I&E}8af3Fi2$l3w}X$B|(Rqc70%ud6b=QQzhf^(6>Zn>e0b}hZm zypB**#PCTX9@t80)r>k$7e_aR7B^V zB|_)0##aw-@`Og;9fQ`LYJ`gJGY8Xq=J6aXLa!N!Q>oubN@_6T=J{XkJ{`y4r51UR z23mT_13)XoC^X9Z-R6(f%y?KdE%Jf#Xp!;` zjFj4}P5i|SgGZ6|L++{fy0E1ykk996q%YF9p<1nT>qje06q$20>KTVStI z6_u(CMVPaetx$*RxL59XGpnl)7Ir+{B6du3B6WiUd^GAHOiPk4M;d z>;?0a?aejsW+$)e9Z%>v=hQWrIr6kuRd?x`<3;C^9ESPlyw~p;Ga` zYM|(sE%SG+Vc$7VYqaR}J9hxw%Cu;Q(E@}PeKhY6hv^pFv%oh*!PM`xCC`h-X`s6| zxOS?80hgs)sfyAACnF|!&jaV?J#`j0&RiSu_S4?)jN8g`jX!8Wk4nESG1QF0t6Xgp z<$CDR8x=Ws_VO5XGIu#JsWHwP9lD0so49ZI*h+>B!zhxAe578)d-AGlzP6IBwDhsv z=g)dypW@7*`OF{9i~4kdMw5;CfrpGz&7zk*)m(}BkBOng=q6YwQ6xZmOO?18LPk<67wsIVrpS3ciCQ!`D=)8bOU9+ zy1%oALg#b+t6R^udxNFvZ$Bk@ul(_o8tM?7c}OdN0$J=unlw&P4`|wso~jj)T5#88 zU%i5ee(ssl#q>eu;VtS^JiGF6W%2q3B0J3$iw|PQ{k5mKuO57Zj!jqQ7qrFDZiUD3 zLo;=m;J&nWxk}~7^Hb7s?D!K`Wn%iCW9&d@;dT{$Ci37jPk4fIzdvoj46h~;8$1<+ zo1WHIdxdI<5kr-U7{D=`Aj+$X*i1M-F$ZV;=g(FVHj*NvNcL0Hs(VbYaeO*Ge&rIB zQKqn10^As+bND8k7p^>9$5wt-X%r$dlxnM~=2SAq_aG%>tfK90w>SRkE`=9R0!k&` z)~@E}aZhlDGP^3iFIfo?pF7jG*$Z9Pr5=8QM&0e7ZWvV>_gws3))Z1RlVhuSdIKsh zlz1>C>JMuvbUv1JvlIsl$8zDYNK$m~(GzLP5Sz$*9%74MR-Eui_wEMz_@XpYw6rGX zqh9TZ9<54O{Z~=)@5+`!$chS|F)l!cHp%V&c9-eg#il_2Hf!P+!wF9YQ<2<2YBD$U zsd4;*m$s|n{1A8SYiZIU6N_un*LQ`*bi$~o-{B7yjsp%QbOqTn zLKn&L%1i1xY;Mc9$rz@>e2Bf5kdWYpw9SO|%#{3385N^NZhqUnY!SmMAL)cf1ct9* z5lQ2$5S=U0bMF$P)zkMpJ#J1Q5U%!&t~V9CTD1Opdwg&3r_)42ril*2HSr8O+-YAQ 
zC8=Lx8GBN*BQl7LCn`U+HI@A=F@2=jq_n1V#~)Pv_wkVi{L;_}O{zH@Kf#hrYAP$s zw9OL_$k`*QNkG=}8DVWn<{NZ*o0^B1-)ERFv1rpQ?j9F4ED2mu1|{<=%ynDM4gpWO zLqtOm97e$?CtfVE7YYPvGA~fwpbw%(OLLyFpa}FsppN`oT!2*)eyRaJZ>6`dsFG4o zxG%=sbJ12kLX7Lk+_LTdtkCSg-N@yLR~k4hE(FRJwWMiRTDM}Hh!K8b1HfroVK_uxvR>OY2TC^(V`7=M(wFH; zZ7m6dE!70r-M^3S~IGa zAJe`Ii31D-Ok(uQu4Cq^u+uUM_G(eK9z0BdB3G#}Q8EAP8I~L=rT3q-&-xaiM;q_H zioHf;07>ey{#x&06zW!LLpIN4Ob#fDrn^)R!7Yw`_uzc~Z&+2%`@v z^g4CU6b*^Bg@`%6Z{>(ZnzO z<2T)w1dJlq5x7}`Rh(LsHJDfpNeSl%NNNx3Jm`%gkbd}%C5--OglP0cNbp>AFp={n zjfLtY)rA9?P;GJW{K^i@QwU3yr5(O7_j%MPEM%wbdAQY+nmC}8(YV1p7pZo9!NwW@Q^?&+;2(zXW1Ya2{k)^Bg zkhV?Z(7SP~Ha~=lS*H*hxEG7LvoC7yNHHsPQM_d1SIRM@-xKN#H5P*{Joq2z3$))4 z9}j@$`ubBUGE58<60$MZQqmlYX3HSc)GjJm_uo_>sfO5LIehqR9~^I4BFPJ+9swLy(72S%INzcsE_!S| zp%Ath@=%)!jfgY;{Mu!fy7Dz;(BKUbwnP&ikt+!e5>7!6Mj_dQOIkDygB8>_#O_$D zt5z9*muMw>V^^&Ux-ms+Y5*$AMW&una#sS-_=8ZP!~cMN2TeVlnb-)p za}{sDbuqs5mgSk^{JwVQM_`ctp!sDpqm(o69Q{Zgv$c85(fmoeQ<&|8M z`oQ=uZe< z<6duxga=qe4~#5DbgjKvF8ezRQpRn?qxGgD5xxPmnY;Y?E@hX$^X-Uu1PH8+aAM7w zwmZMybN41kI{r5jn>y^o=w}q{3vW!I8NdS?D#lYCR;HJq*eO~EV@qVoo1W%AGbH`J6!Y|<;znmibD)ik2foulGIV$uoON8vev_f+7z7Jj(&Za~Nr4_fr zltNMj4Wn41dWg32Wrb?v{Z|-+XIV#^J7|{vk#0x)`P_G(r+jgsi|{wK3$Zcpy2&4H zX9>zW2`W902)?3GL(uK4AV(S2&nO)bcp|Qg%~tm&q^w3TRq^dQ{Y2-&8tta+%6dXJ ziAQSh`Oop5rfxa+{B`>z;T}s_pK!V06d+!nyVUO+n)zP#1nwDpB5K)7i#bF5={^{j z0QcE%ufB!Ccp6(!#25J5B+f+}k9IbOp+~2EJEuQ!OqU0rM3ITpJWn1N>XSL7rd=@1 zcy$`|lPfR4)~Quvg~k58ooH>)^AEWf!15tg`?lAmFj)5Fk`VMX^$?G z)cU{!s!ZF->nPw(bt%xQHeEr^yTLV-=Z78n{-c#30F~DaeS#usBAXrj($V>A7_68Rl>pdX$R^HNp8I# zg!J#WUUlHsPx(vt%j-fc^6pF3^2X$b2VPhQCF+U_$j`b^oa)s%=U8Y|*sPc?dze3h ziT`QqiXR4si*pjy{2a3^UbA5k^Y9u=nY|l+bSU&IrxT`9VQKg}?-|GCTsffT-Km1{ z{B`#SJ9*Trlw4l4J2yWyXDto`YK^-HRfB{yw{&|6?zpj8pFP`Oi*3?u^DEk7@q9;4)2ISs?Z;7l8UlxHlqgJI@g^vVCm%mJ>L4UQK2#fa5Z@ zSplJuOgk%m*UNVe#;)1Tq2n98xm=}4`c6WH)eC=&CNMm`H*`Qgsgi2>oz3NUud~A^ zB+#N0l8O(YU$S62bK7|Q#x@6O(MU7R{2KJdF+890)YZS96Tb^fBIiepxnMs*>bY3%FAoD~qR>QG6wa2!7GDz+p>UwobQY;I zx)p+Fh31@I{{CxvBqa{Ytq1ArK*L0biM$drBccNS8p*ne>DDF`v1Isq5*xm=lEj+* zbC9xiqZ(3u%lMbU4L%WitM_sMv4aZ$E4N#t3|kk-j<)>1VzO$nsjfphZ>IIDSABiQ|dmN6d+6C%x8v<6mq!Fnz|wU2LmKTF*jLrpYq7>So{2JDEjvL_U+#biW$ zA7KohuRBxTlW(BN`Jv%0i>e%M;=II&v^}-vm0T_J833^QZKD|!jkd8FqB4k`d)#s8 znH?2e*7c3+<@no@_XJ_8kSy80PL~;25Is+rCAg>{a)eJ6tFCDhDdcKSse^6y{DM;% z>kM{+40iVOx!}jj!v8xLOztT$i#TaaC+1V$%#$`^7d9Mj*ihDiWGr%JFeK(s)fyZ2QomzGixxY+at`6(2m3*Kra)V2xi?;Lnvo_{aYdSU$aXy7L-Ox?jcYJuCa z#iTh)IRgP-aOLjs3Kidoz5F%hj7j}r-fq}Bws9LNb}?q_=cG-GfJp9Nbd}>D5O_3Unfo@AOQu9&xkaj%0uo%P{Xkok{rMMw$3r z390!3XEIlGzo4V>88s(vu zMm_DVm@CkvOvhQozm=pWoTBTQT!<^D2s)OolveiKwN~XSoNv`ITCZI~>2Kxm_bpp~ zgc4!;TAHP9>BeS2tdXnLAbRhB{L&JYEc1$at-=4G$-z4H(M~Ek%@Ngo?qclN7cCI` z96o)H)m@}bk7#9<_Xx->6KV_-b-E(a&xnIx%k(Fd8;Ve@!<<648r)B1$|uXmswFMN zW|F|b3kA*?-bgh)IQR=w{S~oZV@LHDX&+;INjp)1SJ3O~{a_JzO6*gn>5*4Ott%;S zUSf&YJtugKfk5sV1$*WI`k&1TKSwzyUSnvB{OJ3VAer(7fRoKtoh-}*Rv8i@wOoAV zZq`yV{t~{nB{uEq_jvzqM@y){z(nSL=8KAk^y!TP@QH0<{>*oyhvRMVyQ04lfM1ED zMpVLDLEtbLz^sE}ITp;+YDIbb;x_y>m$TUh@-A5YI?wxEA?*_o8Xxg$ITILC&oeuo z9n)@}#3%b(m7@Lm0}DiU!p)=TUc;!uN?OjG4q+9la!rr#@5!9(F|Te6&Zl4PN2-=Z z$K`_2RPj+WWnyt4;AoJ;4Q#Cd;EK2w)fA2h!Sk+C+P$d1=nn2R?;08@1`a}n^q<=q|T_JN9dGkz19ZzG<9eez2)2J8B=#(DxJrMi|`2Fwbh zwRfwtKT&u#l_BVge{&KT;95t^i`0L-(BAJv>|NWke8a)qAG?)}q)61y5FZw2g~uY^ zn^BT-%oi5B=n14UfpyR`Yzy>Umk?9i6++wl4oT`F^E$c?f?ItDGG#IR^T`fII}G=S ztTUXZpm?JhM3KZ|#1hyhq)s7a_25+6eFaTRJkZzwt3l+i>mi8JLx072*3A85IBT9gS~r4W*l4<}MH6wUZZqmCg2P>}!YY@*6MA^b@q zxs)GXI1FpD<(86C;cNrX>LQS2_zk2;}2zo=1P)YhFbk8iPYD+&XM1or}&OA 
zJ9J>541jWOGnxi~1HTRsQVn2I9VUWM-Pt=LnfWT_3jVkgP{B>9E`l>^ix5i}qH(naD_t;=X5;ZCeh#69L386ryg*w#s98cP0)pvo z09@9rB+WduQvu^{xiTFOn+Y1Q+N71*p>smboO05>%2yFj#tc0~MfXh7&C_Jp+v23lGal&e)O zrv^0x7Kn{_w^2Cz{m~70)1RX%y}vESzucr^*@Qc?wZQs_FR-|qrPflukQY1g>}6Y_ z;C1TU&wz7c!aWvj0aZe@2zFHHLN{yx^LJpSTuA!bN}H-s4HDu9lM373&gy1V+> zDxAbZ8i8EvW-4vnF3KyY(2}26Xgany`{$3FO?b>)nSQ3ye-r;~=~-IAZ<1d>IyI8C zbw4^>yMqicJ%=d9W`r$U^a=%|bq2xPV$3pQ4a;VKCT54z$ zHC%%mCc%Gi1^|V9-m2u}Z?^#XQHufu4jMB=9zVrUG=(ppc^bdTCVNmRdJmSmAygkg zh3D-g{iMtX8o^T*;p_mzH$J{3U_@c9Lm0VlL=L3#yM42?4lJ5PFw8u|Lm*%m83%HEF$Z^a;^x==Ph>)eSX}%#; zSw#8F8Ic`msJ|Txd2+aZR}cfi8xdb5ZHphL0y_HmAxa`MB%_eVLHM{HNDD!#5o!%9 z)c+LjS0}K99#g5V!-zUE>zb9!Ef|j6zFj=nPJLk&Dhf6ELXa^yL<9Xdfq}yI>^WyS zgz^H-z_=p8ys_O^*rx+bQ$yWOkm-EJ4vPhH+^aJF35cS$1r_3p7^H&rkH`5Q`@SFz zGdk!b$1I%?$_|=B<0r7Fn1JzG%TP#L^&t1a4OBNKpc?m_k%apq2*?)vn|8c1dBH>b ze1QP{Z!h-hXnP=~V@HPChYS_1gE=yDFrU^5hNJoSuWkV0R_liM z#4+OhG9$1B-5vNWgOA5=Wm1pvioQQZ!B2U{StO#7N!P>BQ45nGT3{!y>;fyv7H}o; zPxs+M=a;NKwNleC@X?6>0{7pn`=Fwv1#%%ch&A8(6!z$P4}9j_R?9^j6Q|=BsxKP0 z=-_k7DBbZbBD6+<=-4hFw|YL1(H9|X&%tCkbm&-R+}#TU`%X0jHE$@!qk_N@CXu2l zwHAV}$acsqe9Y!b=rds{R9(hUe|PhjpZY6Q6;emTs+O?ofurCThal$R}IKBgtcq3mmnoB#N^lY16R=mYO9l#AmoKtNl7w6*d- z8*II7Rto`U3rsS+6a@nWZNY8z0eTi^50uVPO)~xCM}q1?;tHBt1LOQV3Ez2Za=9B6=0i+Z|i^XOHU^74l?T&6CgK zkMGK>S?sTGoEmUhsy{p{YA3evMh0OZV37eN7|{@#b8!1*76G89A=8Qpxq-xf4NzW< zfzQCq`Cywk{pHvb3AsVaT71WG|MMO9sBB@I+Lzmr$!=o2AlIdHKybW>TwaX_y<9Dj zXuE>1W83t?ehq^8^GLX#d;uW^Kyx@$g^fy8p*%FU0v-ya|uF z>;YV3@lT(@8NM3>Z0TlU3}(Ea9D0bDixA6+bZB5)s)w0J&F{p?NJXXI+UFKtFW;I_K8{+k*=O0ZSVe4c`C$ zZdiu>6U1QBxt>4%c0vsl)`qW!Re-8BLwPExPY+c12$mpvE;Z)c+{H({!|8|%_ufY2 z4H97g^dS)YIeCQigYVEV4i}xjyIzZUwN@2lS9<`yI@LWK`llj!A}Ta@zo@XX{y*l@ zfP4#B=-$Xz{j^eEi_hh65@GTF0Jis_h-@muw{M6_^XUq6UpGQ9K!c1(wLr?OKmBG2 z{XX!jFvl-dHXgfy)Yno`v%|gtHtq*LI%LR^1QVcZd}3v?FOH>vpre>V)w*AvKFP7& zJ0`D)zj19A&hzZ|Gerl-U-;Xh|NZ0gS&;8T+e?;D{t$`({zG{1nFO$`k7rIe-uctA z{wYm={jY~YVEoAny{vw7fc_tTeDxR?bleuntB-#k%>P<^G8y8bwx||e_+NkfcclFH z;t6fx(@uGu8g)M|SpQwf_NkLT-5yow6}SHHpLR+P1m=~^iP~c-@6WgYe21q8=&7-b z>P_7L2BasS?vsrt*Bk!tC%o?o zS5Z*s^u)2X{jX0u31JQ2EYIVq=i;?0Z=G*bPr~U z=#vwTnjI6FfXB@ldtU#M4SiI!d?coX23ZA(F3=;Mst;ZMuk7zXS1bLGJ04e>*-f;$I^?78*=wqy+_+$S(6GUl6m*PKyLuiVEILBYx zc^a@yLGAx^g-aWefUFXU!zx<`j+y0UGDMv{V73f6{z2!_JR!}?g7oV*Kwx@g-buj- zLa=KFx9}!8-uhoj+kbAGXA0c0daD1b+ZG9ZK;Y)2@9ZmGf8QE_+X7ppD{ue~9x;Gq z?7;<}RE~Y$z8_qxA37M?{|N$43C#j&2os?c5%O5;i~_%;L(NPCqSW1xIKOCPmY>x$6*Bs2#Dx6pYPk{xY1QEYiS%2}T1bF_S0wJ6FuVN!S zgK{^~L5+7g`3aH~!$*c6`~)wN-15g*Wat*s-{Ji2?(06s9(jalce0TtwSR=>ZxMb) zNGoKnkxGZn1f=_!qRIQ+K0wXboP)rbapT(=5=$(oD&=g?8NG4rNPdV5&Gpy**9i3) zp(tm`6%3UkzHAFjXU5qDsoMhR;N~RAOvc?LaB!O4_ohbPN4Yp3d+6b9IOj)&KFr7b zzMlh4UUvej=le6hUlRP`B6IQ~#B|fvWmMp;>G-c3MGMqEQ;zCp_gw#&d? 
zFK@&yt_A=pn2p`>uQd2S4}P_cC^E)>{`;RhdxP>PasgxRINY|Lkex>`#^hm>z={tM z2!@S#_8aR+fj%^}gCZ&jXJ9s7#@9V%IX;@BJI7(xVQ^nUcNi^y9Dk91D;l{Lr5=)D zL^@+#M0|SY@n>R4r2^tH=a#buM9|kWk-5DmnLz`%Zo+@33HR9P6SBbRw}_4(kI$aI z62b;%6G1alcmGw^m}W)q%ROm*WMnQ|q-6;>?0jyYg6^D$h)-Sv1}x7f@r>#}xdzkw zJPq&O{ZqR(A@Ntt$dul(cRv*hJD1K`{LM_ss(ZUqoXnkbxJCv+<$R##hw8`);YN(; zJdj0*!7UJejlUmuJ$?tN+kdrRc&;IW4WBh{Ej-&V(Yj;RwILTr>a&SD_W z=xBiE%(Uo+Qbj+sNaRknAV}H-%em;xAky&eWywD`*Z}D?)AQ;^;-7jHZm>HFzz7ej z0Ln5Fhv0wh8kn(uIYF4!d)vDUSB{q=A0~GVoQwXR@^x%9>)ypbpL+{bJKsI^$)i7Q z5;&gn=O&@1rF^-ojo?n*RsE>MZNxMpIKW04)cJe7{-NATL-JGl5i2J@@$opE3lqkC zy2H~qtN(Z<+*#;*gMiI3oMuYvBghz|qCR1ap^f+Td($Mtx=vH$OKlM)l2r)+bTlJi zH!^FBt)m(|N&oN297KbQ+#k{-6X+aXn zHfV^7nj2J>57~(2w$jRQoU-d6odbyezSRW8yQUJTroc6(7XbfIoj;T0*cK5Mzz2x0 zMV3w-)6~>`>Zl&OrAckuMFhI`iW;Du7|TQAA`xubO55CM5jzHmaQZj_Ime&422bqR zzf|$)KToVgMJOKEpree|!Byx!zzz1b!D>^2YaLPYV1z>izaK!|;(0`keSZ|&3z!O$ zQVJtO3aKd$XNXg%98TZ(qiQG@oTL2q&Vv7O%Cxr7TG$U5bY+f{2#zkXI$D@y8*v%y z)@Mj61_)jgPG66rxJPM9Y54g`_Ob8ye{tZbZK0ik4**7W!(R|%ZdvwD?34s$>vCu3 zb6P*f!YWEJ)kSUu82Z_{^d91%y>Bdb#g14{3iaHIJqp9pDr5Ed6lAb7q#h<17<0;H zZS1$u!Sg01_&(wUEt&uEee|)R+Lxpmman)bGek&6EcoV(;K%)q#K=UbP$ zxzzz6L4@1@A~#rC@Q{eEjvy^z(4*)ttk=#QejIi3P;&3Geo?xvA!10WkF@`}Eh0n7 z2N>jL4R&I^b@a~`XuquRt7NeLmj@Yb8vk?OThHk{+$c8y(66xkJ&Bt8HFlT3wHsee;DL**{>c*1rCD-s8oAcawm;A-~5+rEWWbG6au+t6QwO2s~?!!!r>Lbunr7BT1b_#uuy0^))mbk^Ji|ej|5Ca{oJbJ@x2m4kv#{Y5Y0NwS}xxR zg|Dn#2X^RWY1;-nd}A5|Kb0wm`)vU$kfTquxcQtx%4u{i~=v z6~TGEKd8e%g7g~u1BWmQIng`~*&~g$;k|~v`XWd_h6Py5#ayGv)NYf&csQsi?gN1A zqru&f1}sm>eT{uPsN>f-xId^TOM;{hXMdDB(Qc{r9{lJKj1_I;f_9qLoQx(>XNVXw z6m&%W?_H%2SCC2(?iWgJdYS_DygbT#HIwWY6+3le1HTGRuN{pX6&&7pc>V5X>B!Bs z9rx?5>7^rEx^Fkf*PPe)j@%m2$RD3TLGxC9Jjb-VHB{OW)IV5oal=kjI!nKVm) z?h>c~V?^~T-l@!GWQC3h8a}DHxIqP7>|-ag_mwV{bVo58t&XNPNUvN+`RlplkA*Z& zqp`H`vu|S{V|q{g^G9+sMo*%^2+X_hA9cVa;qHOq*D7Tr1^V(S3;h|=^*IK2-y~>K zhM91BX@`c6zTru*82?2}`uB8zF?Xf>!kgS3te`6;CasVd9?lyIw z#MUOOA7mBcF8tyG~jbT4*H|NH*Geu~hVg{g`~F!j21myF*&8-iMwjGKBXj=@^@Rs`P0 zadq$SZmb>#J4#OtJtlMu-Qz1O*)s4;(p@a6@uEF06 z6`w=gPrcEFN@$5NeRQscE0Wk168v+PKR2D669ocM|DBIh0>eoZw|z&E+;H4c#+n2o7AYp8cLvxKMX%K* zuo?MsnGe!UvDAhBewd*rnrJFf|6Jz@l+!q9zPhWzY^S5OCKL}5*P>pkJ6AOiUxMv= z7U>PwvJfJU_EH6B%`CaZE&I?H|HuX!xdJLIRP>sJJ@%gqO*TZ&mMXr}=usRru>VxI z#{S%4+rFnoB!jBVh>+0UquY+)J&ZN`VBC1_@@3J-gK35Qw^bS7kXM}_xYtMUhcut? 
zlsJbp!2H>QpwMM&wrRNBVnl(&4dSFsdc*Ptnl-+t%M8_J_ue(DCSPUHir?Ib@Ya`GBNtS@jMqz1X~7kfb#HAxqWwiY`}GA z^-^ZxyWclem|n=1sLLQl`S)#ttEh+-!|#x5rYqf;v;P?q6z;vXATYx)~M#GmBR_f=Cb~|)jE%7pVglFQz%Y&qMVa?aed79JQ0pS?Qm_~hiB60 zgYh4-4xu4Yy}2}(>_e#&(ciGlR%d)L2(;iDS`KC_#%{ij`fYh43ZO;3p1h%<`b%Z} zI!o`{LYIPbjkhPD(VP`RZ{g)9SQbsX>23+%?roDnNF+R?lw6GQ&*2CegJz%-*w*3w z=T|?UC6sa+9KL(E{V+ou-sk2OmVBJ5C8*K1TcUMlJg~+39yRrp>fak6avPRI{Juw( z^4M||^Z|k6wj3?ec)P#1X%nsMsG04`@49`Dgv0dwP?QNFOu-t`ijFf^e>Cu~3}Fc+ zr^6j}`i~5q#;F3cfDncQ6YJq#IfBp=Wb*{bYd3nhYu7p!^v1BY5ObO1oxyvkR|@bL#rZ$flip&ZG8%VL=6 zV^D5#{ly85L-4V7Av-kz^eDsCi4L+ymhsj#IjS%9rVa*DMKJ_`C@~P)o+=t7vv%?b zLi^zyW!lXrKcpfVGNxE(NZfuL?s1!R;UP`N)}f75DYQzu#<>{+(Ce#@^?Q1(OngA) zLQ0OZ`Wa_6 zK3ajdzc5VTLxyxxS$6{mzZ?R?hRHW?1K0^{vtGE(dY)Ye=1cGQ6MU1OcSi2;6l`Q3 z@MsR~dm#O@yGp^~t3=udv{`XHSd_6CEi%A-s9ke=#XV)uZ$>3Nv#abHr%yAe=^zIh z%M=a5VDu?Jn8h>Zk4+^kAO>+uII~Vw-o9Q3LJ5G`3YYVOwQ z1M8#PpNBwnbqXAovhM{R?ymQpZZdd~D){g5zPm(T75s7Z!NT9a!ZJKrV3%oLA~OLr z2(udxcgtkC$EXK%OQu6-*)2-EHn4nWKZ6mjpbF(p_4dq+A54O--NT^Pjq=nAN(@?j zT~u3h;&RWZ-XOG06A$B7RlNjXGu(TS>N(>$<)F^`;hlX|EPPew*elOdM)BsoxA(L- z_(E$#82T`8ex0pSP#tIkyrLk3@AAP1$ysnmixktu+Crymooog;Zk%Uk8ff)^5Kj*+ z=LnV2Uzx~fg2E8M!}dbZM>9f zE;j0jTC?x$ORrSdk+QcL5BwC<^aHdBsgmO;NX2{{%QNL_Xa z@5rxTr4ZPTq$>-hVM$nM=N^K3m{tjk^BPFHDo+t0oB$uhWHTae8_P4EKvvb)lrd~N zbvMa_AyQ%_xlF!$!OJVyxunl@H(y*8#3ALOP(V?HSyBB2AR@`Tox8Pz6FW z4hU}e{^+Dn6gul+W}niW385BZ54obaB8wSNF@_klY*${AZG1;U4ttC?#X60VyFq9*Bih*jZu=Jx|SVa36NbzUbrm1Sm$P5^=FZQuMI;a9&xC>1g@yi5qyrg?YW((Q! z-uPC}D@DG~CAS$V)ScvPIxV{R;a?F)^A-WaLVfBM&p)DZ4k@lHf$QFJP432yiZWen zUWMDY`!?=HFxv6;L7GF0Bb5100D_(mvgZgL0BH;GrSWvk9~O-40Bk$n70GA?sD2rE z4t?6{n{X6wt0tf`Sv|96Nh`#`H(ox1&TK)Xwy#c3*Z4u{UJUe<(31??0lhZ{#Iq0e zg)4ix>MyOKLsc-u6w(#YT5}vSi`H;gc5;=M-X_@q7djuAhT#YlS&evZt9+#lDVosw zd+#W19u*MfGN}19YdtSkE6^HFzIEzt-cwn!{zhP)WztJsqRUQ|8+eqDbsY%7^40b2A3zbw zkc%!{dRB**D>kxzd3!O_ z$~A&zp=P;KHaCp68@_0RiK;uCab07M7jP<6&)5fh^KdVAUAx@eMBB1hw0%!XbhPgR z%Z}c=hnao7cY+k(;>jB5y^f-*8axnr>$I~8*{pJKe%c+_dYUqR1RE!I3s0*iko;r3 z$@9VCHXtS1|K-Gn;{iH)u};6u21vwNJj|Aw6TntBhs5Z^ajs^S>Qs7+ODt8r@J^dU z7eEYg9pzK@f{CoV5>%H!N3; zIzmaMQRO-_jn5Zm#AI(0M|&LZ#}+EinYwWMn1Z#EC4Qj`SzbLt=7)_)+g&}n=se=9ySzKA)x-B8JazhGR!Cj4<^Hm9 z2hi!`H-JRkxjcy8DjrNYSO4}oSAk}(g)H}l0+dfkn^}Uo3&z8;Jc)`Q@43=F2*{Nk;_t zZvur{_#)T8CKXGFTm?=fg&33n$=^CqkwD>9cGpXN<%~{fVWn$MtSWg3?Onrs9>!i? 
z248A#_Z&A*l}b?ri^|a19(0OlAkgTnq?nz!LzP5$ZXsIr$vV{ zy#_qxItX!A&MWvaau16Mjji)#thJyGEnNBCq;T@eht5d=)Dn>q~7W!*kD5XU72+ zk5aYav7TI(&4{LRQ!sg+Y3v3r748*f3yU+1>yxwnGm;cfGI0uYs!J-bWC>^KKZM)` zj$C|B4kfrmB$WjySgIi_KMPExybA(QuE8F^a)Ey{j*!Mi2oj~60(0q~3_0fZ*=N03 zpJ{A#I@(zqiElRc;q7GDF53Xb88uWD;b z3hSxX7Uvvw+>ZsSI7?xhz*6 zy`gvraineT=&dJbTn4_MnE+nfC8u>6Mi!ge4epmx6ISSjf`{7!I0lt3F6F*(yzRzQ zb-qwM+g&9mXX!b~i@Yu^0~NeD5}j3-)Fr7DZE!oSR{ArhIg4Gv>|8RkT{+v*HcPTt zoMrQ`$+;vC@#}mDz51`PK$K-xj)0`@vU^`nj?f;+a&Y1O{p}TQ{a25xthi(|a$UZ2 zea6EvyHGPbh$Ifu3BJ<{ebP>yRcacUZF*u9sfhk<`{ey==cUUU<{*AF(aK^a+%2=2 znHEqR%2lmbX0TU1k(n#gNP{j8!Udhp{Rn)knu3PksF1I!DX)$X6j)?`VXRW|N%hJ@ z7@Thg1@DSIQR)gY&X{E%MS^Np&fR!nqthPNgo$)jDoLvKD?B0PJzf^i8It=yc{NpL zo>slK#3LJ@`;dFItkBq&R6_OfRqZSl<*y9Rz3D4Q5aya&4)mi&k83L1neR(O<>G#b ztIg?_70YJSg6BEqR}Q0Cs_b7nfJ-|~fcrgj#ZtTMUidl5ajUY>^vs4G?W^FjWY{xW z?0Bf|R1V{-4iKlu&GU_NCKe5cT!4E%XyHm0Mk+9j5jwPe+7n?We00C$>u_mce(~NZ zo~rff`dVNysPgfQI5qog6n917>d!NQkT&vJ&OB}>ObK*iGiiFI2}RtPpoNz zZ+n|&?{S=$Dw!qN;*XMVGi()_gsugz4CO`dQJAG>I-F45Sn=gQs(=ffEIojhYtAvM zlH&~RRN6e!U){cB*p!K0iTD&A=}(QpAwI|W4X=*j@lO0c(Ru49O&Ph>DirDX>o2_| zPe+)4+%Ahf6Ww%Vu|F}(MgKw7LFs0E`v$ZK3QDBZ6phPlaL6OUJsk}J(F-2ZteHuy z!5`JbcQ6XWR^)!1nJAzT9@xEHRe6TB-2P=KYyy z^thLRr+D2VXfpBeBbTx8U)+=8&3-HFae8W0q(C|)u&+rQIzN!vD-iO!*s=_$hpr$q2sa~M{}O7C{C zC@Z(0X}mYvn0z32kPDspVz?dDiNWN1IyH`lIomwhFf70?Mi4C z>*M8a!hq&w`vd&>9mD)8>MEwz#AZJ1e{^sl0$CA?1>F2wqJ+{Yn)CKI*b z$>r}#YJT_Q8@S!paUNV%i8tw(tsXq-2A62)4J5q0{V?P(;^?w%1+yPPGv{Y#@U)&N zSg`ENHW@9wNi_QL=B{lH*QN5KWZ_xBiN=y19jz2_pl6m)ZoMA+r=vZTBhzkf z%MDM`U!R&~e`jAZwoR{_obE7D<`F-`$(1MIZpFRyrLtGlHfM)FPbEibej7FVhd9pp z&K>0`R;X5ENvJi=XNXswTj$}alHt0*(6sPl5RIku5kH=ht)}Ii?QUB0m0x!OJeLSL z5Cwbd^lV8fC(Rr0oToHxi6*o5o4>$vif*b_iTy9 zNZ~GU`l50fKjOWp#$@RTCVE~*$)xG`g;b|dr@B9se>Hn_h07}6YslE@Z(MA&+uC-e z@OIpWP*7^XN&u zE2tx7I#sqQD=j*WW?Jg9n_9?s^y}VbkH3jPw00wr-Lsz&o)+8T1~^(PKE7J=_aQsR zRd8w_IueaTLkr?H$A%};PUa)XqyXzKl@V55+i&o5T;>+DmGmX!l^E3MtoY0kXt?G6 zPG7+Rn2#Dk_{{86Hx?WLY2#Am+ur~`qXJ>cFWf}NQA`q0JyXqDCI9G}yD!`fh!TOo z%_ft51N*GNp$FP;tA!!*{JusIZ+9fhd+ImU_5<*|4|4zBYL!_4&)8(ot@1eaN~*Rc zx_#~}9=UXtu4fYZ?n^neRi9@e11|*W4rX|W@Uk`_^k-3RyicT)8}|ju1wwXix04Du zmOC!(a&B<7-7O?;GI4BLfK;I|I4TbP zd-pZlGdP&hF8i|g-Xo(TWEB~e8KSHUmo0lnQAYOOlI)SJtbWIN z--UX=Kfmwy@%aAx`Qv^(ZgQR1dA`oqIF9G>d_E6_a+7}XG_^=y#fYIhAD^AY+TYp? zHUCrv$l*J9%9Fj8Aei~)UZ;J&KvL)loV&?6xzT24BdcXGn`@P#vGyT{y7NWHn5&26 zrbq{Yw%3s$ak3b(bP~Jmx}ELsBc^)+<%)`NKDOV_E+Z#=tpzA<7QoPoMS2+A^5Mix zBL@9EQ6o^NnueNyU+;&gYT1fP*jgpXWJcKwHlX%718RaSh!2=MvV`AgSGssy9AFUFk9vO_(NT8k||FO%QKzE#&^*`|NAy-JR z`VRhH+```b+|7h=J)>Z;lnu5Z?+|#d7qW+G0I*px>n2aS4C+FwmNtMzkqx|utFuOy zG@r}wL3Hg@5yP-ds!Z>xoNpNgX+#TBBqBJzc0Ut_sm-R}T2!djesb?POyf2L7XlPlMno4qe=vJvVZi}jI8{HK7+ap3-&cxu0-m-yhCi7i?NJrp%|`L&a}rJpCc!kwPo= z{>ZWmF)Pu7TN~e{SvER)LS0(F3r{gmmP|g5-VePL4Et;p_;5~aT?d$3OV#R~ut^jH zRz*fjH@rl6U<(U)3jq4uCr-fVwTuFGDwHx0;9Xbo7%r0k+P4`pAL3q;jvMs$Eirkl z1J`>7h!FV(_Tm_m_B1sf%UA0A!}#yzb*AhQ!^BtovxA#82{pooZBiG;^7KlE$UDGn zL##3LxQCah(_DXUaqCt4`qgw1asm!aW(HvjlzHY*W3$x@-rjSGWpc#1kH@ z`mtXtois}r4vO0fZxf17V`PvWa>5X&E&n$36NM^@K9bAq#NGM_`#>wV;G1kJ6=D~j zBib1O_ZVV`DpKGJ#U)dtI^A1M)_TV{3KT|102B|;c13+er0WGv=Txkd10Hr&IxK?n9EJbvPwUZ`>Tvp=t8gh{!Q#YmbGAuI0C=w*@M{^n? 
zKK?!vD*ZFH$JHkzI5E~Hk-#+~3yYY&3&0YC&Aq$0t}^F!)u(3!KIh3R^V)oVFK3UI z&oU+IRro~x7!kQy`pey=c1{+a`=D(G8ECDuT$EDdaRJKXT%|l*7RgvI`@y4*gLZQR z{W;ZU*i9l^HlX+24qMHW%ypUswC1=OZ1Gg zysu4@fdEpKg1uaqCDEV!vpjAMOC#5rT1Kzm67gJBh+TF_q2oJkuNGcR2WMN+E8P3w z!VYHzx+JU%QVofY>Nv6Vv%@tk?3IY1O3e+k3zKJLgCg;@moOnN-35SFY7N+@92YlZ znN+nqOkm|z#s7IXO`QMSSjLfW_^0mg)YOXKEK&HX35H4%LFZ%W?zU98K0+* zuYDm5TNCjt)0Q3onsGf9w=9WGV%B%{WrAf%zUbwqo5}R0;Ev_*>9@zt7)7iuX4_Y2 zy8Wwv_8~Xo0|gMCE{W6?r>rfwV>#MS8iI2|)PkV1;@IW9p#{jizhn1WcBV%u^-Ev> zU2TJ--0h#p%yK(2`*pg%7RI-yI} z)?#7`j!o>4HH71V8*`qkPn~i5)%^oF3cuI~;0d6gSwJu4Bh{F0O?>((TA&w7iPLGx zr#lqQD-z1f5*So4&A0!r?+uR2ULZ2cA@}A0us{ctOj6_%1C;Dfbtp>01s#k5RkzsM z_@P6ap!NH3hWjCvgn&(Z@#XIw{MQrHOycVtVTMA789;6;kH%q3%7SWj@Vd1{S7rrL zLJJ<-gh+*?tPCk}^;`W4GVYW|>n7_m-6fau{)Mi>cfI`yoTM*`|8-Qmv-ECK3O=~! z<|x0AU`2haC_po=>ms^ z5!)P_v1@RUYmwc4OT^sBKgC!KTkv&ze}77zJhID*U4ExHkh0BaPyQ29b%tQgl6zSY z=C<^}z^4biJp4P6fjOmn46YpdJoeS!SC)b+lNr-@q2#}>ccp7wq2w}ED0G+sDQqSo z5ruNLJ)=Z#I0(|4P2r99^uHOf0!*+Y+dv{kF5yWjkWmzYpOyNw;>fNGivmKxj*6@2 z+Qyvz{YOtZ__9OCj$l2003MSDJ{<#wsh?g*UH&%ZytbGHh=p2Xv_P6thGxQ*15Ue! zlNb*Uy90;W!*;z;O;qerJGM59`h?a*mj5ddu{3=YxGhZJ0QNhw4szXh`_IP^= z5EH^hSpD#w|C$5d@$i};%4>>aRkH=g3~ zbfO;Dly-&P$R%TGhX}(gdUCSZ^r}6NSIn6~b=|z$?^MjRUKfAvy>Ghm)^M#Zu}53m z1cUz{dLMF6mi;}JK;M_qT!9n;Ii=t?{FHom;&ZJA1%o#a%O~b=C*d&r|E>QM$#;(L zFje;8$B+UcYBaNnetUlXyD`zh#x#oXH4uYeV;1M%D^)cYR|?APfx(>j_*-2a=^DLX z8($qI*}ZY-(?IlvcRX_E0e;Q&qsV&zY#9kD>pvvlagcQVRQ>f+WG`rU?l|;4v7>Q) zqO3t;=B^n3d{lL3;1-zRf>t^KPq*LFzl<2+P`{Z9yir zyhW_{O!u2a2)Hh{jocdBxft<*AV_{*h?~5Zlv(*#f3we5ogzdWcKd<<#Oln1f*ccRK@?rL74Hyh5E}?M> z$)D5ARN!?R_kC2S-8Eo*u%7FWU3mN#5lfgx4IU3d?+|}#A;I2vq~=bprFFowL>6=j zF$d1#*E^Y8FlbJajZ2cV?%%b%f>mL=Y_psGYcU{1NZ8ba<)yM9F-$Sv&-9@*H+lqA zN08hwy`g&?R?&3l%g5&(5I@{GO(3-m+CVA03buz-UcH)-zebKU9GQpa-7T|lT*Vfl zyuz4Nb24G;5oIsAki&@U4QbdlYQW!O|FTzf<*!j=NQa9s>?r{*(vw;DFlLQOj3#g+ znW5Uwnqv+Jpg#o{ZoE3+$S#zO_#(1c{Ry4_8anS_xE8n7Kp)tq%LEgmSe#a7sUo=% zMO!YyWE#!YZ<&Av_@Y|c;uqOZ(m{`M_Yx34D{NgeP%m65MTji?w2aO$ORBae;qwTP@-IwjKE> z``3^Xe^iAj4W)8Z5D98^d^k9Aq{8fr{oJ3r-?IA> zGYn;6K@0_kZ|=q!N7I8YRJSuvFrO_gF#DCyPBf-6)|BEx?Iy{ zTHvG<#%)+n4jD@5hZ67!al(Y^%-v<{BU#+V4|UT=GJ49tZ5EekUbfg5HnzZ^06k% zcZLS%qMoVkrnEVp;c@CPJDDcbggPUoxC@xE#bVfiI3d$5*OZ6_kgr-zk8RnIDXw9n z*CPL5ihBVoi!oKmXd4Xwcl^9FVVR$?t!IODE$4vm##{>-FYa7RhTmf6yLtjYt*Qwn zyRV;Loc@C>dgnYGCX_b=-2YzM9Z2L1)Rtb4qk=D%Y#7##M100zf>0do8zu&wsVefZ z{I;-;@)2mLHyv=4EFoM+eS7d5vi``aUE%L*8JL7#fnz1RMhjUr&re$alf8B=PPCq% z!~f&!Mde{rj2LOa?AqLrGtN`^?NR2-7@R&(WW%^D`?}6Jj>ix$`Z52x1CIhHQDy7v z%Uw(1-}x#HA4d$D)*;_PPcaJu$9eqEk3NU<-|tJNZS!Aut3y2RA(2+I3MKk3NJxx; z7ejQtb9^^83>@uQc+^p;hJ!nGpMT%X>Z^y;>hMZY{(6kRfAr3QZ7BZnE8oA?81l%w|CZ)7 zJlin&e#c!~%^&0TYkEPw4{}O@4fP!@$$$H^J@~`G-8lS#x9QLyU-Q?u!jI=52s|}0 zJofpI@%r=TkWU?W3=hIv?WXzXJpX%KXoyW9b`U=*bn=g>@*jWpP?#4E*LQ437V-b{ zyLJzbn>d{BVvehY_x=wLQVEg8sQ{bTC;#)i{um)q2;kUT-ZfVJHEaKVF5)BbAl80+ zOLyPRf4Nth7G%MlabZ65U;c5|c>nJd{GSH$f2ZKzG0Lwg{J&H1e^|@^y9@qLYw3S? z!T(_w{(m`$g}qKH7v4!I1q0J%P>^@AlyB%lnBk9Ll!zaL;;Pvg{(qdGG^KE2I-cL* zPr`Zc#(n?N6VG?1?Oz)bzcd31TJpscL`23Aix6*gT*>2BJhk@;S zh?yi8ykuIS22R!z-OBVIM`nIr``<$LPboM>2uP9=5D7y$cV-hZ`h5jIXh4 zBEODBB}HUg4VGoqr6^@Y_k)u`9>5lgZ4zILHG#AQ6NfG_ zmArjIb~zPLy@Rigf{ZY8qBZe@W-(}Y(x4vbHODEib6<41@E(#Uy8g%lS`PY=vsHpr z?_@8cJblBgQ54_n-+4LdVYMq*Eo7y%ZZ^kU-;ang5E+OBxnuz;!o=HT2Illse{ntr zD$VY-im7#XJaAw-(SCwOQ$340Fr`L$2&w{Xn2B|!)v8B`Viai5dKo6B3jtJD3?GU?JIL>}0hCB^hH4nMPUXIJs4$G=0OQ-jqBqZf{s8HR zgyL%<(We|8nqui+b4hQm6zp?v++Ugq>8-`a>bHR~QsbQ|Uv(Msw-UXZjl0(7fzpS? 
z*t29|G1S>54%Ps0Iv{4C4QwEkyfP6VWDZ4sn<*JA3fDJu2js>Bs-c$aP_R#vekF)V z>ON?;kOqMxlxrH4*6x=|9ns{*<4NNGRhVSyz1Cf9Oduyf%}G{>eYxBVVjd%OBsWu! zS;EN9eHlU;L$LGX0h(t#ZV06s{Am}36hdHcNQk@qttj9J_$74%Y&Q+9HL{_YOv+e* z2ykZ*L(ipeP}^KQ+zzsljDMyatOWip@OLpn{L|D&6FP3UDdg1jAXr7CmMi!;pYMg_ zxYSg}-)#+NBh?1%6^lbBnI%^vI=;1SCJS2vM2o0%5P{4;)C(K6d`k=H4s^XX3$rv1 zj!x74u0k909xFG8Q$?bnK+&-N28@Jz!Oc_;ah9spGodaGvwO;u)nRV`e*PRDO?e7L2d`Hg- z&|cnr9$9^D?(Y2`4vwxDDSOQuBaPRS1Y7mfoo=l3fLv=Dis|15q@h4to}X?p@BFHk zulu_Ww5s8@8*l1LKYDfkAr)vX3(467cG5o?8rUK6D{NJ_R~b|}XaF1o?NHK+j(rh! zejH4ry)C%!FX%f#<#nWk{m_}~ZgtR%V%V8HIyD(J2CNauwKXsxH<`4GofL3Q)Fzlf zs1_)NrHcER$r(U!Q&-nc=|qf+oPa=aJX#CS7mKFiyF)C|&icn40m*A-ep;$$FB~RpPVG2F#_x0p0f=)3ahl`Oza_fa=Lj>tTA)5QK=sJ8*wk5!;J%{VhqpwXpJ9|BlaK>^^8ZoGvSe6Fjd?N5SLF7uG zG9*B*5CtyT*cA*Gt#!J|$pjeiwAe#Z4u=Ugex$zl@8Liy`l7_8P&6chvBTX_5Drc>O zaUA_ijF)+AsD*X1H8AwcvaTodPl4&V@0A>U3orYvAFUeE_{Idy6`>3T3S=+RZ?+fK zM9flRW(oJ6_DV)wktdj){bt94bjCa=9e4Yde|hn;QLH~%1H)Cts^s3jA@-mZBRDHQ zcbt)Y5%4096O!j zfMX8?_j#DFga?{nTrY0w?i2ysDglZ)Q62SWr*=UjGfY65S77FEQhE(-5T0I0y2&7O{^kB(ET_QLlR8ckIe`HCx^;Opv<;$zUnW& zyHcs*CFLS-pb32D(pnz$({Y}(Z~w68HMXGHp~dR_u#J#BK{SJWyApw}Wm9}DH##aZ zeWDL)E8e}5Q-TaK%%bv1uThNUCj8o%I$UyX3w6-1YT9uJw%(x@B5wVDpp+Eonc?K~ z@n!W=P?~7G6t)B0Xr@$R#~?Jf&*mybA(?QFcdzF&Y;hXk2K5DDz0AEr(b=b=Xt{Dt zGwEL{(T(u**ZE*yVFrPob~5AK9XO{~X6FFXj=<4lO0iBXvhoKeHGOBfN-vR_7OjNi z4-9d>++qWI@#kuKP+cO&F4JAqS&!k0CXxSpz`5Y0Z z`D>fN@YxEGWgE4m$yt#k7)3BxBN`l|1Se5FoRcE>qrk>n+N=MzV(dEWN2w)A`X%kG z^V`#0_!_`YhiV&ZcAcb2%J`sY2|DR#Y+8@7z%^c0o*>|w!0v04nti;n7LG4L;EKo$ z15#w>AVl(2TW4-3jq{1m9q0$M8kP)pI&w~W_1l~wSW0!%9vLqNP=&t%r}YaCD70*H z?uCAn!~Ld0X+OWFL%*@xn9Lti;F|CqtKFKy+d05{j&8O-7}^EuI(*e!HhHW9P^(|vO{c&@I4^PUc3*fOo)gZ%4A05x!6q(Bk{`HiCdJwk-bj)VI#bwupeD0XVa-o1R)`m!q(fsbx9olFKP;LsQFpvT+ykr)5Jh~x=E zDwZsl&NR1W*L0jB5~Oy&mbWfhvslM=pVIiq4#Fy92!{qc$(=phdln=U^90Ae&f{{L z_jjVy*`w0AnjddenB3U>p6o6s@30o4i$aGosst`Qq`M&ZleA?OO*p_DXm1D z7DffjN*6T5JF?fPFb3P?3{*Q^C;Hcm*Lev7QGx9cH;fXL`>1kP7{tOKixDQ0C}MQ1 z`-0Nken8CEi*&K>T+tuK!51Y$%F+8yU7x_I?EoYU_ zz`m8dTU1hhCd4SB>_7{sQXwE}j7aBGj+!p#)qcCjib}3L0Yw!gK)on`-D#j}Ni zj4P3pk_L`a`FCzkt<&#Aw1tZ)%}}j`BY%&Mb60n;yKoE%3+G(9?vfT3YEHyYkWjHii}4v(bonu+ zFd8dT4KZjBP}B_~8syCOF}#(PA${qB!3%Vc*RHd-KBaD(iSP$a%k|Mys;0G)e9i$g7drEY03QU9S`D^J*96q}& zD0qT91SWVe?+7omd$j5iuE$S!Vf>S6=Dp7ch&z^FDsIr1KWpZMVd(?~i~~P+>{lhVVoN5#B|a5yic-Sl6Q}_Llt6TgGyjx5LuNCG$BB9DP4d=H zPAywnhfmnAPCX~gzk9iiT_ChvBYVA@h*_S%E4g7pap7XGV#v!ixwp42MFm@vq-7)% zw^>bexZ5Y%b!=78KZ1h?lc0Wof0$WEYL>jT0M~HXC3jvpua-Mj+QQB;D5bxgX)-Ex zhR3bIeKCKY6JQQWW_G{-%+9gFfN)?vKi&$U7*0oZSz?a@#^exF=gn3<69L5Y$VTqZxb|O=z%;f zZ1LV~4r12jduRp3!;`TJ_rc1-Ts|a!LoOG|&{LWeo`pA_2r*IxR@@A5yqU-hpu|=3 z#CfOYE}VlzP1 zzv)44($!W(+53;C#_xy2r_Ose>f6VrTP0EuZHVUBg=j#ON_d!D8@R?#HO3(5dM@jw zmM-`OEDz>JL~f(?c<;>*dCppr%b%%8P$R-E_2)EDG8GD^d(<7F;=hOYdXTef9NKTm zV$ae&9cr0-P1!It20|Mawx#5bg2Rd?KpdeTj(8Dax(qwDzlJLW-;z@vglbuU8<~wm zWt1l>b@0&ICLMy^wd$fP5?u0E%w#HN!Ns}3LBskN z$EI3->1`x?tJIx8W{?mh;Uuvl*OvIqCo%cVeX-%9!l8ln~F z8^(5&+j?~UqG6gqHO~aP z5VLX+NZ=x-y0YRh>FRuGaUx9UPbsVwR#k%bdazNac_i5!Nw;FSyeX-Z*HPbxpp@?( z5n;L5gx04ytQh;Oe}M537nQ^e=w&j22=$?!tKT&9iOoORLZreBfD2j2&Wm-Oahe|9 z>;_4XImD7n9pBY~MT?j^41ht86;U_j_^h)s)DPy}5d6kQ$oBe!(xmxppl}Uj*=wNY zHcUd6iaK1{93w1q)n9l>VA!Tg)5?&@9ZRF?n&Xf-FqiEJA@vM|XHUO3+v0%xYBDIF z&`)!_kyBs{NCGErCS*0Og@Eom39*_g7FrSnifTrQkt#e-=@=u?&W`^d)Sr)$4~wf4 za4bL)e7XFfaO2sa#!b0PO3z|gYl$u+W*n&MKJ4vW*sH(N7AU;9135r7f!M^u<7k2i zV`_`Lpa}a=+LmXbMk(NES0wMbyacncNQU4c#kyqV>??49tF}lNzDT~7z#l4-3q!^? 
zIzgH_5nY!zQHPJaDA^_!aM}d5%x=-cP5yCx-HWflt~}{kP3{>49OE(-Oyuxy;Xr>y zF_UQ<_ACfstgxHfyMTd8WR?2Wc)%N0)CNS7`@!mb*Al>hE;dRc1g3Uc{s7 z>|*4QlLPe{xM`u|n&65PCD~elTeve&a(}|Oq!Yu{14P)IUKila&~~sXw2y;3b%euU zrscDzLvo#Kus?*mJLE60hrbJ?l_;6-wE_v}NhVc&I7VC)-@Rs#O_NngmP_zlPc~J+ zyhNR8*(qz0^#?FdU+;tRR>=d`Muh}MS~MxwUwPb(4K`<5n(o5Y9=UnQM|CX!tMhFsZ_U8}T|~l0&u^T&Hz~6m#vZKlR#e)-NwX`KQH+ znlMpJ%E1qbJ0ej9K=QnQlqqgQvXwKlsB^LEp~BozbXw{Y?Va;+4?!F26yIp@oy|;DG9#Sj22wg`6#BW?@wAQN(H7}*)ps)T`IgW(*2Z;# z&r6^E!O|#rVDhBNCzqM}qtl2I1&w;CHQrR83tGDI-GPI&+buUesl!Ww9J?-dtr+W# zSxsN(!OTaeNVFHE997*fGo`u(M^_$nS#lhK7^K(9Re}D+%;wOF^tCj1+J3X5jGRzx zfYvnH%x^Nf)Aj>T>pe>_3W^Tj>QId#iy>>}KsU0Z8`HiumnO2U_6}|tg2GYF>(tXH z*3vdazn>J``2ac)z<)`-v;5sq%&$#Tp}Is{QVZ5s?~zr8VtzZitB~=FihL19Eu1ck zXxF+r5PVD6jK|bMB7Z@S%p577wPtmn+PV0z|6_Orfo2u?&GcOa6St;A#u35*KMF+* z$h2m4;4pQQ?I|bM%37~&Pxjvz`dAL1-}2}ClbZGKZT&$gi3j@bmlJ1^3zQtjGW1{|+!0?%z!``fq>sdI(0D zfEu6wzdYUUZ~f0H{NFwxV+|>c`%yfq{}_YcL;L$5{u}@EpL=C^`1TPI<#f1FbQY*E zE5W_a2>b&gW*|X!Xu7t)8^TB#y!2-hzsL3oGwe^zH(%`L3vNHCx93}g!4I?TV1_-o z{5dr6jddYSb`Kt6KK|>Lo&ZrWoA-mi=1#arDz4oedjm1hM5607C=UR4+X)fwyE(r5 zmt9t?HU#K*K7e1~P$ggp#`-Tcpr?T%cuFnI5{_M`Nfw6EOo?fos0C2J2Ywbe{eBZ} znFx|>uRnfrdtwWZ?NVmd1U$>>v^ufN zXYd4`t}RWSoU;C9Qd(t;r?SW?%N6PV8nyrt7$KUH&#k9Fn)B8%8V9%>nc?xgk6j+4nFz|}h-tM*I&?QCf7g!TROB7o>u>i-(qJP3rxf`6Y2d+@6+L+#;T!$S>W zXk20e)mTKDee#R@oh2QUYy4VxQWPN|k`1m%CM0VDXW4hru3Puzm8bF*u3_>j5?m!Q z94WZ)6E1j|O%3SbkrlvT*YQ33+&^4UFwZpr@GJ%?ze8D>)eO8bt}%xUWz$UCT>8{L z_Zd*y_t?)J2T8qIG)ua!bb@+L7s3>!=@L*JXTg)}Klp|a3ryCO%ah$XR$V@=3Y%}s zK7u{qc;KV4-C=^eJ)xb4Y`q6UmeQlChq{qxIGYjkqLB)oL0%fMJj;q{X$w}Zlf(Op zv`SsSk9%E9#O%>-3av`tAgMd{iH;YmrMz$v1SzE7(~kR;6<^E4*<^q%8_H=0AuH!& z$3Qx7sG687<3E%wv+(oofchr$7OX#{*MuLHa>xwMTB&vV2pA1xO$wJ10wp8W&StC_ zX6g#Ij%04TGRT~P*D)GX&$lxzi9dP>rwcNEy54!t<|7!4f~jL3qA|;2+iY~*8c7pM zW|fb@4l);wpDTu>lO&)Nge6ImZ4h|v&8TEAzev`@R>z)SvwE)OF*X+Ra8Q z)t?Gevn>lyix1ZtlnjVY8LV0p2?j?8lTTU%0j7?Ggo&CR^+zL?&bBUtUZV#(O`qvi zZ!{HnvkfE)lf!ZMj+bHa;Pw#$XBLuLrFDnmEMRCgt>en&a{<#)zX z=Md`PfB@sj;@7hH7`DV*#8&`xcqY&i`(b;s{D&iaU!KJmU#@qAolv4=E5Ce`Ns%m; z6_M&KgZerg`yPRtOU+;(u~&Tuq3r;`?7Bz+{q8432;P|i9gPR!C}Tw;O!hJ(aN<2W z$KP&5rQ{0x-hTO9-V&2hu`YO){ahc(J5J z(nDe_xGTF9C$MeBsFe}WSWjy+0UNXMY$R?biPKITX(au}z za&xknVXR}hfzyC~Z%Q_ALA6=z44i1%`>E_?YUNEgMT9(C9;s62!vfY!_yzz)=_I;z zWu@0j$Vw7}&IhS*FfVq%7lbk1o9c66L#iJf=h~peYNEv{Hi6~&34PQp)^s0jjo(j0 znF(06=OZ2CO}vtEG9(yu3J0PsK^(BK7MyAGFz|-2oR`Dz4o>p9Kw}`<&HgbhNfnoy zwD`?SHFMyo>fdj)*3U@YueTPrHF0+gNC#)Ghzh!%-0vDTaX0 zLH-`K>eSvfQ8e``(u0Cl@N~n)m$pwSU*2JE<|9*N&_6&JGwXnAA{D0ZvyD_E`dp6a zZ||KQN8Fs{eE`jS>k587nn{sK0th|xS?jc z-+8IRG2wPd33Wm*b?iJpr4fp!b5RPVoxR8KdrVq`+l*2#N|rc5W*0k~(j@i8dnS+E~u2rh(5r0!H} z0wtyY3N(*>X3qZaL*zg_FR15F=j`!6ar#^_&aI(gFlnr>c&cUfOd?m3UM z>I^)X$RATgiQk#(rl6ehCY-|vd(|`j$|~(6QOe7Q54+sgU-(k94Di*5G&S-*>ETg? z1L5U}*v%`o&PBCwyW++~*QXBAMu>2>lf%TIm+%O2+sZ=RFdzk1tV~ZS66Vup>1!v%i}=(a z;aZE#yf3Ze#VO6$?f6Fr(SVL5HC@`yEg%W_0`7QkU(+y<@1reJcK%42%HPWUV=Ysm zT@ZDf!vxU|DsjcvCTdHZ*keNJ#fzE-?O$r##^M+$vP5nL2PxNt($ zfXYH`mT8zC3ZqMS#e&A*7R;A_obTj{nBc4$=Bwdxnf)c`F$c>NJ>Gq$Qu0tQxU?wOjB9ika&SibbAFz-nn=@J=0*UgYTI zg5XjZLPx3n>o``8kTQgg(8ig7(>|<7bg{?*&6iqRuU%I)NB8p{092WXv7So*Im@od zjKQkOVBGX38!vPl(_8U&zuoWaB2Qp|Qz7mp@P003xK>5o%$Rs5w>iU;b~C|5 zY7>h})Ye)&ixni7Y9n2v+hY@nt&)E>sYo-a96=%7Zf{A3#TUkm?nA2XI!^_bU#y%* z`=08%In{^gR-DjeiEG-5&UX_-+c{sL6gIH*q!8b!Ro+-nS(9pgK-ekL_6R(V7qF-8 zkVv)ER5pL&K?J_6{Isd6n7AxLRTaVP+Hn{`Y&pA=Jg24HYF~Jd$g*qtry5| zg)Z|2PrKalm=@J46VJZGkgXqRktai=a?y)GM9S$JUlY035coELB`S@EzcK zpI)iW17#}`@-`FeQ*YM-JuCUqRI>wDEJr6HU9iLrEJorgJwiMjv9ytSit?`~RbCq7 zxbxnjq<0PgJz&! 
z&Vf9t*Qq9IlB+q4Pp-dyHg$iIA-d-DMpWeTr?-T^LsMZs0RKZtI1_i0`Xq9E%RTZD zLvqI>LUWB8qq8Qo2k(zjW%SUJ(@gHwaP|&QMH;Kv$gk0lBks~~+&dn3MFcci`C#!L zKdokaU{?TZyz9lD3gW~DGG zTyvxfxCQ2W%xLP#42g-KeYvIsC51$%*xw_P($o@n%FFLNg3DdxS>yFWb4^Z@eOw(# z(72qLx;ej64%SrWP#75y+Dvv`8dQD5qHM$UVAUEhXuZl-LrQgAAP0H%YHRD)k%5n+ z&QntR88Q%uK($WpW=F5&YVkQdG!w<)RkC#BTJbc#BrNfGpKYW6q2k3dIpcA}MrNw; z-jN?;HXi2aOFe+e=H$mj6I95kUP+>m#8ZZzAuY37-bAD?&j6JxOFrykL0f|(H}F?G z*71EKDve7A-L*)?c`!L0!3w!=206*@TcgG#AM`y`*9pb)D;A9BL)_iA&i=F#Q*Pqe zr!eJ5IQU^}4o`r??&vyR8D$bd;?4H^TUt&=XKX)oRSze}o@=`)LT`FGE0;p8Ip@MY z|JZY$qPM5Xs;Irwj=iT)YpRx$kdZh~rYcHsO@>I<=Qu@j55rcbnC~-|&I`8eWA}|3 z!ugE83e2f>Msa=?Xyoqf`+l`%<7?R*LnB@6%DeCGPvyn;MxMCn%PNtd{FzazT7qNb z$1q>z>I*Mh-mH1O00*;+xNwD*;hVQbH9qV0g=IS_6!UFrY&VazgI1f_@^_YZHD6qn zKAt2}AerbYas4H5m^@d5$d|~EO)aGMh9!ue3^*fD#CDY>@EHA@_tNJAfOK6V)~k39 zL#N*2)Ape4wj^cl#n$Is>O#3hZ0BU4&r6SSg9OI&Q)*b9UxhN9yG`huSxo`sH2@40 zUZzf$DA1P`b^a#_dHq~z{Y^?$*tp-l4Jcz)33!X$j@3S<(lVibpZ*L=riRnNvo14Q zkFHN)hO&ktWnL{d;0MiFwvAKev+swb*4lEsWNS7;$-?~}Qiy3it~9``%VZ12qFd>` zz*Xp(+8nDh8cHAbrxPNSRZq`(s<|#E-!9PCq5=be*1+M9E@1~Gy}kQZuBM0-yo9cD z^8GbHZsqO|(zlFFyM&HtQ$|%fuDuJh_8;DVps%;e_asmIEwR@|uO!m%LK5izu@Tpp z#5%)%&X7#DxIene#4`T*rCO3WO}fTo)wY4@M5y)bNm^cAHRJO1>#7ap{#em{E zTzSB>nL^B5DAbq8I1=Y?w>A&>aY=H6L6J0Br9n|TFiuI%SJ!h0YTuF@%PS&Wdt5n6 zpZM}Pa_Txil3KS?O-fty`~2hVf)JbB@Hx#pc|=tFIeJgpC zID794hu^))UUNe)vD_GY?VT=PZd}iW;R^>2Rk8;9|wV%^|K#M;Eg$ z#%@XGQhaB$wks_AuuYVao889)_B#9{nMafPlZ{>|r{uhhkGnyGW~3^8xxqxroYM37 ziPJTgI+>x{s&Zyw#*)$K_X&2N1{uE2*!jT(5o{6fY(i&@LY^8R9uv_88gbi^!EI@s~F1?Fb8?TGl z7v3p4Q<%Ru#wPNpR8AMT#+)4rEX$8C5kB|4nCm)U@3JMV2 zM{+7eIeI*ZYYd>p4h5r7?ig#8cEPq~kwcuX1B_BseQJooVj!T^2x^QK1Y&6!x5z3- zL6jYbaQbYYZ_=wjIyPmHoO>K|eRu()0cP3&jI-FctixmP0MS#^yeM*UY>#XM;<~SP z!Q)y%eNm%6dWiq=QR;zZ3X9);{vPgI3qqPqw>U8GnsC>oU!#WE=WbZJ9zs|HK8TTh z!0f?eGVxZQvSs%0gMaNvk*dKo@Iriv)A+MWNQ(hYQCbAD$qmsc&*#p=S4&baijbV; zXe4F!9gNDZq;XB``YZpsp+t+bn__z-7u_O|+r4|m-hr~A%(UFR#6fLC#fJG#%UGP8V zM(nZRm*eUEKK|?ju;u?unxFz)?j&OXy9-%*$l&Rt=L^rq(r1DEhNlO>-BUE@(0@l^h1?`0;Sq!Bs$AWw$`R1 zTZCH;UmpsqsS4tM@v+3S8)~oN6UJ$BX(v|d8&uP8RgS_Y)t7k2^b~`Z^Ag7Ly9}7K zaGO0VYDm&&7FCLtZg5wmu0`5sGfka6N;n+oJ!5wfe>qI5@hAJ1j@r(E9E6sPQc2yr znkWwmNodm?4AZHtFNv_f&vRLl>chQ>9*?MjFEV@1p&2K}Yn5M|v!3lM%zBM^d^VUy ze?fNzY7Qn5vF#;51Twp=(k*c|;RrJ(;FKVb0fFD5>g0ky!$uw%M~i}79ZA;dm~Vjp zTX8l%(H1MX8IMkF-@hmRwDZQd50nNQz z0R}>rbemOiDjsYam`qEa>Wv@Y&u1UIAjUrh6oFC=9@ns)9?@r^%_qsU)UNJ}tjY%u zUl?9MI#K~|qh0uxsr3@Xh2hZvcQ+G6y%zDJe1 zbYCA$okfL|d!1N2TW;k1o>kea?u#QJ+Jgt&c&KtwPZgUBZ_M57cG!EX;KB@jvH=Ow zM>bA^r8K9FR@RElhL>&mEx7C_wvns&7n3nh@yGROD8F$00f??I^r0H)vxI?|>5#9Q zdl2mLHbS|$ejlcOf_!`Dd@T=0Q&ixaS36BQ-?|3A6+X`VuFa~XZOjVX7nmMcRXVzP zTY;*=I0y7!C(oQWSN7WDNzMW8eixs+hfM(RO8QBzb z*{@%rZU?71K7So%z#PY+*~+wo^7?k4Y&X>dbm|@M?bmiS0wv_YldFE-!d5p6B^s<< z>>H!TS^0I-H=nqDxqFP`#8!&-?QMyKoGsTR98@be?GW}6*IwAC&m{glFXEFEj2r{p z(5)N03`K1VJ18A37AjQJINAbb<>?2+arvAaH?vmRRhP5TQoU;cHGOcEMrue_jt8aq zi=b_E%(p8e{^V@2X}-_0Ssr9Pp}W3$d*AhHa&B|wMF8b)cwa4fdvm-SxH4;gpPbd8 z5B<4GMRr1#l^-jxn}|ZG%xMvzOHrhvp^4B9%A^H<=;y%gjHZW(Upg1v@@VxIC)Lz> zfpMtjAD5jXe%0mrbF)|T?u&CdwQc?umok#Cg;df?q*n4w&iBHHFF1n3o}N%Iz#1dt z1{>y4_8lfyeF4qYw1e^+kDTotfmQOd6BHtwFh5!sjan1|Dl9v8%zKqF_Erzlns^+F zgfkq98tXFo%H&xJ^m3_qQ1w2(G85 zaAKgAyHI`e`~*eZM$}PpfP~k@BB2%A>sxk)uL@Q_`*MVxnZ1k@?KbJ^Dw9NehVGtJ zx4y;+vEk}@(g&MB-hR@E_x_{x(=YXiQ_mvw|KUbg49+K{<@GR}2`WtTsc~s6qwFv+ zc!e06Q0l#eI#e_0BPo7TcRcR{__@c$TdwL<2;-h_0Z$3%InsUJ97w?(1E$OtV{rJU^bJvZ*%V!S+Kkdoz16F+Nk?6R%1UszZV zm&Uuak0m*L&~@zGx#y}0i8o~UK6{@>5!KuqKJ+a$V1h2;%S!f=#3806YDx@~diJ8F zbKH?~L^^OgapH^W!oEBE*hSZ3AIXRnjWNDTv!!MzDob_ne*XCM9r1~?#GAxFCv%#t 
z>gvhFnmcIKv$5<>_cY@*!b|=5-o$#cl+0IpbEe$zU(9(R-qKL3POax>w}%Js;9(TL$E|`5 zm^Poq!|nR>uYMGLZ0KVyYWY0W4$KwNvfB4YWKMEW@yS2l6I_obJv?<$H{1JIe~o=S zQwpJWahCTBwE}A$`7h?0uRwZI*tA9-WE|}vrhjzl_h6dF!Rsy8=)_xhqN+5= zFOz*d_$YVwN#8R?dxeRgApNbDZ4<4_Wf#0%0BtWmi)S-ZCRG|J$7LPhSiTDDrjXwv z6XH(Ez!pc4$XVD(0JCm~v{OQpNpYXYlKqag zUv-%7%G1E{`u0k-+}PXl9&5%*QsWG2MKn=Wyl z9cXvUQl#c3pDjh0jOX>{ zshKupmzd}Oo{q;B!5P`sQ1`_B$og}apqwmV@=kvDJMaBwkmg?glOyR))C`X0l@72= zJ>Uf2MUnnvjJKYm7uhH7PUXd}aF|hv9>=bDuzt>N-#UYR!W4M?t>Wu4X-F3iC1^6_iR7#{V34lLbVi6ROue%O?6J33i0!mQ4$Rv4gdT@bo|1M zqio6~BQw9&bBDf0q&XTtP%tASzV`tjZ+YWUrvLWYIDQvS%gunkgWK-|f(aRFyh*pS z)8DcK9rZ;gWLW|nohU5}bcTHOB4t7O6qG z7TGlyo597GgJ4+SxaYj0jX>LjI6^8eI;1{RWk0lP75Z9T*Q6PW?{QE*ud*LVO7>y9 zXfOSv^S1Ks#&`B-DP;Dh`#n@OOf%E%m2!@|eWK=#N4ad=^D%2shWnebeKy({Idyv@ zZo2Pk(5?Qr5T&kc+-EG_N?jIhla&rs&1IE{mW~edJk@xuc`We>7tvR?r0ch$443&| z91DM<9Ppx6Q-xD5Wb^rYrOHF<#1F%JLt{tUDzaNS7?KTY&V1rxJJNX3$l$Zt{Uy;l z^_v(Yn{d8Xi({+Zmw1EbJ#uEC#%Lpk_bO3pqDT4{&f`{e5ROxH*SFn6xRbeDXp;W_ z#u9b4be?&BoC&ZGTNl-N8Qywt`~*z*=ve4(tU+ z-XhSJ`vYrlvcijyMia(X@4t@lF(v3iW!GF$YJMj9+=L){Sn*2KtxCZM-SwYHRB4f lDnH;5<@4-X=w*82~vb>_a$oY;GxeXi@;6Zu?K{s9&_77`ND1E_+m1`-mAI1&;vHO76= zVj=}Y1OJd+HRPp{Dh4UHkdSDRpt4e0o`!ol_r_mzUUr2$-1~;z+0pD>Me&WOisW0> zw{LPRQPr=$3 zB*x#jWxc=L|ECAg5NHt?prBThZt+0lpHC5=Y>kkE{{Qds%zG&G;sdWU1r+}4RT3s| z`+uK;$E_Y>M4a$PTTjx4{v8+MlSJxI{|!m{{r{^$jMJeQ`N2QH*Rve^(;u7mPgj`^ zeEsEME9$YEzNO%IwI_5uEQ2@WVUgpm%{L@=oE}PQC_Eori*!1Z_PKoHKTAH6e(`lEQsze`&f}_O`}-qGgt4tURs=6 zEt`0Cd^{u}^hvk&NzD-C+l^iQQkX^Ugf_Qc!yhv(s z?{v9`IvxGOD={Qny=!Sk4m8T}Rj`;GSICE;uPhSnSNTxBHw(V!ZjO;8WNx7p4QI2q z13H#DDYl5&KDI5ddKUUNf1Ml?83T`p!8z#b8hj(~b58v#P}-gQ7=YBj2&emTlwC00n)#d_i=n4B+G*<>K+B$sjuSV%v&_A{~ni^YX6+Nk>0Uw@tV-jV%%bFmUXaG%01f#X{@_1kn! zHF1HF+6ivPQO!NF62mK<@?XIAhVzCU3&f+C*c^CZenkM=S+|&3Wz$eKD|H#w$nt7k^iyL4I zU;VLoC`#WLgpZ0L{lRRpHBg26xpe~drQhXNan{b~mU&#$rqfBjmxf*+wubX%3QWwa zyclhSL#64(*#ce_zkoG4%)Co8^xk5&dXN0Th_&nisbA2y-D;&|hws-%KPN+sxJ0&K z$^8RD3)!~0&4=ZB4h?H%6HK*wsKC_BgfT61y^6^At)5rh+ubKM^fVd#d~ZWk+320C zpi3VP*%Mg7_I9}f9%^@_SKm7}i}egkZmg{@Lh%LUJT;XHY8gnAf7Yx&|3XH+pAIZh zA{^rfl9-H*Ipw5HY~Hn>GQ^9$_CDLyNx3Zt>Tip0bScq<~`!U$r6ssM9j*SaSazvB#K{zY{prE2_HO)xIbNM zJ2lApb^5gjx3-RXN)SO}x^qJ~{_N^JT_d4{7~$PxL5&BCJ5?jkC}tjqB&{`?t6V^w zPDb*wWQE>5kEs259OOkeLQB)tOU^i#gU1+JdE#@fr~eC$;{usNXx_8%hg#KWkhOg{ zl}C?&QUb=(?S~%V0rrOeH>Yzqy~lBHlWW6D?dH8UVn3yByxNr6!WwJ&YX6J8XHWcm z&NPo{?uyaJ>;ciIm*rvK-~AD&vz7IJlI<+WdB+v2Vho&*yHu{O*GCI5T%s_pSarlNF{XK51F}<;d`y`;>vj z*BOiGJ-baAZ7Z$vc$4-p0Xm3IVs3r2WY}`j9svol=$~5<2=3EA+fQK(;^W8&D(qkp z*R!up$*GZcB_KQpe(gDwmbrnoM>RyBbi|b6Vm`lU9!q z4}YkV|4@7*+cheV7Gr05AQ_!s!YhmEnvldlou9L}{iFV9aAR6Hg$0Qb@6&{#b|^#z zFN>%2H0K(ehbkBhcSFU@@*fl2v2GJOMp^jvv&CiGdh5eqQf@_-#n(Mm#>OvjLLE%o zz_5qDOAWdluBcjqyDmA+gM7vwmyJayiTV;*r<*4fy@9A-Q1(`mcA7s2e@}j%-H7(m zO3cS3=Azk{+%y@}Iha;%tqOBsXkWC4)-|eEIHuC4^w~Eg%0MzoTB3Gp~5#F1cR^XvtYh#Mp58r+8B4DsT|j$!y^{#y(B$5V3_e^uvGlcrbN4|duM({U*jq@NTO zZWJ=X5Wf3%cYEbp8CaS}_sf^SeOyuD%Wy?VCKd)`^CR`7^~MLiSht3qx%MO^ogpG5 z8tv;jUl$qHh-!Q+`o7Fs#L<#A9}+GSX@B|bEWcy?1P4!rsT+Stj%9JscelEqC?zW6 z*VL+vKCezETq8_po9_9b{-Ezxh?RK3S@wH-(Tm04v7nDLQw*cYe+U=t6T=Zc`8AkhHP=kD6vV1M1Qr$;oSr%4&NH4rw*`8?!0|{$^i6+)P^pWQb zBy!%bh?;5eAWT*tr7@{w3fltcjQp&4@ixKvOE;Q_9VFZGr{%{I`9L9$PEzhtd$?Y) zu|oaI8Bj+_HuO)Uj)UYhzki%Xx){P1VYw`PSK=Iy^-ou$y0m>&rhFX_t>sD+zk z!qCHAjgCn!l}QY1yKHE0_&)SKC{Y+?CdZa<9chCjKY~{9e8sty?@ZrsyYzZ|?AtQ@ zC@d1I{mFQs1-?IpQBC?#%tLp6JazN&y_Sn754%=j>B?uFucs(~MxSUIPevw4#`dn0 zvQR2Ujv47Que0Xmn#PGRSBs9ro5!e|W&JAR59t$g+oF*^W#-jpqI>C^$th?RDBv+8 zEzF@oBco6-1*G%6LUY5(1#Z{5_*I!zGR*{_>XGy{jN);eAu#kBPG2@zZ_ky^p|;3x z+&0C9b4uqTJL0Xt9wM?$C1J~a%*ID{S{~kL 
zvfk|V?4Amy=bgoLik>@Rr}A(bFc!j&cYlxMe1gtdm) z3-+BUt;EEaAEnBD$B1~aoo}C8sohv|^U(Tr=wy4nGYrcpIfg%fmY6HNJjgeP>6+Vd zuRwS_)6qIHm%U${-BPf#B=~YU$qFo>;_q#|&M8J>(Lfy-%N;#Nq;|A#e%%`FCYD5f*G5{++moaD9R`uF-N`y z*SItjp03wu(%8tWQhx>nlSWxC>=Q18F?ua#Mc{?qkztRqM0z0IHV1;ZkZk zD4xnUd@;>b&X&X=BF_6qxc`_G* zl~|H6e)#^>^(+L2(NX}5WNS~Hda@PiDy!mxEuR6qzC2>O<>1fryRx?NmJXlHmkC$h z2u?BEeJ4fKrlg(EPx@}H6FYpL%p#r6`n192g7>nh#YZ=(fP-f_$VEZzqI@9jjFvw% zUp!iAoIEL4QP_(VS*M1LeN=O;lzA38Tv=kwW;{!-3f8^Aid~v6=Y7NzG)(zobFX%o z8Jep7MEPlNu{o_K_wfc}RBIUIL3*EOG^bn?op|#Qru~|dYR~!FPe5(RA`j&D@i!ylcB~7iH{UKzuoaNC2#G2J1{(} z@1qmXr%|EXSgL-i_a0^5&@0+KiN{VPW;5c51v5u!McUvo-$gHKVN#QUOD~fOVQW6w z>jxbS3IP{9!*|v40XHUp-C_mT&<^+TAM?~ zckbtEmT{_D=ihGSU*BM2m>rz5NgeBcUBMN8A}d1KEoU1hg$74ami3Ox__5m;bz&C6 zm4!ZwbQGGv%B|{Y${aF2^s(I;#YAvC;pC8aRX=t3W1f?lLULYFo=S3Mx3JfrPSQOA zRbD-OGR-ohh>yi!wW#uWl=LlDVu~uEwI&{O!bbO$P0R?+j>BY6vyQ#5Yhr2LLv*e% zSDZXIV#gLD3f7-!RgF2X6B>Zn(oyV-+pBm&(lc-P_imS(qo`XeXV#pTXqgC)NE0 zpM3TZ#1~l>#vsZ zhJ{{=weU|wOwcc>JXRY$CDcsx;yQz#M~!mcNv1EfpcwH#Q`#tq@p!0d@o5LumSgeX zih5ig5*uP9{Jh>GkL~M;xk>LYOV{(fV&GuYy)SW{@mzoReD7CX{GR`2jJ%Yf-evy- zVa%gOV{CdsR^+pJ3ib3*cmt+>g4fuVWov!BD!qx|)e7fkweOk)M?BG~X) zM!&8w$WS%cIIAb8ymv}pjaBl*FCbtbPEAht!j?Cn%Ebo9t->h{H6zBKK;;DSED;m# z8x__C=Ag;o{x8kXsbgted!*_v(49R` zAOZ`IDvWcNs)4#u4C(SqG~Z9Pz2Fu@OS3G*me>&4-cW1T+M5cr5acIIiw>^HqXzyS zqkWGeI8Rs1n$NkJLmkEf&4H@CFqxdGZ`?v>`<)Y_+5o*=O*1i-m!o77i)pX1Ti)hb zt<$n=Dd;2W%H;VnXRtCT=jz{WUE(R(B_jT;R!v7DxytCii!7u_8r|Y@ssqj*!lKn} zBL6NfkESO@CitFnU(ktUBW;uVMnvZ+-=K0b$rjZ0ceTlDtW%X|a~~607)9Q;?KVRX zP$xxs9zoHPFn|1sw^ul-PJ!)f{Cub6{fMZ@YyoCa!lRD)Ovc{5S(LcV5M#-$nRE+^ z-o0hRcGh9b9snoOrAT2DViXs->v=<7DIrXF07uuefO=aAu{|#7 zKn*_jaMH!8Rf9nAsRiHOUDpVY2DghiF7?~fN!7;n@);#P`9cm5s%&><6?$jf8LN=t zxYeh<*(LO|rcGfG{LtBn;`5!tg1H zgYK4G@5uK{4?tpG=JI$?T@dPq-i(qSi)FvAQ~a&C1s_Eajo&N(EAlbkQbW21#%B+W z9#TwA0(q*7ZvA>&uY`if`XKBPN&Tc?54R_bC8G8r*G}L`L2foNj!Sl9#iyRM-K^Z} z6B$MQvx2YAGWeZjy&CF`BR7p#$L&-$SiBTS7 zzg$Fl=_mKMas*0Il=TrnX8rgsIkL1*g?9@D2IPx8M}EAEjtm!hkVkrgUE{&#<(G8A z2hHGDi2t_Wb+)|nh?;T$8<$)~!Dud#-$Xau$CrngB-3n;UGovJ=e9>F20^iezeE^A zLqe`&4XEx=9a0%>wteV!#E3-#$U{U<5tHUc!SPjWPO zFfoh#2lMe4(41B^D^}b!e0{BQJ;Z#w2e;)Cx>PsDirE>X73 zv**xHNLoJIlt(*3Mqky52??jtbAQIQ@1oK~rtV)rz|sfS7+gD1UfYwOvTiDwlLqyg z`=>8Bmy;jKwMI3y*nAS}Aa|uZC%JVh=YrnQ(T-}exN0ls=8BRfHL{_FND^QiF1tUE!@&3&%gXsa6x`!_+8c&i+7zafK+i8C{EwQTB?BGxI%%-|*YbW4 zkr^IiYMuW_PXw9)B#*Hj`r&^xh$td&7-V_=oAf{8BMB^IG7d`$;eRdLl)$P)^CaDk z{^Lp%!T@OeBwI`Izm^hLkonsfqe$GGXttXKssexvyQUQddt7p8{2pRdk~j3cUPorG z8#!+>^&A193(zWyJm0gCA0?+xb4pM2VJsw%etuj9P?LZqAT=tI0My!9Ue!gu_M+_L z-$@Km2Hst7NLYMsI^wi4-2uYQk5&2gj>CSgtFe%-LJSX0(Iq#LGD*% z@Xf;_jLf2Bg1gL!L>lahwB&~EGsM7vrBR#Kf)xzFFL6g)uXtrlyW<2te?N-y0?-oz zl(%G7&9-`|V@6N|;P{!tUJV62)rRqIHR*YP!+F%KCMjhCpC7*l*!q)CdUbJ>GllUG z8^4>=msh_5g<~f;?`eAVG(YFfoU{CX=7&_>KF+#1m$QR^k=up?ZGC6Cnv3P=joaqE zufO`oOEvcvesb2t=y-2^7y!Vk{eIKA;W6M=X3sEw9E=o(&sY`4_LtWTWkk9Gb&hUn8K!e#g0e&q%XM?fr z1~u#?_G$@Cw`wP*wy_P1gLrR8WBZ0|Vv5H9dL%Qy8H|0d2{3UjzfSwfv#g6Uc86YQ z>vWz85Id1Qel2VGsQxpcO$_kM%5$w3ZviZt^=96ydyr*FIyd8&eO=Ckv~wN+315Eo zzddd+4u3)W7(|uGeD+VY*jWiszh}M8SNpb9HpyY-$4H2|4ZN2Me*F|p2SFC~e+T%u zK?Z<)1uPeG9h>317J-!}hrG_C#cDKQ9vjmVbT6|U8m;%g4Rh~5y|>?Tck5BOfWUOG zP8$b*50Gx94R)>%B}G_(M~$mSTp>M0FJKaX!!QM3g{R!zqYz(d*scEA z4_1j%gqhlEBloSt>iw9ZAAh=O7CL%G%R|EH?T0@L2il$PO!1t(b+U9gn}6%5^k@Z* zaIh$t*P=%hHkfV8A4t$^sXB-KfJmw3k)ufK2Vzhej+D$Wxl+dT*EZ zd?fqX8i=}QL8hpE3~lJqHT(bojnPl3 z>gQ7PQ$F4Ml05N{gwUaY!twh^^WLS)oO3tX7h2Uhy?c0+)Lp96y!(wi+*d4FebjGv z6R-B$uz9{XGOJ#rqcH|lt-h^vp>o`?dR!1<_PU$WRXLwd5Pl1QxtiI;5(vqQ#K)=n zj7;KU>4U;6I3JOFS<(PO?KaEMgO*+>0HwDY%+_Y(q#^lGTK!4nUo8OP3zpeH1lMHa 
zQ~+>nXW!TJ()6o+M_;Lm&6S^Y`It+Ox&j3MQCs{m=88EVjsTrAtT-DkOVsJBZnZaN zH2LT}t;M3VjC4lzbC#dyt2d{QGJzMbbs=T}Mxm6Jr~hIYyJa z__)~JAe21y$6_AmsS0YNB}4l{zwAsAJPPxa@%>iiLlD)cVBz9dgB9d3`$;qT`oLc*s(s=XJ~yKcFs8m2xFf_QFyTgT@6M0P#B3A5vV* zeMeDeT=0CU+hRz#G%ku72Ss;YX*|R#TVO?zYD!0cj(i*@WqXN)vnS_5QtSfZ>${V9;NTeh)T2cxAlJ0Yqwea%TVopd)0oPdYNl|GWNE)AnTE| zl2}sdz*6KF{`DRusIl7bGO*mmb}ZULFdvfg2RWPB0G{|uQDup@^k{oUO>%|m2!Q6$cq-Pzq#706Cwq1GMRnk;bWubfEf@C9S@k4 zxo&ko%smjAGe*!o{rhU{U0=es1Z^noF6Q#MBWZWYHAyr174Xib$7@hyZCIOoKN72` zH4O33ZkSJy=EjO~@S-1QyUTVvD#2K`lJo0b=*2siQtJ6B9^y$EO{drQ=2S*fx(!QB zcI_w;Z_!=S`utq*_+|fT2BwlFc;<5L@Z9JsNGC*yga!Yhgjq5Lq0yrV{E!|}!8g=V zR%|}CoVs)lx-9BNj9Lv@_Fr`>loiS@f+(8oe|=S%#Ss$3=s?ieDlefv8q3VAD)JV} zL=cn0_yl4MObW`j;A|BTX0VTdqrBpJPbA2v@af5iamzVujfTD=9n)C!3sdkhN~A zr&;mo9Zes4PNBHKt0QAafFX>AUcH5FCNAc3f3y*hZbEeQbB=SG`(#L4-RC=iHE>w& zZWDtKQT6@FKE&joWvy^cRKB2e&T(z$f#%i&rWDJK6r*<`x(W?^QAeip#e50-Zu0oQp@asTdqwsej$hx`>j z76qN-XtQi$F5sqL8Bw#$;U{D#U>rVNZP}|tt|Ab@6w=$kUQs~ygY3sra}nr<6R8US zn181_9dqL$GfE^hdAJ_$y;x}CY#}haBJ~~IcixFB0ggg6+(z830_Vl-MVoL<(t%#* zDIJ{$>a*W_G|)C?WD_=R>Eks3P&ufAXtq`M2O8fZb&-|RQY2jiWrE_}w3m~)!;qTg z`Xh<1iw^UmlM+teJ;_^|?efR6ggSuf-V!?f9vfdmq z%6q|hZx73nT=hO4wOuW?Y{22!AnJIA&9H@Jk5h+7k+9ZIz798|c*#*Aec|v!IECO~ zLh~`dZ&0PwT=pXg0^OidHI&r4OCeh7-~&gS|R_S<>LEK^C`?AcI@wC#)cX zCw;dR@0B#bL!5VV!K|8hVodHf;xoZ0esR(P_mkrFZQ=7NzD~REKc}9VJ6!xR9S$oW zO9;IPBhP2$9RzYUK0n$==c&}fa;`;08cZtF;rdFDs zkTut@JZFK~_qW(iGakkzZg8NnD@q6H4mKHbJ{u?nSpKsb6{|8n7FoFK#Xzo0ADhbf zJU~)TNHJ8D{e06Ue3COho3xV6;$_HD#WgLIPCpI@WUQBVZ5%UFY=GJ#hDf*8a$*yjgHfviX$ zFv0%LLC~LaVI&$ue2yUmM215L1V4ev zJk9gdG7~QDM<2S+q8>tahqr*YmSLG&#fGj(pxiz;rh4e*Vv$k_=^@HAU{Tkd~o)U98cQGyJO`Q^ZP0dMCbHm0%`f!!3t_@9LP+G{zVsHRWw7;tMINBj?8?5#I(iLf^igqfj_>lyK(3^Z{4G=ZCEAs0$B0z*l zsDM$9^J;0Xg4#RgHkz8xDnI3ArAIY)eZrV=B#s{2;7Sj$PF?9Lw=C$>tX-wLHC^bo z!KUh6=ED7)Q18^PZk5U1F&aAi4ea6=F7>&$*fui4;t6LtSWIjl_^Y-~o*xiyv3Ybd zBS>zX;sV9Q7@XLUWa+QljApbx&l{f~m&E-k+RoyZCcKB~>UdqEx0T12ViLBJKSY6P z@fk>w2A>&=EHN`ncaXL?!X3sLSKLmIJfb}q+>YYN9g|epl}3p+;)&|4B2xe4e2>=E z?@CvLz+t_=L@@`VPT$(#!n%z%;QVHEU=)z{h<0fn5xP=m)g=e2kaKSlYb2jBx2hc+ z8lr`bt`^9^Rq|;}L-TMn?%oC;UT5rQeGuaKxK5JkKLmVUdvKL~qNBU3Uv_-_1@5AOaNEW$3IO=sJS*}2!G$D?0D?8*S~b^N--opg)M{6*TESOqn& zi%!G6tdN3i{~PS&1nRsJC;Mh}*GC?dnKBr$A=9)MqKB|mi3dH@j!Gu)(DN_N+G3NR4-Cl&7=^9){G^{_?Cq6dzkA(bMAK~Q^w|Bmpz^(d zhjGuhcuA1VZv@6S0aJ3ieAs`!tiJEFT(@>L7r-_9omJu;@3GhTTsRsPji}F-Vj^$1 zc9m}V>sii?Tby4%#!=P74^3{*k1CT)I0JuDx3gA>Lt4z;X30qMX8 z)3kHNemT#Tr;%7g9t)ua1nLm+rC<^E@svx9B_^Kmh;Q0QcKB+VX;aIfwx zQL+$D(|zBA!=Dyt?Li&K8CSWt+(OjZ#1u*-RGqw;b{C+K@HS~*);Gy8KOf@jFIS84 z!5&r8`=rVVs;^wt^9O`WFD?3>(TY10#26zJoNCIhDxsx!`GjRo*RT3ryaCk6@5juy z-@|8-({@MKR@g0K6F5hORc#;lfM}4-ei0pBf?-4vdFZ5_Cgq&dhA>4?~K)EuM46c!@C&$`}hb zeZ>NENS143H6x^RJ`X-A zJQ8uM;~Pr<$TspMq)6@ZK0%b?CiyxS;TH7E#6trGSg}=4l0hr#;81h_3w#;{%rYmR z$2|&BXFvRfHMyfs-b;s5!W;bS;WHPQ18c1J1-~9{29-EX@v7%gB>ESrT&yc*2-_=W z!omy5J$n-GdFFbpSLv9Nu~1N0@AUG<^gTUEq>m?HE*HjhHGUo!jz!LQsrJ+k-wwBA z^f61lz6>Rn;^kWwVbyQnMXuvpZeUK7?f4sxYuf>@TohpbM^6|2NQ@HkdeD#ajxC~w zoW%*QEHVc=)$dswZGo@b?{i5K97P*5Ox10@Kt6aZ3mG$1_*&-9BB|3_S;?WRw z9nf50#-*G8s&jM<&+YW7hDfVWb%g6n_uo5~Z1Fh?%)_;qWId7^CuW=`?JI0r_G5__ zy_K3H3K4ZRI&3K^WGKKbWLi#aJbX)U!fdiz&ePTt$=n{#qpYl81Pm+pJ=9Z#6~F3;Vx_U*iTyS%;m8zsQZE$py1;Gui_&!8%?hr53%C_lWovTc$==~> z^>vtMs2n3Ft{OMv%Tv>fO|Qg}IjxC=X1hQ;-UxU2)8@52e`SGz5<*(Y`nph zJ8KASTSpXBawzSpttD3r#Y!V7+a_)ynSoKqKItS@NGB;RlsvEAS>F6?T_ydv_y|)M z-ACk_mEmQxArue5Pt3Mm-6n-|dd(8b+Q*UFadoGREQK=cwE1vCy8`Dx;*0fdAJ=Ri zW0Y8YTTwA1o{DB5rGDF3^PV+p{f6vvhN)p{rRNn_GIducHWN=Q94%=JMVa+wdAkSU zwN}?7z(l=7X17wtPB(2ENJVpJL~2(ZdW;WLLezbSyd8MRn%Wo4J@>mP)beH6+RegE 
zCP#R;13bL=^nk9mSf-yv98Oph8rQ#My{EMs*Urk%W%Y#jly{Kz%5&~CUC{GThWa>> zrrOL@P>R=ljV~wEzG@2&!3gaND#f<@F(+j7%z%_6GoB3l6D7Orf>6If{QUsNcOjMX zRQI=&slQ z%b%8GE68Z@rTcH1(L)=%P!sWARdn8=QTS+;xa51=?7iLi)}BcAT|5{Mg1$KRNl)l1 zQq-(^+F5wFB+sLShVl;&>uR#5ogVZjU-3owVxd&tFdDD64=*hpoF7&H3ZM+LrvX6Y zj_pb3GjwShTaQPM@5i%&b_Fx#sMFt!*|RjufK);o{ zU$#ML zmS(HB1o{HxFf(S*;Mf6mR zsuPg>K=G6&7y&dNhLHGTRFqW+qgkOI&H^FWCxlm73$5?=%n;U70H1GeQKiiP$bXwpDZfAPQi zm0E<{es3tzoAv*mk;H%nfF>;3GOYjJdw+%?Z*D|1@cu{M{JWxb|GhVkV6SQ=7Bz4!bItjzz%i`B<0t|or^ zE(6r}kO|nl%K%zUm7l3x1Atp7z5*;j=j{O?76kbEd1dy`RdYl$dXrhQpvlZLc`esR z(av`(j^|6>=T|_kZm(CNdY*iFvrt+Cu=ub3J{xzdZ_b6r_yO=X*6jQCsj2=-$IYPH z9iK@X+E~5}`jtf2?w#F4*^7Ngzm2zkPtHU(vd6wS{GR<{J;r>VyzuL*z3tspg~8LS zrxL%z{rg#&_l5pp;3SzaW72s1`Wx=lfORl5@4yqppzdcX#t&~aT zOv5Jci8FRc`<{2}M7iTv6pMeqn z?U|gvryNwIF!6s@N}_ifmO{@!&^|ac5V0*Lc9#Ea))V8MiZ_C(;&WP*M6gwu(>Mge zploI!%dy!y7nE(_=&4Wq>v`m)fm#YfDK8C6owj49dH}%hnCpM*b@l(K2kT@2W8DIT zQ^U!q(m)r5qrT5hBmHBP;vA1mGEuh@FL2o+G%5`Gx66>$JH&YP-A3drY6b*}D#++7 z0dJB8DnvRn>ly!hO!Cj)ks7$Ey8@|w23Or&^~I#S+-tN!i8~*_TAZg(8NTKEBf$Ru z@Z*v#9lNT|et;ubfBo&Dx0FeTVl}p3zk+*wx%1g_I;!m9|GDXKJ;u9FTY)FpV7?Xr zSasBYXJc%ZrtcCt;By1!FfQr%4{3^sG^gQ`GqCWQI#meVtOf|XCl)q$O1jewJTe-# zin9895*c<%rnbxevkEC%l2O+-OqTR!->b>N67ouxm-^13pksgt3oLH(Gyc2HGl-?r zc^%2r|G1md6kw`#yKjHR=sn^!4)|ODgka-h)Q5-vjY03?B-vrbKZ zg`o<93C{mBv-g1ntXU@~yE9{e1ca;uxXju(y-b3izE0u!#Se5EtrouRz<<|ER2Fzq z&8Q+r1^}^#nOWbNg|{N$Z0`C+l#IIS;8z0J$cPn_4F0=Z;PP%?e+l^InZ@TX-gq)H zivd1h08}Jt{f;Yf|95HKqrg&Gf<1bS5as7cKd1Rk4NCAtu6An%x@r9NX{fY${$1Ab zM=0jWx#p=lT_;;(-Tw>#!8hZ1+yKP4+E(`80jy&nRQBdqmT}bLvxiW65JTdZG~*H? zJOOdcS{#*GFGw<4(~kQz9aKFuGv6OC$*EsU)hk~ZFV6FuEs+_o|Le3bq-YuO?GEvi z6SNSl?AU3%i?e!dCw_7nj#%ik!k*z6zCo)Ce^%$!orSyGI;KG541os4&=b45-yZ87 zL~6>&kQW5uNG#=ffc#nq3rwW&&)z)0kCKpvC@uUBT+!TCzi%^)n=3#P#7;zQn6ZSl zb%q9=_%FMfSaEBEBr22yYTjEALjRi~`6J{nm8myV>y!rZ|VlDUJY{9obepdV_`SNd-Y9<*9er~MbY>#M97)tLC8j_Yo7ly1%vhoZZ z(&QCH;ZE2C3he~66d4~-u9MMx!oj3A#8#aB+Sgn6t6hd4YMDAB(HNG*kaUaS#1#-T zXmooVPUBa9N{~3tjDk-?IUr*8RCPF49B2>Z@IVNtnb6jo>7fS;HRrh!D`=J@CT>6b zrMH6aA2+c=LZcN^$NdSS*r$no1&{|}>;T4-;O#YG+yMUcYna_=uo~Woo)sPA44u<& z_oT|qcA)n2Y|#RGLmSW)2VcGJea`FJ z1SA7;p*7Jba0ep6Mw>QN* z;+XpwZ&Js3P|`Ub>s@M;e`O&V4MrCeF$449HEu<6oiR_#K*VlR?F<9kG9ARSKF`B= zzZNN)9K~xH_cUQ#2t~c<=j$)xz%6sd-|DV^FlDVaP(Vb{22h;#$<(mU;diyYa_!&1 zu#J!0g9~GM*RYZME9TMT0$h0*w>+JsgXFbd4sqcgH1q^PGp_@E^6;)%1QPW&kaCxZ zb0A6q2P>Iga3Nw=i5lAB)#lXa1-BdoBTizcnWsrUv%~5V9j%hYk5-zH3koKo#pRaY z-yd$!1?>P~i9pSusBPD$Qt!XhzoEk?t(AkM=?+Ed`&+%N>lPNc^73(d-mTu05OG2* zUGl8C<**I=L4!A7wAuJT7@AV}JpSPCyL+^)WC?$wQ&NG1+%_Q+_4?v)$pY~C#E^fy9-kKf!&aj8N9Q!m>vlX7wb#oaR-Ekp znxX_?LTt3%gP2{QV|ZM*;P(x29xn~Z(f<>sRR|*J#i8@Jp{Bm{_?h)dFeZ@mCO1Gp zSlctPix)eJ$N(I{4r~etvp|0P-UTBBCSUZa&%`J;Rho!YU7zuAfCX$3;O|Cv8+R(! 
z(@o<_yVA2%va-Lq17H9);9~|E4UcR%Y`U6FBgOHGJ%w0fwQs|<$<8+19^m@Bv2wtrxp_88-gph zVB17*rfr2auJB^O+=BBIlOz02Nn;3nq2{i+fR0}mnWZxE%tL06@=)1$b+SwjX>X4O z-#4z~>hcq-A%cpnu_8paz5xk7XkPqR3t-w6Q&pU(mdW3H%X1wmp1ML=&oJ#`#t4IP`goK@E zN!iNgS#`AHVDaljmLnLjVkJh-f#R@IWb=Kna`Zzk<#1f8{);~=d+zvHMj!xN--R%S z-o053J^-1Q$KGs4BoT`sJdXp_WGSkUL4d_fz0oh)6Gmg5%#-GdIthT&^23Lp$om)yp+WDS!Wk=2OFL0ekEi=c;eNnZaCf<- zAv^-P#iQ>dIUz7h5Mi}ZLu7rYz`bJb4nFU@sRnK}uKwTdip3+HPphn6=?A0Z2?DW+ z)rZU%PZ7Qx7)6^9a=6G7WG=N~^})!av-TOz6R-VI*2K=oVDW`GXk*8zj+>sH0`#QTc)b1X8ADwxH2``5!@z!kBucs z8CLuwjoMFh)$5V4(|ZNA;GxygP^4_0C6S38Rtp*AwXNQ9B)rS= zG5tusMVEx(PXqyuI}J`$dqQU1@tv6(%m0q%Q2a%mhs<=!NyZh@zcu;TXj`vcY-G~= zoq`_-Rzk`?C{8HgiMVZE2Oy&=?tQCJ3Xp8^yXi+%);mAkf+D3yA&$i{j6*{CXTP6Q z`(HbY@JyZblPomUH*mYrdk@%nA8z~@(FQYKmA(X0B_O~2Cslqfg;p>>o}8Uz@PoUo zl$apmiFxWuIrSP^9;~^#u#VS}pdqyI6XR1f;q4W~4@4kJ^ZCVRev~5?sstZJJu6jN zj|8O!Yd}^+fQ60?-bij?rmA7Y4h!!H3HhW1a{loh$`J2Pl9%Z~H75F>?MY*VWIbdP z5mw_+Ov>gWF+{IgPEH~2!%T+#=HFHrRjvKG_=~8~BHQPv_v}@o#s?c%I8L-IDHL=l zqug?_j15GbwAur}0$~A2w-oz0%oyk!EcC0S40KeM7@};UJ+R@<#!o2MNsP9;6CYm= zN@bTA-9D?QPD+8raZ0wSv`gfDoZIbxK_^b>*NeIHw`*g>B^&WW9tvt6_#Y8jlcFq= ziz~R4W`g2hIcgU5*R_qD3}`*^!OtGJz7}b4iP|$v_x+|nDx;O|-}Y9nUr*foq_T*M zN?3RIg!H8!NW%F%8$pk2PaH|ma}+^{PoP5q34WtE(Q^%d3=lNFqrpo;LI(Sx3X08s zDH=vI5OFJS1&FLdEm(Dr-|jDu!L?pbC0=^6rd0{@Pus8EGgK~Qy*L@oE0^#+&1QA$LagCY0Ctg6? zD=I;#klcdYXuGzN?2o%lnMcP>XC0Vuxt`}E@G|Hk1R}gfEvFvUzzHs?;3OoIE*m3O zARbZd0nm#wfPYkWCO19`S!4jfM2f2WYa84U+}vc06FH!t*Y-yu**p(Uv;DVPAWMgLYJwI zK`&k^RTc%(E>;5Mex3AMpm(da>lfxOCdnX{3vn8LP$NQ0uBttvuO3qP+~|TjiO@a3 zO(T4b(Vo|09UFz%LJs${B_(5czan7RRb8YnALaf|?T|0AfZ`=8GK#x}SYBWJNmW+P z6>I1%mI=pA-IWwWz(gBS3v@IS-4Lg#CJD{iJ=CUa$q}Es|gq@v2=h6F<+>f zu<-w{t?!Pf@_+vir;v5bBy?=bDw~WB*)vi^5pk@@>P@nenLV;X$R1hANEsPrWshVN zLS~fly>7jGe|~@b`pe^V4)=ZT*ZsP#>$z?Ll5w<*L6Y*xEv@kuHgAhi4+#$l`6zDn zMZ}$xMg_d=?9#Iql7|Jdpp$p1dFMX~*Bnx`K0}AOv%cPF-lm}n^8E?luK1l(k;|omhxpgyl zhrRmu4`C%%P5PfvkD=qcU6<+j<2*;xrP~i*G*fo4dSYSU)_~yxQ5kY(W-~gRSsr0} z@a3}yT=MPrpSfK?JbZnc65oJD@L+4OBm9u87I(Pnl|&BnsMUD>Gpm|bRs)e?!L9T$ z1L(w(aBflJ;i9C+$i&gD6bb`_dufG@^akm`AM(Uct4XyaC-($>fl3P zUWuq^*Ju}l6S2pps1;4P*1~yQ(PZ-O5I!I--)sW27BU2hx-Wsq_@aWmGP1@fd{if%HqK@yeFaNDOpfgZ3Xb18<|LVKhAB>jLBJFe_DM7u)dzA&!_pi zYU)ESS7nf?P}IrqpkvRy$l?lOb3Hf57nyQ3n|IfR_bDR#1#k}3CT*s=DlFP)Jff~~ zhzV(yIe3$>IUcBU$eO*s@Mg&;ekiP@J_}as7Kg5`O(0?-lWD`xKSx zGH}7};bvc z3o2B0*Kj?-E@7HidYz_%xEMP9qW}~_Vvkh1Ch8w|G>`}+8_QFQXzebG2`z*OsE5Sy zg--V7^&fh2jk@IPA0B#nhM(UUU%K;2&so7*oroso z`c>vU5=k_6o>61H*EAsEewXyKv5A~v%B(}Gob*;*nzfTlDyp5q91iuAoWs7EBQ|(G zR?{tKSl@9z+KSyj&0$Ap*jm2)b?iqk@$SfI@he}ecT>R?f=|KVkx={aa z22zPGFb49pk^6Lrj2H9-&3%yyI8e3jf)o(?q4vVMG zUm4qM*qunxD=RsnMgPBgB{&ex0VEHA!s52?C$D?M^c{z{mg$6g}+%=`Q^pFWy z>_9bVib!nOoEnLq8htz^{#czAMa41^c$L(&l)Zz??vTUQYOjmSyQuE!!Qc#o61TKt z0$T#do`8Vc-YQ;0kb~yc92Xm*!~9}S9s}LGsw!KzbqTaBgB3BGZ&f}eNZ9pIrx3Kr zU2zXiSsAG{fi?U(bN=hY)e6D_RoFyY>3ytv9Y<`()@O>^z`BSCkwcjG$eaVb-8-2 z$~x4a6;_Jcony315IqMPoR3Y(q8}@+-KN`hriZ))F6TF{M06KF#4h*tmRV{!LApXB zfWI}bEeh-1<>^^uq>qMrFaw;ib!G@d17Vi6x0A6;n7*;M5%(i)pZE!`urukNwWR&q z3M+|5(hAueKWSdkQ>qQt*9sIVb6k3;UCd8tbFPVG*Q~x0_cV6F9}NUub-Zb?>~ip-v#At_w1A7ooc!X*S?04x4*G$rqe%39ppZ zDNHw=vX}G5A$|$byr5MUHrQ(etcFWpiV#Dz5Wmt5#@y3D8ZGmCRoah#-2__3Yy0&s^~k%_NIz=Y>0u?0abF#bz_Dz9F-t@M^NQKaIz$86KAa#o@)YOSx8iDwvClWMX zSE(p#_I&KuJ-1+*`VQ*2Erw!lfZKF@miLX@Tmsjsb3gBbY4jm<{&w@HNx90f_mVk} zN7b)F7G2pcG;Vy8IG8uX0)I%MP^`TrW=d)|FhYAX(;r`3^)?R>tlb~iJ>~U#@Xabf z86^2wr>z~Ao|OCmf*U;1aRo4Y>Gy8*>1qvMP=n>Ke9ET{yfV!)+wuEN988%XTtD$y zN*0tyaYz*=sj%OL<3>O=i3x9lcIbqye}>GlS76;+PYav3qSah6GP~?SaV>QP>So#e z@~z}gt)NfrJeLu-_pIlGQP{I~!`$WILE{I;`3r84kE9Oho^?&x;&XHk8_F>t1paEm 
zuiIp=#`oixq!PE!7u${$mq+cF=Q=oc8+u=4mwrO^z8{Bf&t@Pb6{!I)2VA6>fMcAz z<&w!SX;qi1=B%;KL=yI$R8c8+L=z0_mk~2jq9ZRsGOa;iE1E|wq7#|eB%QwrjtM%r zy;;o5SBEihA$w=7#Eq|yxMjCg9 zkc>f(X3|s!1|G0G_zYX;7cRPZE~SsdOkdr}=u-8Sfjd@q$y295UIG<9o6uuh)=ed>@i;n= zF>`8151`g*r*15Im_IUBb}uD05OC#XpO*#~l$vWUpvVsC3-CrA+wp64M5yjf@R^B; zrgzMu&P_Se(jvG?%u~6yk<5YQ5*?B*i{@=7*|`_1RGe1>DcN{grXnV8?vYl@Z*t%C2%KSwAKy*~C}JON}mLlM;ElOLkqw!cKx+YvPQCXljZ zpNVXm37OoJUvHQ^{iiFZ{CR#~PW5vndCselPDw+$`UBM;hoEpDQH8oIeE;2ruU|?kx5nL|>lQn$o z6zSplEZuWb()c+_EYh`w$S{JOKIRLm*U`4)4k_c6_k=wc2aTLWtnhuYGV~r!`Zr9E z{~lurV7w!4bJB+>k|<8lnTSe;Q?s`*L^=_37|8N*x+{1beQ;Uf-Xz8Jo+jqz%!zav zR(lrN_--T&kJmrN@VP-Gv-wS_BZCCoi3mBI+gPN;YAN zT-&V=r)uzXDqVHLr^GrDP+~)TyeJr6dhIXW94Wk04Yd~ngLq3L?8&wlpOu}qk})o zfe5Z$?2ChZQVU_W8P%z&iKlKPy0nqE(5Ka}?)>7dnu+qq@nidl?>@i}x<4myptQe& za>%|m)rT)E6~RXSY3_8GS}Qc`25mg*U-W4%P%f-Z$oI;-s`WibCQ%(^(<60*kgk;@_9jHH=+|+|4|@yO``R-w`Rfa`XyEu%aP=8LU>MX%R&5f~ z=HpTZSRZI6S{3h@=5~LUIV2Dy>7o9$-p%lkVcu2W5UK)cppzO5#&X;onE{P!e^0wR zCo}u%PXA3QtLT&*oS+~JVU(0@c@#aggyPChvw?r)L30MC8Ev(YXekC|!qQ@6&d~;7 z56%~o@5t;2Lb5I8>;@Nwrk|SdTom5Hi^;|FZTdP@V}Fb5O==Wiz6>R~$+`M3Un&e(`~2!&d)?p>h|b z1Y0j>9%UWKy+ji|*SY&Ok7m8!hAWhM0$C$Vgw7x{xo(f43LCqnfW^o=#xZrlWK&9xBI+kw*zZU&K&1Z&YR|odhMM zQKdA$pj36b$eV%Q0AROvKwC_iVPj_a7Fvyg;^uTYjgo@Vj^F9}qIl!?wtjK`^uD@N z)tZ^vXe@lki78O&MlV7A7U^Mg0cy`@&ik1C<_peuiEjFd1atk&X1K3Z3u_jcFpbP1 z+8hY#%GJ0QU{eD#u@QCJ6%`1q&Yn-YzO2F=LS3n$YTEp zxCo}l?}YG`uGo|nr3Q4y@aVQju*fHhkTcXG>r)AH4paf!JMOddtV?u1firse)Q@`o zJ1LW-@RG%OeXrq6!i@n~)Ta7|Rxg8HswG5G3y2g@x=i73wVd3&={fq6S<#^FAivO@ zNSH8tt5iiRoLRaZSQm5XV9U&N@*q6P;iEgE8w7;Zpl7RE`G~AJhtJ3ebV%3Gr!zd& z*?LbICabxH=78X|4G+iB@FZ=fEu03O(~-yx2C46a0XL6!Q&}#}}gne&+f2AB-!do zrF)I*vik4s%H%jgjDYj87bR- z3W1kqdug*REQwy}9DdpRwqmy$)9CXroOQ?nNUZmz~`7&W#hKUdgXM^xE275`lH6ZlWmjf%7wSI@c;UDR&k0cfoWUtldfVuP%N8Tej zMYKMvNmR`CCdrvrgDll!y+=)8w$6V>PA~eTgd0B$wo!9DR%iP8A%1!JwD#|#ds0$9 zjb7;bbFxAM*Hec~>8pY&qSdeg{vx&UIQoB5Gx~;?I{u{1-}S$c&EHBwFhN zY0;4T@o4k!AjDHQ21wpeM@FWD@cGDTp0GAG{;Et4F%OK?*1g5^w8j7tNA|iPzYveb zPUIN@*K9`qw_jC z|Nl=gzp}b}tLY95*YxBaGqv1D?b6YiVNi?2K23i{NP-ijf(VN-Z1R|_9VC>t&<;sF z^v=o@MbiVCB0VJhk*7%1J?GZZNx|RCVa5+q>CzAzGuM=ujp3Aj4zNZCH2Sz-+sigE zxE3vjV)>Gw_>3S4^8;mvpuHOm`ff=5=Uc^~@KIFJDWFwOgiW*u(UV4E?v;F?vw$FV zQP}&+<%;m};pY@co6)fS1m8|}M+nPN;XgWrg8;cZJ=6Tqso!o#(pVtv><*HHyYR|y z51Cpmz_E|+q*a(Gto(Y>BpjilvmUH|rL{HI`1{-g0MLRtrSB<&uQ@~lW)qEU&FAV} zDbmZIx9a3Py7yWasxF3@0K;SPiLbIebQz=}1FOO2#*onyy;ZaLrK$`f?p@kJgK;Z? 
zJeL-^F_7j$#9yv06Ec81FF;5}1xwNl1gCdE*n`!*nq^Vo0@A*q2XER-BSNZSvg=X& z_~N`7F28|O&XHk(3k%+27xV zoO)Ea5-8aehYtBlTUqBIR6H)jDVSsmNjV{((V6>@V}V4ZgbKf)g9AjiGz@IcFL<6}QGxK;8^qnK3-U2N1`5!P4D854>Cu#@IvM-V%gupN>s%@HyYnCv=YWdTgJeT+!n z{l7Pe@^POi6$(z?c759>9j>p{UnR!R9yGJR27&2NF@p49xI0Pd#1i2a>Px$}E+Pvt z`nfO@solZ*A^%!ssIrMlZK(7GPiL`riC`9zNFuq%y7Mu@C-DL{@P~mrBrA6SxMG0_ zN^Z_U^)AWc961dl2#t|%pE6aVd5M2_c^y!X-j*G)hQOuDSwxWwlI>CYJqHgB21CX` z3Ui_k%0#|E_7P0>EVgm(*Rzl3bPy2j=oA=^A;-_aI4}MiqIy1HeH!^xHe?{4DxcOJ zK#l^@yFSP1aE@e6QwTYD7@%yzEFbL?v70ax2w_Yjh&qNV)x&Yc(6JX$p1i&_ukZ-b zwP%D$evDh&7!=Q5%IQR&!Y~IAh#-=^B7sPO5m*))Ba0sa^(s3cU2mflE%I8f7$i;1 zL-e0f6iu;P6$Vgk(>QpDvgCdpxAH|*$-u4kx@u1e)<Mpv4 zB!a5P9+yioF-J`>a*JXoyoh4d-39qf?|`apH%!LCkme)R!toLxZE+t`ISUM#W_Lsy z-eoDjv^8&<%SOp*YxU*RC~Ahzw*icffyi?zmfu-X5Z*#92W$wfpQ=YFxZv|exWgv~ zWPDVKG|66(&|7yhdiNDp=H-4|wbQQd6IC*@&XK*u(r(R;@8SDVf){)8LSLYy| zI8^QxhEKERvHx)mk~x8PeIhf`Xt8R^{Vx*n6@vD=z9*iFqUSGlj_=9RSTZzzA>?sc zhx!$7>^l5`5z?;Kw|L&%(fhlmX?>zZq=AYzUh&{!^!gkjUf)94K}@N(6ED8TmjC61 z2J_iK;z-px6MWXCGNIlHn?{jN3jfg52siSfc395qLH%zq@j-!fP6k04tnZWCc)@*Z zJeBFjB~B}(nOv~O*WlF}Nnd_@=L8F&P?1|*^^^=36@*W*6LT$#4BTM5ewNa9M^-`t zo^WD26GyEi%;H^6t5_ki!AxmwvO4I5D2376Aid1d87yt)_;>He2;g%>Gh5%)wREzi z=IjaV*awQh9_wthWJ+IA6SvAj8gCwGlsEI`w5!^*W+7;a`%%(E%q;%K)A7&kikCRD6-EiR>a|xlUf?kV8_xOYi%bv-!C_E%RcVh+h$# zUp2Lg8-5Y4(JoK#W*Qa1aI8c!l=kANlclaKk9mpOcslw!y+ilrq ztc9;|iZ5&Q--Qd~vI6G9LcLZ1E*F`L{ga()>p{D4cMc}_-_&I~Ff^$)pbweKFZktV zn6{ldSP}ESO~QFjnKY3u`?JGc53Y%;l8WyzYAP*P+{!N~ShCd3J$ZLukW=+j!kQo< z?CZS)?6Wga8DA@D%uxTLQR3XMmFv4~Vc9TuVd2mG58s%uCDOIIA0wXf8z-5Z5I>o5 zKQBwpJCI&JDOTafU==ieRJ(^w{YrDV^s(;b!rJwZL7m&GJz}wcJ2&lBB4L_^-2OH> zlH+S`P`t92SUE4RubNnd2zm7{zUdSAyN3MXoKd>C~Ea~<_r ztw{Fl_Y3;}8C(41$dH6bg6K5pKRxj))-fqVK=T@qjsGl6@gJV= z6@AzG{`Ea;&B9sZIrq8uzW2WNwc`XUDM+C|A$kG_2Zt^rEv^CwhXjLzd-MYZ3HXKS zMUWQo&m(&ksW))N10+y5I0`r!aZxpp-cAZ?wSxN9o%NCXi|%*g`FT&3Gev3DAW!)? zU#N<|=u{HV33&@s9v1mHL~&VP|Ly5FPN;Z>3_C7+@2kPP?K=IdixRUGLBWk3vs=jI z+RKff^%J$ipI|qyC!FWcQnvdNIL!Pc;b}zviw~9~e9a4pga z)Ceyi?Oktdi2kd~MGnHx{@t%gII6L+G4H8z#v8wm^A^y1#SWD#Ry=aV#%`SgM6Zwksg)K0w`EadQE{0CNw6D~9 zp@H4)^z-C2CLy~xzw_SbS*xwFe0IY&luplIuYx^QR8=i!`_qK=bL5lOrxXvC+p|@3 z6;w*}1ZSQ032#0asm;F_XZ<)|Tk(S}xPJl>4gdMg6ro>#M;I|Naf!k@UG%axeOg~b zzx;Ak{&S>>J;#Gi4@bzDtD?cwa3H`;G+u1FcO zv$Odcewg(qnz&=|d+2?DbKm<)E)03TeL8E68RWdRNY{zRQr+}jpxE~6E9xjtna)3U z=lvNFKOX%f*bhPa%l~|E65zO1C_8R$XRV_TS}{e7trEEyL$^KX<)f!V@oT7{V?Ps~ z?vBaOXcpfsXk6T}{E+O1w|rfwptImw_*GHvYN3Sba*N&2kI5z&p)I?}x=z013}!v6 zI4k5qvl6fW{>EZHEBGrp@6ZT+UrBF_Vvmn?qa$qNbk^irRdM>1n7wILGw zhg5G|kxp58zET5tG2*u#v$nKFZ(QZg&-WFw0TD!NTbkQ(%(NBij8~qw=VEkklV4n6 zlKiq%1;2X*9n0_?t2@>1^vtP02)M2=3AIc@d#T2DX^VsSD_D5s7= zEV#e_c~Dd(ThYtvtnVCpH4W~n_9Amv+S0WTvlIrMDY$JjcV1!bjJO_0x;k#poRt{| zXEh#2C+UpTy4cHDLay&h0&7{!2D*Mm2t{KXb;{lnRkiZJy! 
z3_iB!%kp+gnJJITsCUyj^yH$ zf5F+X$DhFd`TMT>WjHx&neg3QMVf;1JgpeH>x*sofkRm@m|(&_#I5+I+-gdx<7~&g z{L@TH+VyYSAKP43$RFPee0cn?X$9c`o~1lT^7XH28OD5JnCegC8B`H({<*DQNXHR~ zS>-^na<-sLTBuoHYm~%X=hS^oD@SuQMA_+-&$GbvO!ix;rEksK6pqe{v~q6CHgkpV zz2$uN#&=7tmBc^VpAf`lkP66Jv<6fjHNTexVd=Z(<~&(9HfzmQj5km7-0GR08rvPF z?!4hF*3ry)eOqlQtS6yUb>TqBA&JkVyR{WRdJ`Bry5OZx2b$`OP!ra>v0K?0XMK&k zz!$`+2NSFE1P^rfvo@ZCc5_Ebou3aDq`2&M_Lo$O1N_&kpw!YE90pEkqzRpmEGX1%Y{&+@++H4D!z{b+%R> zNB!ZMMGUvK^Ql%x&DtjkoHCVrGbXdo+7S0`alDo$#f&<5PkkTFwPBodT?78Iu|ntc zp{VJiX{j~bv*zS+#$%sIRpB+cZ_yc~{Jh0=1dZTh-;2J=nlyE(c?`@C^Yd*Ktb_jX ztxxsDX>&Kmj(dCE9uJORXUqAfs=5#0Y5Ki5-dGb{KNV_oPxfQ4GUK51N@Gov4#s&A zPOtLwyY%h#Bc7;JT&Sp48aJh8szTJCh|xy*D8$*e`eQQtU;ICp{4husl_HRBUS%ce zK$s~?q;DU_nXkjMsn%${@|*gPJ$p}4*l4sRU3Sy68?H(lDdhD|5;Mq|{~NWGX@Mgt zIOpU2ySjK3oQ19BEPgwx&Buq8KH`4ATZtHPJl<^L#PZGEH z&f&6uqxdkj&9Yxtts_-J!f?`KDY`$zA&3xOS}X*wKB$Gqa<1AoIx&%m!+ur#0n4Q+ z5`e|za>jE?9`-6D_hGH)n+pX#4mLg8rwJzLWJ%gsp{|Z82Fde-13rpBvWr|}iQwraWr(c-=iZUUu-HC0W1I7^ zg$TL7`S+y+vIEHt7PRiA3SJh zIT8*5%wp8hb=w-ac0-^S8B4^qJcOo@@OPc%7`XPj^Y=UhiGP11K^6J+X(-*Ba<17F zgP885K$HHT?R1fON7G6asC7dsz#)HclBS>m2-lmI_KpWF&|r}xT%PVSmYEH5(+Lzy zVaixeu?f))XhlDmEQ@z23l4+&OQ(&_ga<>86!E!_+7xIqJP)EsH_=2!MHyuE`f>dq znB!ggOK83bBNS*nV^D5MYnVd&b40*1zR&_50e^KJMZ$F za1T){TO0!aN03Zu3Qd-%URDAIZO)LG#i75lL{0%zj`s9&MBvZKc+*g)OU|Q1heY`o zT+E`e)9W6tHlSC^3PSk=4It$}en2p3JwQRx&bv^)KU);>!Fev^Ufl($XFtBZ!W|Q< zmH6WlSn}|UJXWz5Rgjk|rN*4O&A(;-@E2(c1HfG6rn0k*9v%qT{3J^hI&t)b2fxC@ z8ba_@XE0VQ7#$`tojkwO(*=>8e~v!-J8<+gdhWFkH$AA|hjD7j6V|a?|@8Jjiv5v3XlZq;CCsa6pob7&La=^%ru<7dlLZ|mfENMl>cvM`v z=VI0XiziZD{Qe+Aw`{txOw>#D=yA78`9C@mfj6K8;uzDrX@L20|GA12B^^S#1sN~YkT=C3Cktb+ zdHZMk-o~^5TSvJaday;{i#xT*!2nO}PS9T!W!?u4hiZGZKQ}(Q&>thsgnfLF3i#3h z81@FStdsa(p@#2;;-BMt`Dcq?x_V;hv$bz7#sAHf7pZ}R7HTop{JR_$I)F-egSV|o_`i#hWEHS{We`@CYLL)7YUG6$xlQi z@qc8Z=tfa{%{X15cJ$es+i#qfR$Hv|EZ@e-LL!l{WRmBb?kU*l7S`m-@c* z4`s*{1&)C;&;G$n9$~%H$_i2|)JtLeUw-o6+}iz3F3U>S=ONkxU-lD#N==$~=|jYR zgcHOGs4$9d;4epd3SVO%I+ic?{6QDM7a_o8HaL`=r2Z-)ita;*bIM+|Sb)>cbLbKu z*&lU^l;U(HX=XX?8lR+k{u6$t?vI#Q+v%Spj-vc!w|E?ntg8d$MZtaPB>sPO zbhw6_uO3SBzhtKa)Ml2x6aM$A%r}c;3_uJD=@}x4{qr-04C?A2t4f(^G{0iiKlb4c z*oS`vv)cosrtt2<=Jn8C?Ifq>A(MKrQ3`J#M2X_skpJhyB|mgW=b`s|KRhEkQY7O| zwC!f(SfM5>YD#YJUqxaG0fyhw87KIUT8vRC4l%GnoPW(a2+n(n!?dgX?p9N~%8GfV zGa{lXh{WfQWM2^eFOlvkg3=L2#lNtK^$6Zl>TBdzeJ?I5nCU<=p*J=k!k;q&XazEG z@ZG-$j-&sqOF@Xjq|aivEY1#+C4Z2{@C)D?&)*>^ct2eCf8jPViyLVY;iPi1lTgk- z&k$KfE%&%I?>=86kc3E(2};rY35L=(fEsYzy`g_?W*PTBZUJ>NX;>U{woHFwFpN#` z>vQjMuNzubp$l<#&pX|jIrp-1>lw9sJDEDqcv}AP!tTOlzP5=rqWmM}#o;=7Hd^I4 ziaQwe`{zPsf0SGVQ1-D-eQgX_ZDba3?zGSr`ute|=e=0x1&_($t6Ilx*_+Fgn2@1F zu1~9@io)pUb6^+ye6@TPl~+NPWuf@Y(P_4kM?VD*yQrgC^&37a%3+YbelZGE8b-~w zOZ7cY#_euSCZ#O(%m$L9U=3Fax92OET2)plix5HAJ?CgL%zXL1C6|os)7B zIWh*OH&80G!})uC zuin|NR5%_AD5uDMo0&o7*N0R76?G)<%GLnFmZF%V#8+ZZp1lw;G@wcs-d1M?vq_zu z0GQV^ywVvq9ml9HE1PDoP-%916@YH{UT)moAc0+lot8Flun4Hy40v4mcm|xQ=Ht+o z3Uaf4C56|u`SS6=Fkho6u`fk{$BC{ZLEImXyX`l&l?iaNvG?WGkkjy>9`Q21kCKuB z<(9J(2V0$FZf`G-p)vi~%SWZ%G4#D+%l_$hI)}EGF*lcYsx~NZNxgK&n~`6p@}drEC!s8M zmL=Tp%1lBzW#!On*IP!i*NEd)Ln+ahjaEK}IR9Ai)Co^{_DU9?)oNb>A(Y_23RdA#?#Q4cb+DYzAReWF~SMRBVwdb+2*4^1c$4S`MSX92^>!{rZtIcUu zZrBvIgW%pxC#;Tj)9kV=Sf9I4U!Tq#7XMk%k3jO9$R&MRmmd#=ZT*)F-wyV;jO~6N zcJ;-2l1Y8*64l$MK5u}08XcR7i%b2mLpk!kNE%i} zp@kutr^|-pWVf6QenPb6&?`?$01@@mQ4|M5LAeU~cEGjn=lj*cBieONMPR^XMb(^l zXP)65v~>w!1JHNgjEf!OeA*XPit9~ap{-mlnb-yZPS3?g8fzdXdED-7Rqwp>0y8u2 zDwdwpUW@SEwJH&h6-jluQD->&=c!*i4$o94KahJgeR-s{&iQHLXVc(w7Xno|d`S0K zpr}bFpNPlVwUOq<693*#CeHh8XPUa=(X zaZC&VIt6uAA8?|y*MYQ|2fp2ddvv^r2-t1$kxb~&S4_U$5g4lp?5 
zGQxbmLWtWrkbE44zY^~m!YTE5GnU8(xWHOAB(1k$e@Hd}E-f{{QaVuLLXrOSX z*d=@;8x}hV2e^=xF`&4yjlKlxyhnL6!0wGo8?-q0NFDpcpTTyj3lfe2ws*A`x_GX! zJTHbRNTVotq86tmt_l`YxD*S-Ahd5W&t+}AX7`C*Hp&A)2<0{}fMl=n=2T}_Q0%qdM{Y8)X zs6;8_jqRezJ{|)kSeg1O7cA3lTLoOUu~4il064WZJo^9e%kVv)_RUQ^*t;VZNo#qp zToYOdWiF4<&JPon;7tq&R9_QA_pQoGp+$V=+JgoAOVJTTZ!oxoFe9V77f@=;a9= z$(G!e*VX{&0}##I?G0!l%?Dn0R|}GR@5h10;sUei0n6>gSB`sj=Pgcu4=Yvi<)-~O z^SYPxhT<){qZOaN_^XosG#Jh8Jq7}PxyPsd4>i9f!jaB~zc}hOA<%ol#abgyH1QK^E?)(kXOz<{ ziq@YjicwGEk!!xb2vbXCwgxM|{Kh~Xg#%N9?T(Urq9h)D$cAWlFSpYc$rJ0OMLLzI z_R{gV>sGo}{G2J5xt%JPIb7>!9Dk|KeqCfgZKCqp4?WOVD!ePAP=l~f!)opYG6p&K zWzA1RiG0UyT1qLk4j)jJ%{2RTt()@~+YVS`FMTKh@-5^vPG_~}rTXW|^b>g=nGASD z5}3gLrQ?mO5&8tsEX^Ji)GW{}po!PB@gA$cw(Cx!kI22^&JBCxGEx8a8Oy<5gJjrh z%Ahb$j#8zuOgCul*|gVYenpJC3W{4NDNn?9J6! z&c8|d0dO*&{j2MXBRQaqtNV&ct5p0~u;mP?^PE9!(r@<0_B!?R;-l>npSxnMGD+EZ z7RxNW`r{F~Y^(WNq{sAP9wpe+lFz|?!hE2Pfj_eV9|A6Zr1i4Yy?s*cj?Y z`1(U1T0&kpxv*6%lEqkF6t*KVv`b)&P#huGJZ~6L;T@W|&F#sgzTI*gs!@2nzGuyx z%SMV%#hKv?*5j23VWJ();`iobqJ@5gcXF&;7z|?t>b-J2-rS5DKiT$ZuSNUC$27(C zzs~||z-m)XrM@GJ(;q=zy!(~DM7&jI;F%HP?0Mm`HOAmeuzu3Z+V~v1^1O;+Z``y$ zkx*Ewo^RCkbcf;Q>I^g3K!IpQsQP?4^s(+LjVwvSGFfX`u$vK}AW1rbpO5##7)!t- zP6Yx^J6+M!{hz-&qjzZm^gB6D`I zV$x0S^-gc%r>6(ubVcD03dG+zD|yP5tqOY!3s@MDFUQKi0R&Ov3I4H1Fy$tv+j8*d zv)Kl7Rl=pnf(>7wULu4`KXZy{Yoh{#*v&uFtR66EpQ#wJoa4Q48zh^p{NC#;pD11Y zzUKXvnD0Aa#}Q=AX?N#8axb*GwE101lf4G`;wGu(cfLDAq8QTL4QIWI3e?xyJIT9ko5q==lLma?h9m}V?G~j?g>^KtiBTuNk(CS_%h$8GNo7x zx}SkBk|CnklRadf=LUQ*@DWuaH?GgSqxJsBqoRKF2%0;ST7=xEl=zxO+T+7IC{FdR zCr@hikI{g!21IiLRTUMItC>cx#@83CF^9peXQ*?5xO8vX5Gd#Y=gn}~nHsJ{Z5su0 z8!3EdZM=_4oW1jQ4Bn7fbnLpm{nf?#^*XJ2*=tg!<(jyFp6WG(ZBB@GrRnVz#_ z`Ksu@h3f*$Fan8x_LVp%PM;x@yMOrUo#H*3B@?q+yIp*=gV8Q9n1@rCkpeffe(BJar1UkYAb2D+)8f{y4vAb;S9CPu?tTF62ZCiz zmF7dZ+jG`gbYkx3xwba^kGDYpEtbny$$mfCx+{)3aRIn0K{AEU()5s`#8RpfTK-Dw z@RqOR_C%2nt8~s1IVJCjp$VUd(23B^zA6m&Q>dyE$jbH&qW;tU+om$zM$el_#oTHZ zd}1K7i^qPcVR)6L$qM~>y+V^2v-kmM@ijILwAYOk!C3k4AgJ>kf*TD&E z+l*P8=7vuG&_SQOW}V0-TVJ?)SF}ma_<#jy>(@B9po#abRmH_&$e2Hk07eLfeuXD0&_WX9BrI!_i-Jdy=WZl33 z5W?6(G^~uG<8H;IHn+c0uop4tRTVqiRG`>PRdEO(A=N_UlkxH@h%2Ks+W~vL(#$4s zCvzQMk!j<*PQYY(xVNy}aS~D={Q= zqSYzHJydZI6Juk9ndx94F4R~r2zg^6>5J9yP%%T665gz4n`0$W76cduDfpIH4L-W` z!wS7D3HqhhF9U8p^6_XN95iM7l{o89p!YGTEGXw>w1pOOaiQSb7$JOHy-{16F~*q% z+Sr)Klk&(1I=#PNsuv)Lvk0Pp(LBJ&*i>K@_xrtUqBlyni`5c~U7V~dc3x53wypm~ z&lQ@TPLRomuV-a(>sAQfa6M2u()ROO!Rt916t>PHtzZX>DElfXl76{ZrZqML9V{WB zRP$zQ4V5Sw9}GmOTGFweAO^9DU}+0DZW-5*oM(DFb`S^&9NA?e>swGrCzzgOFAhtQ zb2W@5Aw8PeLK70gvt39+`mHnAZGFpR%?k_|$9djsO#5K*kWa`VfxC1+PGS8ALL3GALYnezvgYKV9}XY5V)|Q9!Z37f)knwcUIDN0HURJxt zcGKl&-xzy;167olExcGyphj8j4rM?H zN=*~g(Rh(0L40>Buqhhzx;1R*;>hWx~E^Y>cJuS+FPn*uKeNU2S|q^8M=OoTu7z;Tg9sxkte7!5&6gQ!dv;)>U!PL1Wt&f9!)z>n@H-m6t+MiQX`*1EN`VZ1 znnb$Xo2@K*JyYu#$ry8cL{9CWVIH*OV!b&;(Y$rF;2ARj3`=BCOZgN7c5l{RJL8Lq z)Df6|w1&o7%`7tnQ|?Z`h*3m35DELmgN*(p%lX=9MY85D!ORDNX3UA$$X52f;Ti)g*h3BKW}xF0`?0` z$9)!!;x({^8&4*5eAhpHE5)neqa13n`9O9x!ONRYl=*tsqIpS6jbmg9!ZkWdWzoke zSe7<0)muhxL5$L3@c}VY`*(Q8_cR#i%7S!%5^}v!WH#p2q^Se7pQAWRp`jp^{5m> zcX@-Bi@eiDUA{Z~BQUp)Evj8WSfL^TYn&k6^~n<`qKzz?Im1aZ)JcD$K6O*Mv-vd@ zCFJ73J+Tjf3J)qGr{igU>hE~Y;fi%Ti$x8M&D)Ywr@2;d4-3hd&ZXG1FGO9;4$bK9$-&m+t%rkML~8lR|E z(vIZ)<{6f)VCnaKH*O7~Oi~1P1pB<4P-oA};PS{RDXxAiR?I7tJf7UWg#cBsZ_w$? 
zrxO25X@nVJpdWMSr!lD=ktsg(B{5(U{7&CFx9hDgSd2V~L~`qG1rirkPb5^1w3T7a zx{_3rn*n((C@@L(hL9?>F9uGp zO+Wx4XhihZ97V^Xnpwfn{ivU(cy$c(Q61)|T!m{q*qCklW?@aiD<1b65fna^$d;c^ z_LtFob&g4dj`t5+WuEova~0xpXx!45g3Q*eofIlE$O}JmCi`4YHr_sa-m!&-G(qom zd+p8bnYM{4HS?CyDnlsE^C~#e$F*Kqd}3+rYXUVfRBWH)1g$1@X7WU$1qmQMza`)t zzusOlR<5^A`}qU3H-*nzg9q{=B0EPm8*3!#f)XOi>;4`$hAo%6GrZfxOtEMgCM|#N z9nGzxm;j}TPJZnuTktW7{(uGoJ%R37j&%i{m$HwsseLMiLM3^u7Fc)~pbc0>=i{NQ z+LG_Luw8v;tKwN$@=tNq!P^4n(S3U5FTPw~NRfZT=b4K%K|M7#mE9dk@%3z2x;YaI z`G~%jjb*)@ay#&)1daY}#(n`MNT2JZPrm~ET%&Xg>=%gL)e(K-hfAz%R*wCKKgtt#VVxu1pR zwIx6`iqj8wG#TTJ+6SA~;O)|=?2hs_FBROQHQk-J^Jyaqx@k%GpLW;vm4gDdi2)k)l=s#$>v>f>ap;G#AG8=t&lA|EBC-XUOS zd8P{}Ro8>@%jP>U4+aLEi69%iQwBs9zEm%#dybv)A*i6@*_v^l9eU)XhfGH?B5xdBt`n_kTxAYYo=3bfC|7ULZ@T*s4a)L@k@ z6nHh=Ch*OBQq{6rk^r`&U=GcFcg#8jSX6KJ8g1um?R-^mS5~2;KCX`UBX`!G7qloN zy&hLRq$gW0@Ed_3o=P`ob0gKX3_}Zx4`e|OK3FC*X+X_$rNQH|7iF|-R&B%uTCsIJp7KA>OqnJKEg*|01E7q;tKoN!B`@#wX0UryM}Y+drs$#IhwX9&-M z)dIk*Rr9NY)M|PMQhm%;b)8^7Qw>(P>-{4Y*82HM89vG@L2+{kYLw`AmEwt_qrVKo z>R;_>LJ2?YP_6fU!mja^Ujy1O#LBO$$)c2}oSPcLwT`-8miFvCrg~!0oDk5(p4Fv> z-a=@>l@K8PylQ5j?-}gaReQ==w1bLKOp=L9B70QmIR*-~1y&!>52L%gK8jFoK}NpC zHV=HkGP>_flB_8|&r#mvlwCRA-XtCBT%=vmqvF+{OfDNtp_^x3o9BOq&+eHJAhwRR zx$`tC)zlm(2(b%fOti(W=QM5N-6-Z@({t#rmau-418d);3ehRzKACPsXo1qZznRe< zwMly>oJ6F*5nv~|zopjVSQtQLJ4OMm&^p1YaIbutW!lpf~t@KBZe!69tyE5 zhHc??v=;a=W=Zah!vTiGd!VLDY}W+-5EOZs<&AhdV!dUWJ~oc0N~4Hr$2^o z1F+VDqjD&?iuEifR2D$F6VC&Qi@`~KuxjY;FF}XPk=+~Rw$Yha$ z-!6wZ2v}k}zu{&6PxriPP37*lc?_qzLK#&B-9&HqeQZ`OFLr|o&R%-WcSOY35E4Fm zOD=TvbF4sneAl*T0(M^%e3~9IS2b45Jc^8QY?ok?MC^JR+t15MW%xP?Bxw+|DUL4s zu3wHN3Ymz9`!)@L0OihOe<;wq!`1T`|4u#9Gz=gk3k4{{dq=IST&;_$o~Ne(e7RL9 zI&GMI>A?TBK#E;-(--B3sm%yA&rhn6;{*Gb5NJMjN~x?ULi25!xyEn^Fyg?OxwYR} z?C^E(ajCBX>Dy6%6mfOR+Ma=9k5a(+obsQ((&sr)Sin568%(r3{>WE|mzi;#`@~9m z{-WtICIXApL{=b~YgeU!;H5GFkaMXLL{aw42FqR^VFPu|Cm5Mh>W@0*5!A0l^2x!Y zZ_DJWKHcgqAh(~Gt}|BX1HhRm${Op&P-l6A1-o|7g+kpZi}gRLP7*op?(Y-7IF6(vkygW8CUKmUi_ zTcrQgBDT_C`HJw5C$lCvNIwHb}85yCX;3}1?^$^|HoCBVd-(m(^qobPK4=xo8d z@tahdmyk`07EiBL12B3KDTz+!hi9{?Q!w z!{FCP9mL*^0>P=SA@V3nSeeO8%IyLtdVY6FvHWlOBRpg1P$!zptB^3`h;>H}jnCAd zZI$hDE6@P0#-{|f*GW0Q_P8X_@A4~yjsk744WdiF?faHuG1ZoW;&c!nblE@NrPd@` zR92kC2@~A7;M$wljp)0XEjj;goT4mIOrc%CqjdqwH-ZV>x}pb)$D+!d@qpBJiArX%(Ax1nbBU-Cw0C_u1L)%yGG@jICl9uc;_u*AWPdb*fl}jI zQ0{_M29VKIp8bcc1&jzw%UBR$xTdq@e}gh`NBvfmuQ7K-2iQL>m3OOG0o=MSbZDJa z6P1Bf^?7%-w zl4wf-fXnATLEMZrY~KBkJzV3%u3hgDNEo{8q+i&GdB#wqwBAw)ur5lRsX9&L-5D%a z!y^z4)>eQJ##eP~VBD9K&#>C9e=WZECpPta-H05}*?dW3k4Fy)vf%y~2ld-WMT$bH z?RAu@xjn>3>xl2|LNwejS~0`A=td<4uYb+u;+3Tx-JBf;MIRQrh2pa>-d-_JUgiql z#cKE>BYQe)`Xc$nrYOV#4f0f{lPVraq#-^r%ZDEv=fF#IEcr^!CoVk-zDjA1Rj*z^ zYOt4KVziMBHW_buiFf4=SkzpDC-n$mHj$H@pAYo`6-c^D?hvFS%>$zc)~Jsd$O|<~ z$c_cxNA%mj_kAIlRW8}+>6dWMN1=Q5LYDowW6T zZunTZUEmxtO>0s;e|%hPDlNaIVbt5LenMK1dnDd7S*%+(qqj!~xq#-T;tw+%vZS^P zAb5WuU*OEGmmd>TQyt{54|O;wmWiSBF?)gE_a9<46GZ#;!6wdCMy3Y-90=D4u~Z32UbA0 z$nMA}0&#s4pZPiCa*Fm!@3Z}BBvql?cd5z~E6{)h3t-`~-jWlG>Tq%#D zcXb+xjQLne@9o3h0zI(O63oU6Niw%3|0dY+3!%bLhk-mZq_iLAi;q}x_{O!$lXA&! zc!3u>YQQSAz3V5njxmjAJE+$z$;zqIE)yveKEQMZY^;q$;HH<=q-8o$5tzuc4--3( zoiBHsy|UdR$RECBOw;XR^8c)QakMdLmq3)TLX+koOg#__l9r{PN@ z?zD?EkR{_#?NE$%n{ob6Kx)_+LCnMQ(G&D&5n+UHBiShHOPg3kJaT5v+fdpA1uyR! z4{rc0b6Ht5(9T*hg^gthpJd=s-kr0FHW*|Pw#QchtyJV%Y10E1lH8eVppFCBk~+AR zfazqK$~Low0p+GO`m)4}S#yP15aPAVQ0&V<`L0sN!RJWhtx^1V^1(i*Mx`Tjro=E%klP z1@d`=wX?im?Wv|TP_=Ds#P{@WxG=3QD%1b9QQji9u!N2;s^XPC-Qmna+<{AeqshenKj2{ zn)9ruvmhmHdH!If(|7B}U|}DaRV;hVqyja? 
zbiZ%rUFCsIIEpgDuYjdzE=tiG9)MeBvR z{9)~4(s(d%1DDvx4H`%Wu^-=$9K5H>&KBBI>R?~Cz=b4p?gLr`<&Rh}hf(3`H_u?n z7SYo+7+k~!mFCjQ`S_2xyo%FGQSOa0QjWEL?C42Wb~Yh)Sk97Mo8Z}8Og5;1Y-9(Z z!HRf%9+Zc#&C`fX&)P(I6)G-(hLcBTXCWxJq>Ke}apN8^tB7*l{=@wxBzHD>9%w?o z!$Jvij(AOFE~{+WBK!V+DBwr2R%aBo$<2Zn@|P^`&o5S7k8Kd~%Zs~E^mB_3Ezf$#S?wt|`n{)Ad@3agdip)(l9bN+;H3of%Ed&Z{WW|kj zwW(KCrR~q2s5HZx{+uUe{zmC)%by@iEf^=&ExFM+*g>6Jcc{}#D*UNzpv&siD=3>q ztnu}&Tl&I%=>=Nv>Ow|87Lf6JTkkRlwBe64>-UCYWr)&zHpABzwY^TMi`G=9u}wfw zooK5Zn5pDP;}m?^8Vafc##Yl4Wr8Z38n7|O64z-YH)DoW-x^|IG3}`MY)u+@Dai*0 zZ7ai`9xFnY`3#L?@v3Yo5!4K)lEP!<5`@o_+%z7tbGx|Z*9S8 z1SH-2$ivYV#*_%5gdC>NMR7A`fIhm3U-T*IcCaANF<}9;B}!L|GOGv-%}Mg9odjsr zO*xKQq!T{$v*YOY+m0kqU`1PEVt}JNtp3bLW%hm~#)l|j{Z?@1O_x~PuW#+8pumeQ zAC0`hGJP0lpUKtft{u=XMKTcA5Zl+VJl-6ZgfflJwON&=kVzj52*9KthkAVqxZJs% zPF+q(;~YF)ql55e)2r#X0pg(JY$d;eov_{~ko6rJc}Z8U2Wsv$r9HSsUN-(ZU{zHL zUVreGzZB4C!9t?MO5}Dww^@->V|wSL_|$JjElGbl1t9fE7$h%1gh{8}z!2;=O}zw@ zi(^*nTF}PXeav=MqfiN7l*0)_kfwNb|AtuC%V+PE%9SX+gNzeX;Sa+^ zeX|Vu6XW*O)D)l-)#rv6AII|2o$dexa5=u%@bwIcjSkzI_;w^08(E)_a5z3F+5D;OyZtU~xkosYKgh?) zKF!bMDF&TfgASI{gLulXoCj0wsw%a7)4UEw6-S^o8%!I%HB377+8r;=O@@XF(*-^a zQ$d|8g(CUj+UjQ13uXOPvW<_zNe{OJ(El^C0&h&Sa~_Oq*X?nJi0-eJTCYuXK3WwM!6~U^6tI>pw<9kg$H!L<~&l0uu{-D>J}+43%)P_A}0bOyHf` z(4IK-q$Dc<%$uZ>TvP47$UTECl3U2QRwNSWli-=S0R8cchKN*&Si70CjdwTJ_akJ= z8$~|y{1=`#m)%Tj6i=um)eZU>(9@r3c(5X6i($=cR9LA0;8U@=z1%L5lzA>lgn{f! z>6tN+Vvgnh3t2f^_StlmbsBbeF4?t|0E(Z5Y~y&LX0K({JpSfm!aSQ7({Gz=>^Bss z?d0YM*@@4fqn&x*lt@$SEkTC_5qAkov3R1Ka{$NvdWCtGsUDmbLk}dt6k7?A`y42U)%4gR-qO5d zU1Ivk7$m>&T#r38`_QhOO?RbsV2V8a|2N;JF6%uK3TaA+X>ISzR=p6HMZfROqU}X9zJ)o0sH%kRS(!f+LBo!Na`b-p*_oqUm%LJ_wOe1+GSjxoH|O zRu-%I?fCqhk3BnsjI!er=+UBkzvptt$U|n|;oVF_1f4CHZIt%1#Ng1%qFBH!&+7uC ze$*9qoIs`Xd0t(xGbmWJT|KllaTLf+ZxFp(%0V*Q0Z!GFE*_86bnL00O#e;6*B{MNPeSf%=b34XaC`+3Eu;Y;$_y zy8kN_A+x%J&*gz}nIw@RRdoHICoN-tFaT@i026LJS>aavQg00eKxh(lmDX+z zxOEzU7)F0-$RO{bu`bY3)$=;+PJh89*uorpi$$6f!s!{}Os|;2z6D8u_Z|gPh*K10 znMdKl*!IxtUAuu{bHNN9Vf&ob?i7K%-PFFf`SUepx!c2ecGIPYTCTMwM9SAcYS$K< zOWjW_25%^7p@pGj5 z9|AKCy^m$lDXLs3GY_U&bmtP+HqZE6CSI{+!mAqy(+|2P=~!Wp;c|v8>xRd18?fYn z;@RY&q12wi&Qoj>mooFe7{YI8IqGRW?|>rDb1H8uC+PSP#+&c+9W2~_>IQ>o&&Ay- z4qN+ow&M6m539CDUp~ROdjGwy_AcO4zqavZ14$cgp@$5KLuU@x+la){BTQn*R$^Nc z2)PjgS_zA^cn0yj+66}7OWsj=V}1IT&=*YJ#y$U2oW11J2Y@6%>8W-jMSJ>>-KfXv z&8lwaJxfab$H~%l$S)1ts!lbqqidUBW5^Ku1^1({SvG?!j~8e68CsZ`6p!$|6@r|- z(=>hYzl>GsokEMKqKZ0#O$A?vIO}N&`8ioFlh%%?9wy9hV^(W#j)5`g&263_wLItE zOamNP{#2A|Q>V zbf+{(cO%l>f;5s+(%lWx-T%4n`@6aK^IOkato>$Rp68r1$IKl0If8t4tx?9jQfo!X zbicwx!9Iv*6uuYW=l1Y!Z03%GW|yp<`q0y_xbll#!g^~>O9ci)D{GCsN2zue$Mu`I zkCglMFrVbXpvxH%2);eEeg1S!v^$~o^z@Xm)A9B5QvU;PalbE}YR1^%;nv#juTt`| zAooki3S~M?S|_^!*5>t`1N*@~)~RMEvW+JSF(pt^a18f2>*5J~5_kF}TT%Fv`lYb~nQdZkxa5Y8|(`8U5S2vS#vF zk_SYjpn)o9bUs6)OaV{Oo zr)4Ypz*@=Ko!`*xJ3uz&`>E@4F zV?i+=5#Q%V!L3!hL(DUFz(GEeBb&hJR%9j=H1`uYV|{nTW}wMh!2*n8Tx7>1z2y&u z5^i1Jodap5|DL%O7zJOzqhhToE06ms097opOucPH`qSxZT5fX!I-gfcsaDe1$#2pFT(Bg947Y&$CFO?5L^#{V6)23wYaFY|) zCY+2byGBKOW9||^{)wNCvf9HgLZ zH5NBxEs0)CeE95eyW;CvZk7KdKjj+jNlS);&|ODk3HitQeLArxh5Gd$-0neXD})mP zuA?DcfNFr;U&o?9y6Q0I33;R0Y!KQGcaTIwqrP~3{#$%ghfz7@#oPsn@72|r1g78m zvB|ses0bt@8r=Z1qrO&?qmOyKIw^6sK{Qaab`~U^zewYh1LPh8tfObk+g$`4U&jJ6 zJRh3)bHsGSH5-3*D7jhQ_9q>$AFqc;nPz+?vCOusuIv{LG+g_NA5i8>x#*uGE89nC z6)E}QNA1_T(HlO8>uOzrh5Nr%j4L$zVG`;enMC;c!%CF95~L~6e4`XVlir>a&cESBAJ?NuHqQyz=}Qck~eZmrq@1A90)>c zH@2UAJ@|a<|-L(57S3v@%g8N0KE~c`? 
zkwqk!=2eeSTSU)aSp@hdGBTlAQz&2PLS@;spC zPg<=*{AG0@-{Kju#S>IzCZku>42EUMmrZ)WUUkyn|CS<#ApPl`wx57k7#pNpz@ojlh*iW@Y!Yt(>z61X6YO(5Uf?l;&9PR-v zJx}1g-n=ev_k5DIX^l>LL)61=G;_W;8|JD+&2Sp=;52jDMS`GV@RShNcN)hHKaoL< zAC5@Oz?Tn;Q)jlF#SoiKrKX*wm?q0LmG#T}vj`6HiPxurqU&83(d6P^9efZrsj|8+ zNg9npb@!}=M9LA|(FC0pAy$dz-ZiIYo|nNMdO?4kH>^MBM}4bHu<_Efx~!kI+jdwO zHiOqe&pEVMK76uczSG7$ocw{i3tH)0tlcnC*V13}tLn+(QFo}uMmL|_*?>FlvPu3Z z{?5s#PQ-`!wv%TiPm^x`&?qCUo=_+6f|LG*Qw{wBkAK9Q7b?iA`t)j1nsuhoDlO6F zaTFG3zjBH>;vJz5q50GfzUoe7_{l8J)EI<7)mZhF(0XIXywF1!zfs&Cuh4wqq8LQ( zVc(U)Z&)6u7z2)LJug@7k#8%H5!;Eg?$8UKw@Gq=NX?#2asgl)){-4cp&_+o-b#*C zy6Lwbs91tfJIgQED9xDQ^fx&5>m|xMnN737B<}c%2UPr_QgnJo8baBh#}8z`hH0JA z@`tLJ{@l*HXg_kl-p=FkI_Y>Z@`YDmvp5vDH!sd0%G(PLc162PC)#l}?B1b2{eK1g zt--DD)Pu#ADS2`I;cR7|5Y_}uC{wJr6Tglh)tiMG{n$FImz&IeC)S1@mYz+1liNRT)s^;$kkjH- ztaGaO7iXjJv@}csPev5?)?OUzr&TMRp@jYY$vWs)j(amkmjNnt8(v2bI`U1r;fGH1 zKOMe|zB|Ot%W7qqF%<<<14#wGS`C#`m?Ojj)Fp~HBLkQMQtMBSdf5@`PzczqiEs*F ztru7Cy>(5}wUEwEb-2^P)u>9gr0o;qUH7oeb-6qiKZ(iaSwe9G&{u~MPe!sNSWSHD?8Czr=I>lbgm90P@`B;iIEBqLJr{w zkQH<~<8l7kpq*miVh_I#s|k(i9MV`FyaJ$0OkomNF|54WPh@nv6P?%gJVO6427JVz z3=AbT*5$j_mK(VTW&!TAw-uCHey^a$70!}_1%Z4tMm zAOH*RV$sX%4MY}42vfge3l32%rL{nXT<8vK@4dDAnC`xc!iEuW zM01!=ObxpCC%y*go>}hGuPXlqKSgj@I95o-FoZ+_l*G_!Q(OIB z*-UT-R=QsuL!R`E`ARhaK|?%8`v+Kq9txe+l|pNIFsmYPpaOuB&A9(iEJr?#_e+f3 zyF%5YF~N8Os?dyzj#N7`A3qA~>*}5eA?nl*_K#c~2q&VUj(dex@D|_8BtEB7#n#XP z2bqocIeB?~g8*))Rd3ss5z5l6!Q6DJOs7$<%WClL0K5(jok%zlCQN*D^906vd!S;O8H6D?*`8addbH%tBaQ8}`!MzyIl&{9$8pKvN> z54X*mnFXheC)(=&-VdsE ziGaYo{y~|F{?C}6Ag*~oAwNL(k%U(6|M$=56&R6$tF_(!zGOT-tXf?7C?8W2F;NN{yI3CTPs2HwJdP-E?HA**9_rfx$5faB4h^si@; zf@k&q*f>r6=f?o8rdq5qe3YElp!;D8M6e7BtX_;d^(z}gVg z5Wjw;)kKcLed%aOvBKbSVJ&d^guh*;O~!9^#6}M z2g=0%rk5WO&=_7QH&SMR!@W^(09OEzz_}#U6EB@mcC>g;- zA~sapKVn}Ybcn^8RMAOEv8Df7c?1kvMd56TFa*?|Z;ykOg1A7ZA-yIy@9`DA!b?L! z;^zkn&lR>Q{@L5Y?1)&5w6^2#Fk+kel24YW9)C_5DGZgeKl&rs|4cayM&x*r`2AHN z5u;#fbO1VRFhlpBSyy? 
[base85-encoded GIT binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/docs/core_docs/static/img/tool_calling_components.png b/docs/core_docs/static/img/tool_calling_components.png
new file mode 100644
index 0000000000000000000000000000000000000000..582fd7057c8971e32608e36c974e91fe93380744
GIT binary patch
literal 179296
zb;mL!ibS(B7+Z3n7OpHDp8agAn0z@G6-NZ3b{cCqKu7ekyGWMrTsSiWehgMuD%>27 zgoI=`fI}*rl+ARU2p}nm>XVYiZIOHWGVqj*m%H5LuuRXObkEo{ND(}J6?l}#6QyD# zppaSbFN&~Ys{(O{{N1+`dai}yLOa71*KzO^j9R;%RIs7ADe5kO>?r}>b`b7CC!6$( za_f#LoWfyLF@5-uQ>pEC)?V?rQUq>?%XPFG8eZF z9D<^m$x4%K%rAD^<3AT&v(TPCe<3h${l(t>WWorL@NlIN!#vz}SZIwUN`ludeWaOT zz0yMtEm#D+&N-JbPdd^3Ku^bJ?+v$V++Y11&6h#Jnf-(R&ZEm{Jb@62fH|^707Rmy zoc!eEWM?LZ-6pGSxxf;&q;+%fR3EqP*dDB3{oIRF`7w5eWE2vD?{hdVF9J9?bL%J6 zyc3>dQHF;S$?WcqT2&Zrdc%psBcfBXvqQ0EZ8%f%!ouSAl7SYVXy$7d;8$`Q$>e{K zbJC4n-8NNjB=c8n{n??;k_OKX%FZuK@N7pbt5S&3Hyt-}V#(;7BkkiY$@!xV%r(~N zP#H*WOXv3u@q7K%<9WD0Yix|pfmS|P&S|W|PJv83=#?`xGQ!%oX(&>K4zb^fkN-6) z^4bP|g~{-8{M~XQN~7RaDWDe^?TPF6HGdMt`)no9qheuYx8ZX{e}6!+*m$u?yCU3i z76Wa3j4$zqcLoik?@9%>nSd|#5|zcqwD0L7^xrj@81_vGTo^Sbve`sg`(`pepX-~~ zU!JEXT3EGSgB^;7hL$E=ZCTN2hv$9>+7#$gQ&W3Ho;BU(ax&)0rFwB8CnUG@5&L;A z%&J3WE#MrCTnl~%@U;u*)#NW;$X(khLCubflbw9HAE1?}T7#8Cw%v<~<56q-C;=1D zFTT~FVQRb<70o|1{|mXi6@@)BYjrpUEtE$yzK7hqV~uBvffK0W@kgA$C6{88zdNIS zUgjo5{fDS3phEczP}82D>7pX1Sao!9h;&lkC|KMI3&puf`QM{jPb1Jl2kP~lyT;YU zQ7OD)TQELal~uYhA%Br@0>k-%zia{<^tpphJZ`n~Zr-=eo{MAE7U9fGpS+gsr>ZW- z5?x)}$cY4aXp~Sw0-m)4q+oS$BsKcf&xcSpAOx>}YX7&~3t>fo20zH&^xQ~Qz+R$IBXVYlb`2?ku^AN+CCoNe}au1cE3en+9W zCW?-lAd1Ex$i0hb&)-KHLjmPUE3)|H*8}B(oJ|Ktt;wr5ZJmHrc}XoFx0F zySe*6K1&v9E~=GhEj!iX*6OAD%gdu3>{O#2cM>|kVsorj%^{lYO9%{kSMZ&^>i~np zJ-I+TsiM&75EKs{x>;Qz5KNlL`tUYkJD%6uv-C{avXWOz zois?n)C203Z9qO#Try==__n{l+>Pe88A&r)ti!(#9-OSkdX`B3`gPtI(^KQ^>5S#3jo@E9ZXWM_(Y z!3>yI!@; z&1OmCZVcx}{51GzkkI{jiQ_Zqc=F8&!@CUSLo}^we_r7-5g#zRQ!cK3 z#-BCSSxUMAPbs1TVay6;jX|SI3UoW>w?P2ftH_<{@-E!7{Nw>LIz{hzlSdO8mJnTl z32GwYI2uZ+7jjWMAh70=sF*9~zgm%_IGljNx5+a?V6{EYWJixAlPNXLQ;|sk;Y|zT z(0N^hqW70IO~j4b3@WGb5ZqijWVfxY!d^AzArcN1rM0GmwM=uP=xbrqjNyyjrpuRz z8mrWb$oD7zF{+OGBn;T;lTB5Nxv)NPqO#A(ttO}gC&qdD@QFcig! z{J~?Ps@wMZ*cBO=h+lcFegDa=i9_-EBj(~rq%edDi=ivYd`4>Y{?GTjA$;L@pn*sH zzSaDv)4A%}P9KMD6s%8LhRFqLq#@Z-Olp{I2{n{bDD3Ey=!Nj?^PiB()^i(sN(B5c z7w&BW>Qz=t@B}vtYX`@~&mmtW++?}S179piM9_=pIjh6~7x|0XzIf_|3_V>sf~SN< zSQxmpq@pvm;cG*(($%e-sORiLT`fLnJPzh%(;_E(G2AnCw8TYB$v$irG9-C2@Cad| z=oD1@Ik}d!DpEn8H2l%JBQnC%Pgo*Nj^1}q)Yz196&YhHtm)J$`F{%X`Fn4q1rtQM ztn%H;;%Zs$sFS~xrT!HMuT`T!-u;6_6Q1XV)dS*CwigHws!<*yWJKRfv@q(6&kX2h z=o|r^#Zc&zH(;rH0RT%-K$b#p^!bkDy+G6w))WSrWFBgFclVRRwvItZsR?+7`|n!Y z3EC1t*p}zmp~Ty+|M>P+9o~|dV61Ta3M^KnNrGN;`!jR>XvjRys?W#P$da)%nYW9= zXWGyC!L#jnT}DkAEyIG3sC{I*460tBBg}i7LUvd>M^)m#i~c2Eg~q|V9`>Z=|M@ZV-s z<$yjtcfhp5q2Mr0ByhL^VW%=hi`GC7O87%z5Oh&tF^yvp>6Ds}`7uN0U%e{g)Q@g} z9wPOZJ#$F&5LnS3Y#PjuHhfF-KVmomL3h`7sUv99^U`ZF2ej7#6Si9cHk~rd=(Y@;vWXaTanvvj|8U_SwgT4=ytGC1byk$(gKEqGjBC4#D9(H z_ePJDE;+StZZ9Aldw6p#PRNA}r=-0Hz+7AyB(*47f?Y?XLJiryy0 z18~6;31AjDu6EKPy{0#MdOA5V#~UUexycNUD`V=7xL%FLyWAZ#r_Bb1q2*nLjm)@I6b%I-yw47 z0}%V#OdKmTfU)HJpaqAbzJdzY_~X&ot@qAHl=*my8{ZhxhY?O+UhV43dzH@3#ArK< zYr7pQS4t?>WiombIvp%CO(L0QPXVit(`wQC#!ZoAn*ZiZ$c)MOIf@PHseZFwPSrEgmtqaK&!BV{8 z{$jhtSCfHeCl%oOMZse|^(?MG2D1&q83%A9du@^);%2Ow$uv7rzlZGDk?7FhMD}! 
z%K~TU?s3=YV%7Wk!TW#tqJ%ti+XTicmWgDJ&p-uduf0`3wGFYPo^BZZZg^F;2VIMC zBaUf2DZOX*VNZ?kA(H=lYXl4u%=br&FbyHQ9tH#SZha+ql82dwxG&fPAFZlXE*vQd zJZiisFU%TEoQd&Q4dSgOSzduRQLTEkKV?of0 zUP+8M1hlLX1V2y3T3Tc^AekmJ?Ci{EYn!Q~lz)W$xUIC7jr!;EVmpPe_!il36(U4h zC>{WNOU0np8hNR$#iSzJ z#^3mR9EmZx9$cTI)mWtPSu_?mVGev42|wO>7l`S+pLbb4AW?+Ok8wJf+1}kp&*|$8 zw<1@71d05B?`ji*Pm6L*f)5u12tcL-vq)o$Ms*&OgYfk3v>IcT&l&dO!WAN>pUFbY z2UDV}QH1D)?+FeaPe#7PmK%*QpqXAL#3)> zmJw~>8~{!Ywu}x4w-sr5)z`v5XBMv4sK&%Hh{q!B3@-SSAAnTf0xaPt40QG1Q3>D70RnM$5yQjKaQi?nM3gi3raXWxPm% z*4kFHa6{ujmj9Y;ul_K&5fu=CiVuOqM`qVem0J{4c4n_qfbfxs8oye?R-jQWV|E)* z()M#u4~a>^QiUUHFkJO@K4KaQymT?+(w|-cqA&gyAel2-G+kF&$G(Cp>Hc7p0C6V{?r|z`csTT@e!I!>p^$Y;>JcC zeq9Rnjuy9-!ABQ}ytfN4h;YE(ZiLZU~QHh9Hf8cGYAVLlMYB$VPNQ)4{z zWS(}gLhcHK`T1g6*s|!YqxlE3hG#TNp+#DCkNbJ?QBh`r!chI8?ihFtIk&$B>07jq zk{HvgR*d)3G-r6f0OTtP1g7C#t`ke%f#7e{UB(>Y4l zWVToaQPt`&fZl{@_ z`@L#6S8jb1=~!b@hmAk{dg7BdJ;ji4fPWRIArZHHKKe`98>I?Xd7CB8X$u>Z|}Xn~O$Ot@C?V zIJ5l(OULCNDzbO~%-nKT0gh7%?ylXg)2w;(+y;Rv*@Vb66!DmoY|w=ynW!7rn5#pc zq}GHQ39?A9aYtp$6P8*e5GS?+1BY)INySE!IY2{PZ;%Tw^4VmL%9On3@5Dr3NHBMS zpvY_zNZo!WN-v!MOd4B^izX05kY0ert2ufimaB#-^9l(1OcoRgLF@3v zp6c@g<_#I+6V}%euiQ{%SmnK-1lI43Q7?b?06R#cJ~+fRM>m~o*o`Z^|EX9R5Z-0e z)4eTka_lJRO6D>Gp&CP%HZs9chMZW3yQhy!mV$8O&2!)v`b{U3ai3W{=}0kW6`N?IaX1l7$+iU zE9I6Zx{(}>UO=dL(w&V4&>crTjO~wLKz*^>)t(xF&M+Df9f)#%1}cagxYDm4g{c3s z&Y&n+5rXve^pO^9Z-vj(i)XYmgsQhk*Tchymt5W5Nte)~T5CtOKQ8GTrWAFuXXSG~ zE4-mS-lKW0cLRAaI!$M`iT)CLCA2uR03@glo8#IYX?X`b%3kjd0#wY@O@!5OI+jOO zXNy9ZJE-On|9AD+V*s{|J!%KFFW=k;-{fg6wh6=Z6qbz~=3g%UH2OgSi01wI?krpg zs~CjDS@al-$SXLN=SA7#M|L5dVEGuV}DQ% zxBi%IjP$Z-&R?g2joRMQsAYcVA$9B`jefi!w98>3G9Y35sGuWe4p5EQjTk6` z{>V4M`rc$rq!%+D_yCBVWlKjS6cQNIPspgyW5~=|-u+U&iz+ z;>WqsAVx|wc$^u4D#}6B&Opv-+TmFu1asB7_;YeH@XBO_*smi`niJaLjf1r0G^!hV z-v%2&&qV@H&m=JCr)e+s_c8R}UY(W9+}_=7wOlM$Us_#14%u{1*EJ*R+=1#+=$iS- zFu;~2a3zch5l7j>{5{UL)!f?(tK&1Q*zm~8NG&WZbfjQ02?-PBl`d{O9J5)yFuphm z0%Fc}asOMyJlhkJXN^5E$&*2&EJ)I)>~CIfJq8Py=j_!(j%KJ{*cQa1bACdzaD4GW zK-~&3u=_DU6{+av#KFJbcA^;;mmSxQBQ~tnR--%SVmn{X_Z3f8k zBooFPVl+?}^56b1i)Rq}q-;w)6KWPuIHlELYekrB_2opR+<%r`NI}xByST1kX84YR zq&zqU>3(z*#-MAluB{?pviE|<=JhcEb9@m}u&WCd(cwG<+=0SIYjCCLP+tYdHn34{ zO=}0SBLCe1tg14u`*_}PT1#xd-wo6Caog6w4;^Ls%uvr`_1Axj+#_CWiDC;~02(4g z4wBmncnE#2s>H9>Fe9&aXd|2aO|bsbF}eGk8?<~Uwxo1jm^!J~fqg$^WqCCj&-kb| zcE4@ob_O5(o5-nH1&w|IOF_D<_{vyf%Iz zR?FPm4eqWZYVuTB9L_*x0nf;Wb`MPHn3tPqk4muXCErGoF>^?Ho!es>G(+bOR(B2Z z!#2MgXg8+|kypxP@`sMr`J%^g!9e zPw`5YRgYjS9I&+8YXC?E#F#JlA}o!6ZpvDD2G_8=U|h&kXP{SMl@ zRDE=T)jk`Nr2Y}u(R5JrK7ScCFaabS2LX#N*RxZ;0csYiVy}z)({wRucVz5tc>Ayvb_D{QrNXf%`J5#}W9GtcC zoIf~bG!Jh8fy?5>MobGJ3@f}^`_)Kkv_HE$UnT}ESR0}&UzHdPOrhfzk$aJ*KbRB4AdGmoUNoLpp*F{IY7-(o182QdGzXF(R zU+bEClV=}Ud@K@{mw<^gT!+_I4r^$41f>qOOJGIY07llAlCq}Aquyvz+}AwtLj6=w zZoClP%WJ@po2sQl*0-DJP(kCa0dDcjWfNem{F-w=Kb*5#aNXL& z%h!B<$4P^Jkiz$~7Y=wnzPB^jK9!{nX%HUrIiTs!_j9~#E53+n$xffqbh15N5OY$r zi>tls;!Y%-Sm#0gW}fPUhl~5B)95cj(}%TeMw-;b8H`x>Lq&9L8cw*iz6Krg+@Yb+ znHWD)(xiERM%X~}_fQ%!0Ly+{Y0}{s$afx@wEDo3&L1GLhLn6JmJOF zpSxp4{rzW0WnnQ_P^3Oo)C_0w;qEwKeP_p7g%5u;pCt4iFkNj?Z+_M<6Wiw^``hb^ zS3d$yq`L(z$-<-9pq?dKjAf`4s{1^R2H8EgmFhBiX}RfAIXnQ2gW2|c(RIU@l7YN> zH(9|OE2(FKkPP}~?P|n`!!qLrt#?VeVDh#;!PnL4@kE_fFQ+u~jiu;Z~STZ-csRutmnRk3%Yeu9t;n+L5UqjgwnY z+3x-vp5bMRnF_3Yme%k0#yul^=`wrQDL8Q#0R$_(9xW=_nHp}wRgW*9EQoPu<_HZ#n907$a%SlU9l-2mBCbyilrsK% z%VQ&X#2fhy+8Kc`!ktiuj5-w{N+~n+_sBl|FO26uDFHy`MdIvk%K}qO()@bre@5JB z;>$<%KAyfy=n)Z^78|{8`$UT}%`IkFd8kV1NGRFy0()l+po2IH z+2kFo(K*P6;Or?yNrdlRpf`T(y03Fs;dJPcdD5r(OlO1!p@t(d_WaHGZ$@Lj!wFcO;nUMPyE{2k zJ1m9Z>s!|#A0gCr|A<>{5+Pu3lEfz0a*+OuLSMkVpjq^-BMdbeSm5~w$cDcC3R}f2 
z+2W%NX`cp$AZzx|_<2i!F*C)g6osbU)ji!zSsX`rYZ^>M@(D=4y+PHIbN*o_RT~=v zW#Nl-urVD}hl*I-s3CaFza+c}iWG;)ec)kHaGoGv*K-ZZ=u3Ut?SV#^CfAKWM!M*| zsuijHrZjBaQ8`^JtsjS#4r)gq0N^8Uybbu;u}75jmBXgY2`4PRq&2iV-POzb8-M@)aud#JBJPGE$a)Gso}%4E(rRH4ip|+ zap;zQ*PWUNE(6_+CZF4#!NbCSl^Rq}<~>zG%NvpOyG|wDvs-$Dr+|R1hPE4%!yp1I z417~8zSevGi9p86_8(b)^7)JW!jLGMYn-(h=AdIy=*BvvS0Cua;=nX9S@i_%C>CSI zI@s>=@jR*w5u$xFkH#sZWxmo-LAcD2N?GPfbSfrx&RVje;Pu-LQc!g>?VeN@r+9EcYo(T(*0Yj zIC{%TNU1{i8-3cxAM^ct)FO=>-3w@i97P`B3D_3p0Gh1&9gQto+dBZ@1cbg7>rL)T zhyUgQ0bB$gs2iQ3oIYgrO#nrkyY)jdALd(&@}eFfe>SmvG+^&;(QySdgiWE%lgh7k zhJFdc_eeVZ`}d87&w4jjtp%A!<2d@Z1Yci_PfKG~XKf$K_9>)L;pI-8Z2Fr3z~A~O zEA~*oLe?hfr#fBYUibIArVJg6AQ@XE%t^?xlsvLCmo)DkSfe(V^!>Nzol{U`rwA$j zk8MNGg5FS4bUKh68ESZ)Loz(sHJ4%&X#EY+I z&Xw?Y4YE$aN;Qe<4VeCB*0?>RILknEe;=xlsU$$YE{l8gy<$ZBEAX zwzMul2p54g=TL%X?^KQ#`k_BuP)|xBnyS z>nB&EE6kZ>jp@j$m&35%^85nQ1M>FJXID_0N?t9MHVDWKb+#%?uIB4D-?n-VT@Q z?E)j`-L3Q!dz-9j8g0+xWt~m~uwq44IiIJFt6wOBEKf56myj@_YfFXv^3asCNHvWi zh3B2R7DSTgQjMQkQ?PVlc_qLUM~@q9#y4^~GH#-uc2M84(nrQRizFjZ0cEe^?ifP% zp4~*R-{6xNHu64Q>%5}zn?YykD!m%{jqO_y@YW97eWgtgqP4}yykZ4krLJoZxAY+e5sA#=``S_v`JDHMgm|Ufo_HnvFAbZN907^Q5$!?y81Su=%$~M0^&a- zwB7V>p8)U<`&6A(tGo@B<-{8k>`dL@0F@H-#RA&6GCYHjm_hiN;P0aUh*k!<1hrcN zW_y9l-bXJYMf|Q;HVW0K1|-a6KN*CyUB8mew$+?i#JT{Wu>lsHigh#+T74|k*9Ag< zLO_*Z|F5c8Y+UGtDwy&4(`$I{os=wvfnaoeIm39I%-5?-`FQr8=t`MMs0SRnusBCx0fT6wX2X1~XTKao=mH@dm% zxL^Y9o{N7263pM~PVRtOcT`7^Iu(^_>rBZ4vj#YUtCvbc?Mf{4#wh9^>q&32usTb?a+@XbztlW zkKn~iatCusrH|9g?&!-VMa?cc51;EO(QE<^%3d1bBoH2~{z(Fcr71@>W%KX}&F~wq z`9^+oUs2R17;Aiv!u(ka{3n==8!fdeQ`^2Y3_cF+Qijy>jK0DTBE^YebWe{7;FbQCqW zCSxEQ!rGi@RuN(i&fXxj1V|4mzDP3GP59$fn7Z?383pE~&?1*sk*xXe)hA!?u=U%b zTu@RVafBcW3%r_L|LX-!Jh$GNHtH*BEa1oe`uKO6sITZO2~DG8zbNv(wy^&q?HO{J zd;(=%TsKwqLBHmgaLJsV=%vD0R zOjkJplF8M(dxa{N02!PuZckLU;P>L;&sxGT|8TpzD?qOW?~Y*wFf5Yf__Jej-AT97 z%SETY8UOZgE=+(!@7HpGLI+dheiV~0io=f(KEPEEk<)TAo;w{qO6O7&>-yb^6z~Orv?s9G>6SaR zxZO8H{P&ne9QSY+%YH=Y5mk!D12?jmJx8+>%1k@p2QWz&d_B02;{?l`-*HQ}vGa$K zn@R12F@AZR$<#|1adD4QNcGT59E~jYwEHQ$)o)D~YvB^+*8A%kaE=u20jcR%{~#9V zvr4hh^vD~a9R)zT67Tcpf~5apMes6R|!3hxn+GR?3JQW&9O`)W1ZK5#e=|am{jg)h zH~#xBpwrqj1M1Om0+^~bJFGXUd3eo-y|o}c2B%CQlyBGFh+OruB3xWBegE9AdwY`% zf8LS-DPIw0y2QnazfOq9fhB9+_ay-rwr2I2x`Q8C9N{K1x;2p4$C>hLMVbhk>gF<+ z6SREJ-*>X7?RdhuKDjlRg89S0bVaXJ-;74Qc4Hz~rtX+5Can3xyp`@Q|x?;Y+{~c4QE}lDWUqgnM!!owmy;4)h1OMaQNZg)cm@3?qpb!y zIeT@|H?!Di7bw#_jY_wByvf3S@c#E*^gk~q>F=P_ucJOx zeXxiN`RJ^Z>_M#h%Vg!iXuGpWvWZsBg)HNVKH{}=;12BFZr+EDu}G4w{tTy&=^455 zSOdO;TnQ|GA4QrRvocL;*~zet;DZEB#|=s&cXr|_LPkOZz6U=OP8zLiNb3KG1vuDT zZ}n63jS}GKeAAcvMG!h|RcvD9B5`S2+ROt$reY>x$@Uq!ZZ>93sfa@T@?s=SS{?O7a_(7&fJQ920^m!Nv^i>^sm%>QI_g0C zO2-CxhdWV*xmz<<^AkTI7Qgf+2+4-~Y4&x!qlo}-2rL|jSHLv0{mX~tX73I`o?St3 zB$4M@fvC^9J!7}4*oOqlEiFk@N%`DZ63(_U7DwV{teCvq7<^n~W&_c2{P|&*ubhd) z_Z=^$JyPW5x8HIvkHbN>cMzX|4h;DB8|0Y5)Sh<`oqo_ zegO_!kno9WE^H;jpe~UWzLyRUic;(19U^`JZrFSE%fqCXs3f=cFhtwKcQk;L`al&V z4GO_w8-!dFpI1Ks>FXF+lWKRptA>2827`XZ=!>WO*^^Z^6D*8im*BHhS7Ux@Yn(HY zdbkTgJKr&+x-LZh3%bZ1`=kp)R(8j0n$@fW z+6#23O*MhY`4No848+*EHE&DHp)UDX+X#2~VyxAPaag?aG@qxj+&xmqqb9`y$FsP^ zsQ=)o=>y?&CA${lPce1EYTXKdjiaFTXOIp7j*dHYk_*CRm+~%&69A5&i_qk^{Kcf2 zV9HOJXc$GxL3R}GIXM-#L&UP#nUn%l*LA(ZRHj^vb0!$<7C`bbQi`{Y1GRen0yK$U z4G#@KT6Nfuc9hyU$E&=(S_x6?m5)ge6N-Pa?pb0yfIyy~JoTaLAVHV0tT zB0XJ~3Fx}FDW^RO@!wb>+Nu*k7iPG>ejguBNp%hjG&uF$Tk!wj$^CiOQoqtQ*Gu;M z@+pkBj|Gy(2mP+7- zSsfN?IzcwiZ%&alvXxW+a8C_it!wxKypYx(2^|$h?JxbeuN?HGT@F`3o&PV-P`C(80=8-^w;sQvWpyTpOy3XOL z!l<1hMyD2Lr4{c!RzYJ2qB>FTZ1qlYziTl5S2$PaKP4%kg(Tr3l9kC}BvUWJr1=8{ zBDBgw(mTgjOC6{qf{esQu`_ICaT$VYH|AIstcDZ*z8!kmzXMQdG{W!SQFi8k0O@V! 
z(K?iyh?(9M`#7t7-giKQBoP2EKda;or>TDgjHp(Wh%FYelzn3MLvOW*e5-%Ki7LLH zx)tI~gk*@lAnySLxbZ7qdfc5|!f5>dzODu#NSXsn03>(k0sQIk zVPR`QmbTRs^j7_u6L&Nazb$-7?CWb#VYTK%ejDRWJN-cG{3TEg%wg-4p*g$xLR?t8od1lgips4{eflf zt&>-Cd z;3T6r*Kc(CWzMTd)kUO}?<1V&n+a_Mpu_J_bDs@L(@nE3np+~gj0}^HL|TcCSd#Ox z^FBIpw+D?-<38VTv(cc4m^|6w7F(3ul59JDVvaL^S4(ullc#1x=Y+HB%vq;Ub|?lv zs9Wtm`t#}YIG0y!_tmH)>yBRRo-}I6%Xf^Z|HuQ{#DYD?+ZTy8Tw^O01|3;90InwE zpwaKGhgEd@39zUthRiw2S|mxUJYrrJL3m zI;8yYrvsh~+L1-!`*~XSjeG9e0<}r%+)%clr?P=J^qHzJWEtD5UyxiO>NnC@7Cp&x zzdVD!7;P0-;o}SQNxj4o1X4DZDqs>P5 zUhXJPxr?fhe1Yi)A7~qU+Pdi0MRXfhtwt9Q`?`{tHze);>F@Cfh>P`|Pp2;L3VLEM zS-{Pp)NC{HfuFV@waUrQL4dd1Bw9I*oxxw8Sog}{vrrT;$gA}Ps(>Nl9}9(j3VEl?0MGSQ|e6{X8`F!}})*e)S|}M}e4&$Kms&+Ir7pXs5ao!W(#H z&n@MKtl$UD3i=EtCjjeWj8EBUL}Yy8+4evsoA&x4iFJl)cSY`B_3Pnt!&+2F;#UpU zfZ4P=T2XmZX!0_iKuWNzRbBqmM&U%!)*mXT{*yET32(;d;Q>e*iT6%%G|uuft@4O% z3WLTs`1q8>MFFQ&!MnjgVsY`4pThUP?t9j3LM7-ihem9N{cwg#U2lv6ur4p;KzkiL zO>btg5EFh(U_-dwyU+bmK{&u=@*tmQD*_dvUf8c4o(Fv<3s-xqJI`S3R2FQE+W;nR zgYYs3OkzpAu2v7sd97d(+=DN@ebPHGJBQ!TEWl--*)-XhUJ+HGwt`M94rC zw9UqYGJ)7qq%<7I$O%<$ywSuszc^+x4uuaM>Ooh!5y~E`@s~b%-OX4x%dOdz3s5qw z1&r~{HsQh-YRQ~k#nbq2hn4~GoCbx{oEMZ)_Z%yzT7a+tsJ!d2>A>&v`+bjJZ=5W^ zOtETQZ55*gAFTQp;hPmvzRIZ;th^uMMHbXlj)I!mV6VpniukL8;fC30^>0@EWr&JU zUkVAXIRES(@Er-2Li+~az&NV)2{Q*i(d`^Uf1$xX|Fy6(=JSr|lAzHl>rmE{)+9OH=F&J0A%d3;xFH{~HBETSpme3poEizu2s#IE} z7My~{hZ8FI+;<~GsC6NHyi9YP#j54@OAiw`TLMw@%POi1=or)cZOLTi7ZEx+e&+@PHoS|{_Jd3cNH#Cl&H};Zi9f!o5A#i) zFOnw=PA|(vGmIp%+s43h>elK8^&d&;Z#1kV1pjIgv>lPo@=_%AgJJ3X>Y^){lUSC zdgIG`Zw)tFc%YAE_{%8Y5F#|pL>9LFt)zkKJ;1UVei3=`&3B!o*}DDct=#{s zf&J&@wHo(C6y)|YRi;uji6nX~-^tX{6g32*?lg) z2&2EZtfq3~*ZH2Pj~i-mnGkyDK} z7KgPnx~aY^h$tBTGB_E2K7s!$VnB){pe;QiwapT{w;H~G?Zl<8*M*9sa(qLAN5*QV7Q7o0cl+Xw&B0JPgE^{5${BJUH35v)m>77c z?18PeBM@cUQJv?&D8=AXof1%=CF%F#$&9Lxh;;WWT*=ks&V(SKhc0R(DY(`LNG_Ul zd5-c&I$~-h$GU+umXoV`PhxP2jvTvA7tRAQ8O+}0QNaTyXUM!nj^yc{#a0)#Q9DGz z(q4yD40xcr*I9lZIucYi$*RHqHiv8&n^eTuhH?KP1&71lc+^vw364~F&_T`rq?t+q zEm{2Pb-2y+jptoAAra~0@PvZ8;i#XxdruK1#6z5Ch=}w!in#%8FNP7 zLyqB$_in#dQ?2NhudXpQ$WVKM06ta9YoiZ`!(YS`{BXy>I}$8pP~(=Z z(I@p>>#OMz;1RA0YoqS&^KNv{XPK2Ej#;L`(E^d^_fVtw`ZkzYidWA0dxJ zeMUzq-^+@$?hfy?n<#)R9t6;8HCj2uuNGOrHDfeE*YHmo^~TLDk)ZD~3aYa^`6?4K z)cvf?GvWhKfOas~P2>#Mwkgxj6v%@8syzF&?MJ1ak!VhZWZZrCY#D2|^SzV#1`O2S zu}xy%2-~NyUv3>o=YU7_y3NmQXawGvm({9nI*YZ(`#k3`?QRod6A1S>nom=XSadzb zU4eP-j77b%M&&ZF?%B~`M|{rMvZ@Z-m9mdNoc;2q{(W18b3ki!>{#Ua~J$S zb64how2YgK8nF!OGVU{m)D|n!!1mBo9GA@@zJZ*7zg*I=x!h2H@wr(whCT}S|2=W7 z%LPUT0m7KsDR0nuu(R)SzKuM25M#6TUw)t#Z&!1`zmIxM$KYoS@NZr#ug?D6bGh6~ z*_h-SwTQz)j-14NRX{)~8D6@fk~f(eGc64=RjYoLSl#fT=pH8S;6XA2B}ErWJIk?K z2M*RG7`~9#EZnlt+}XiU#@tSt8L-x#ACKFes8t` zUL&lYQ$Z+cQ#n&8+Ed!jItGl?0geh$_Z(=QM}Elz*7bDcE3-g4MUrc$iM12{@1Xe& zkBq<5IlS3#?gNca+Tb^iHhJS<kXg0WxJ;;Hy9L?7BrP6|$Zgei>tsIC7Pu%y9mKQ}Zt5j+(Wa zh*8liHcOhj>fAYQL9mZ|hN@n>r!nWRmo!~3DJZtsirc)j|{x;-7Xz+tG zE`D;r^F5eF{yS0iYozN}F&@zFVjq;`HAK%GepR=dX-A_z0T%7V^Lc@i`9$fVY4THI z4sroD@OBO#1qmqMp*~s|2U%_BZH#v&vfD>L%TCyM?%^-OKQI!OZ5Rn+e_C>gEJ)0( zKj)7R-w|Pnh5h_Xs+zQxJyk|~LC?O6bN|7#2Za1-wu?x_XmejLc<+uPpC9omP4&Zy z9fmgAd_=xP2?ieR8V{Z20`i*f|8scaI z&|m?cQS@d;Q-%iDxSkfSi#L<3d?5pGdFAx|fYV}B@T*j5TopoUa@HDh0*Wc1|8rMY zc-nozxOv#)s`V4boc8;WS($N}x**|O*g*c9tyJ-I?TXrFrj&$RXhfJOXi#N>?Z3^i zm+Yt%tGN$!srDmcT`)lwWu1pfeeS`&oKyj&5vO@3KI@T}jli8gy_vFv+SmD$5poNq zpd{%C5HtXiA#PwEODl|Qpe%1vuc~+CO8Yr=BsavbdkQ>Wj%H7J>IpxcxD4s>{thlF zWPD`~!_d>jXvu^%*^A0@)%QL<1Fqc)Z)hHp!|fsti^qS*U@R}xy1#)4Z(PE5Ru7gFtR3Ux?hM7~dj|5Y z`!Kuxini-TcpK7GM8yb%^srKd*))|geC*bVkkNC(@7jz@j9btd=kk@8rr0lKy%sDJ zR_EOfA2daniS;vluBKKRNKBcBmJ;jyc*Eyt2seU9K~qx}?@{TWbiUbv<{$CGa1;j6 
zlvJ)O{d^bv67N2UEzUFWEOg9~sgFMpQC_=Vb4wAM z$I2hT={&dJ_bBn$v1#%>qq-p{H#5p4U)kt0u|oL$BKsf~JLx!9r%l#JgX)6Ob5vpS z`Dz(SZY+Q}Z|}}2dC{qLkuNk>@z?v>xm#7 zfYAeIgdDuR)JHkv73X#v!PYA?*ulj%2pQ*JYwEuG4&5 zk@x9?YLSGv4H!ny=NhXB1L1=~OW$H+Dy_5#jt#PFeVd^m~K_@4n^+7}E0WX8Iuq9Fi_;T2?siFD$Wh@~4P z`?}i64(~|u8-ocvN}U(c6=(!J zkV^sfYt%eeL~v1L1fr`Vn(6LW%H{Zy6m01|?-UlOHa3L)E-46#N>4o!(+pY$jw1b* zA{1u$P*v}FY9^wwK>t}%y7lU8hoDA9qOB>^)v7iKHwj(?rik4Sy`Z>`8H1>1&3$yW&=pQOn)mZO(X=)in%aGVj`qn$F`t~^@t%V)X zIK952JS`hNw>OQLhbo1%^+RLN7hIRKjQV@R{Z^N2$PS?xMY9Lw)6p>z)<&{T(_04K zp5b2}6fwsq8;(kkkEw_2y_HnM=!Ck#E%Fbn^{lRx-@$!8?n#G1UGn`UP)M6*2f85$ zLdnClVyps{i7~j}Z+z?(8Ne^Igc{5RyZYD1@m&+CtSPb+(jZcXd)P>ov^`wQck44uvneURj)TbcoZ4g~0h?jRT?X-0F#At51U&{(gk5LzP(6ZTGYC!J#k< zGu^8yxeJF^hhOGdm_Jv5I^BJv6lE%#xK5k{&g!@>1da2rRz3D(0b3yT29hm?a5~?4 z(&wrw3(aD&cT#R#G9ST+_DR)s&RT>>;0XNwkmD8~Q!eg8ZTu0VY{TDZ95 z6$Lb+?0)qCd43$!5FM22^GJo*8S9uV+ly=Y&(~^w^!3}K9vKqrWldi#yDE*XI=iIN zztgG=#~wDueSgY*0JRQW@5q+Il^6}u{BWdy8%66^v*sqiCmaDBtlly3V zh7t>a41-mm(4sWxpeDS-ySrSS_w(p@M(o!)$!~<*jp%0)yEaO|-B?w!hzl58mb0(@ z2Q`A4o<;%J4u@Ge#B1mXw!f*A7CSpg7T0*gx`Ut0Qs_`Kh7X^F5FFwJaI2gP0nMsH zAv5+}raqPCJ?nIRKNCb7dJvftP*3?+mkT9&Jk`F<)D=pL10aP*#Js)SHQY-@!ot-~$|~$RtFi?EtBU%)^LkMA zZI858WTM;3x9;U)2Q2_KV#DH%2lLS<8Y5^BX!Yol$eVu!rLM0yGJI&uCQ1c61SWt( z6T{VcZL6Xp=H1}AN{euV?R2aV&XgE0a(ZM=z`aejSVNrB`(Li4$K10iCs-^4pLs?z zO5d)ls-C5=CI%87o}PG=?=O>`@S0Cam*jUBQ3J9R1Isf$8_~%x470I!Js{=mt3-=G z;vuGKk=>)7eluU|D}POl8@ zn7ieR<*}bLHMm6A-BZ}=eYk$@Gw14(Gddhhw6=~B9pdBLWCNjBa03KP{UP;j_7~Xm{9nM+{|+l zNv(-es5!Xt;Ki9H-jU=~V3OqkSKrL$+*uEqkFM}B)r<=}v7FY0S!pqd_hx|V0NxQ% z-2Q9VzV~98V$>CI66X0ciFqaf?R+YglS$t5x;(u^8e1!PqO32{gNyj;o%9pRf zM{xzTM~{H-!SJq*$xYk*!jAJS=l(^$smDivZuV9FpDrr<3Nk@^M`PqZyHs$B~# z=i6D!@P#56!P%C7PXo4+k&}ZJZRboz|D0smUJCfKi%3ps|8^&vGx+O#iq97xSy19D zLOXc|=*XPL3Er&0lduv;x-y6W&&|67#f z&p)awE?0^D>Uqic*(+_mKJOUH0LP4#XSDgUIjwEm#p?DG$GuaD#u)euIpvyN_6oAj zgbXdX7Yl30u-^5>hz7WkA`qX20=5m6hnUiK0jpqs-xAi^j}7<{mxi$%5DNX^Uu6U&z^$4BP~8?Zy3z*}w%19^QO8zO+N0B9%1^^+}; zU`OG1BZHYunO`61pX0t@@V<97cBi&p2b3USp5QX>DL#*6E#pL|u%$Zu3r!KdqtEn( z_QL$O8zJ`485&lJ5ZmSM!t;N9@0KS~Fh2(DM429t9qlc4o@*Z9_`oHD;NxKH|3lYR zKvlVJTR{|1i47tm-6*9#$k(N-pyH!HEW78p>O2?+VyX$@1d%Wk}`_3EV zj57?>&HsOEeQV7%*PPg_V4;Rf7NT{c*XK4_*l_)ff!GRlMOLf->d17e^6NI2Od;Z| z!oavy5<7EiL&n0Ts@gfghc_HpHZO{$I8^mZWqrJp zE46DGRKz(p8z0u0iWWFdDnS5L`f%X>pa-*1p}$A{p?A^x`IxF$mV)Hi^{u1`2foWN6AT)oIbC!g@$0uFHi=SQP#P0 zmG8&qv95!=vnus=AE3$$$1&XZkHg!O_@cLK)Z}$1#b}7o6G}$T)co>pxLW-S;C}{e zw8{lV?p!8kpl<;BBZ_lKWPT}R>BXw()&BC))&7Gfav`C+4z?sT-?D?JlZXh2fOL?x zAV22{5KeC2)!z+q|MYyEzEEpDO{M09Y|ICEqK%-7+4`d)(P}t0KG`c}48dpt0!^PxQpq)h z7YWTuJ{Dd+7S{X86X7|@e7mDQDU7QPs$m=!Yad7)&hNfiyD<129`XbB4uhC`C@BzK zc#KndVQ~#sbdLMT*?x2>?$yur4d?l?Pj10Kv5QfJWa&f; z!};Bw&%foUsnZ3h7-!zt_AU__r{3mV=9@xd$Oh*b~x}%WKWm=W-4_X}j7p5OIQY@PY>;c{XFF>H7V^hw>?yjse z0SgL;n%AvZdvm%KZul6DT#6<92>H|Yq@d5CBZ3$zXo<{Q$vLk{qcGldMZ99kFWoaJ z?uragq)=G`HU`Qqx!ZzYy7}kT!|*u0;)B-msX0?YE8Bx6@dJaV{0+VeiC*(lA$gl7 zA5r%-?ONZts@RkJ^CDV#B-rkWmr&s~wD?_?Dc~Tfc+4mB)B8XRR;FS^ zn{igJdS8|pwfAR+xpbGahQv;W?nFE)Ey%;Egk61)pj^N!#T|HmUQ)QpU)68P@$DFw z#b8Zs;naWCVO9pKGgt(GJ%Kt=Y!@RE$3f%i%Ht*jrIKQuG2|9`cgt_SRT6Q!by!eB z4%{71E+W{2PyAoQK360TcB3)gH>M|xOsu0|zPgO*nLdn8s{7ElC^0T0H5`e=aTL|F zX4pU)j51DI!5kaj%~bevv`k5R?K%~6e7JL8w)$Z<_Wg7+bJF zd!-d`^2I9sDNUGCO0$$0mEKr0XsIW}TCXZ9$()b6$CC+cX{?zG9M#i|4%@Ai_olm@ zskj&WPLkFcSAV#WV=>z^TOgYJCQa#7Ke7-$wnV;xJ{RtT1iZ; zl8t!MzGdYsqTEJxywnwU*pH1fES~G3H+lW~EiX-eVIAWL&0%=rOoKL4n-=SgV?JEg zSrx;|K)vNz9>E{@_wRZ^;khT^>+TtQ5vFXJcr6%%C|@uNj0 zcMC!biTxb1YB5vqR$|b4@8vBY!vaQkjS5S3L4B3y4(-1}jH%qXgzrY8&MRsfJ(00$ 
zfuOBuR(^@d=(KH_GtQ^vO(%xI=R3R=QyKN_w89~}3lrX)71Hr6ZRJDI=1+LuMw?PR z*qL(_-R@$i!i~vIc5rRT#GKlq&*azW9VlxusFWe9;~~k@$5>Kw?{}uPKadCCioR|c zn5kH>e||BXd>5L^;V3`hTZ7A72gnsyUPGE(XswTlMB@U##8_b)-=V+0SFs)SL$5D=fb~&nxhx4z5y*U*q1%qvDgXUu_P=)Y((izM`uqzOrZ`*Zh%|B0}D4!S~?7D5uX|zuc=kqZUMA zkP~~I*1bpHjeI&_bLq>omjQ{yD{7UEBurX6M3o*i5OQlQ>W)MSMl|6|e<)Xbkr0xU z5|7ZCUe_--!UxncX&{TdrETsuJFHA-0eBW8Fg@hJ)vOAY?U*H5 zY4p6k%pWdc)pYlypp4*!H7iQHZH8oYPaRW#QWLL@q76-wJ#+7MW5!rKClQ9#0QbN8 zIKh3Y=X!JY`&%=yst;XeKi4c3uTL4@WP1;+{&3;LI6u2Coag*HlH~q9iPLc)y)%`q zkciVkL_BbHzfCB5@%xnObbdId=lT!j0za3t?d9xVJ}-3}6b~moYX*E-p5Bzmi}W?I zSk;5Ho{O%jr-LKT9;jZ5D>vH-^Dhf+Po}>oE_W4?>b^*%RLEDGb&lU}Z`Uc!RRKy4 zzidA+I8MJ12;~sca%6aWlMO$HqGB0dI8K~T@sUby_o%9j24IQN?#vwLM_=Dh4J>D_ zV_Tf49qni_VQz<*+!T9wAD%B0P&yys;qB|vE+p2HacO_Ergei|{nB(g#5niERkZQY zeuhVfzg>W7Q6n!5^Xtr&t}hu_$l{ihDz%uG^+`N$XK*rZU2@S7-jU+zLC>|;39aO7 zkQlAH%`4#MxFZ(9ES173;o^Gb1JS@O51gSWykFx1!dFgL*89ghlp*MhySwSQv&Dk{906YUk zv6BZ1A#+I8Mqt zV=Gtidvy)5o>q^NqI2lwTE4^+8B7_$=X?wsI-BhZ(8=X#Pc{PGB;*r2-5W*sv5Fqb ztfd<6R-15A$GlxvUvY;4tKyk_$|zpfWxwwP_)OY|oR<5>MHE^CVA!3c`E&-l{*GnG z-cWT-YcHVN)OEZVjIDd4pMrSv7>Md_fKEGT$t0IdsG3s%&HDfx6$X6mXu5eNVb{0u2@w?=|2MF{ngM7NeukI);MA zxQ(K|cEhdenHW48U3Kf~HMhr6+;_}e?*7n)BBFP9w9!|t%LP-;fheRmOq|)=qYA6q z@G|G={_%RZDD~(HJ>|=t-TG!x*wN3ee+UgSbP*T>Qs@493(yy`(vpsRrq)#@70cw_ z8dty0{&t9tvLfi%FO$85iWal{fs*f&2kaEG_gb5=QG*x_rFRIp7!ELL2G$@RJs8Qmumt!WX`4e-) zVNm9QZkCn5oHGhFP%&bS#JH59@slH{wlYGq<}e-X^4KAsyD?#sm%?bWBr34EzGr`F!zAY!~FrzP@Nj>L2==CoAMk zgG25boT*oJ1X{xc(Z7dG=)5NjxhyV&OBRci!_^($(c8Ev z(Ur9xHMARo6k1|RXX^ouyi7$5`uz&WX)z7tqy-I|L+TW!FV1RvjT)5OWXgZsknE+S zi}-Yla&EoPemCCgh(I;Weg%?1Y@aXrb`3t7E3wLaM-WWE+AG4`5X)t^oK<$CYcuZT z8hR%1ZA4_tM5^KeR5kha$2C7I$X-e)7xSPEDQ$+Gu9a}f)CwPyX2_-0e$$)x&Vf+= zT*7-|ENhi|Sq!HuVsh?aZpYw#Jt=k%6pyt1sS(sX?lI(n?H|-AaNOr(bW#p)D&Gdw z9u(3=GDDTN)#jKa5@~{y>TN|=P0@Ihcd4IafJ1!yh5gmae)E2JkmS-crqr?ov-QY0 zNC2AS;Ma!qtrgWaHjmBSDyX&l+R7fwN#Q4jg`D;M(_XCZVtd;3;Z1tv%{wP)zN2_*i2mF%S+{ROLp}^K8U8m03 zi+z(y_HjKE2+fT6jt?{S?C%CLvr8dJ#M06Y>t1N9OeK_g*SR%vhQekq0{{02iyWtSU}`E)>iOwdr`K5V?crg zyv#p3+4as2`4d(%JsRuM2HPLQe;zfqE;1pORXC&X4c_}lus$?n=1%JPnxzsw9Eje6 zg2Vhn!{BhlFhDjxw{P?GZH{8F{-6}){D8~{nFML^!W6_su=D7m9@U1dW^X=&7 zfAP_tpko%vBF4WuIUH0f*A-c~o||$f9t65Z#TAYq)ss*PR@{ad2C8FysBgXl;(^B! z!qgyRc^sfbuk|bP0T5w7_2WF=sEAD^t`(spf^Jyt|H3nT3^#xVJG7_z-o)t4@G{=E zUVj=aITCVo(5sl}f>LcAtAjJ}efu;$(o&$J+wx?HF|>nk$zVoxA5pO2qYC$;#dR+~ z#rt(dVrlXfER%4bTrU&Ijrh$#ItG1{%~b27-{r%ZVhp#%7-wQE4s>uWqc1ZF4-P)W z*T1ijSm1nfdi9x&hl&{{s`2f!V^eW-^D$(%6nGJLu;XEtknU2W*(dS2h7P5owd?C%*4f zTe?ho>mBx&bGj*(z0?oTgWN>uDBz@y1z|b|smDe~t=fv|A3|n(rDT%@7%OFKD6+IC z^h1n#!>DD4r*(Oxbgs{=4)_cdw6qnSeJGl*mUl!DC^ZR{pKYq{d4g-nUVv?_LeoA= z{T^NRi~J1Qsi@^bi6t*Q3a^1j`HCZeB@z6SxTyvgCKKDIsBq2idW#pm28<74x z&mlk(#3{;-`v?q6_xeUNe~%Xa`wPVLQ2^RR-Oqav1es?cxC;fV_VufyQ>+PUZ*C+a zX(WIG?bXd$WNUbd2R#Hrh)mS&WmB4B0W#(6y9-Z)@$IfVv+O?mnDWt z`x!7Q&U&{sF^raSyA_;=bWmI>0FT)-U<`~*9on&jusr}$VQ3+BO?M~BKI#BRYzP}= z$I&C;pY;M+OGO)C-pWPe7TkZn3v?keU{#e`kwTSWhZj0LE#Hzg_pw9-cAfM-ReL|e zwf@XPQTPgnZ87@%^}<6_kQAO@;EvzTv7p;ZsW`@UT3e}xxc)14SBcK5Al$qnB_$aYShhxkfiLEgKfdC#i0j3-EyxXeRt8qe_Q zn0xuRXU~|Few7Ji{l5B2of9OQi(6i>9fhjzcLj9Mn;Ts7y)^0!!P|$2#-^f|nD)>} zO?bp?o8^_eUNT?2@QdStG zRP-6h#k5hL*arYSmJGwYXWM4H=hxLv~Iev4WFPtUE z>%&sht_L!dv&P18J^_FL+X)0kd%GTR?%xdn41%t)V#ADgYXh0?=)_ojhhh52x$^U1 zj5|>6U_tq~jUd)-qqiI%b(YW?_KHcTmX5i0SBOr;Oyke{fzV(r>iaErbxa6+dfLQ}vvrOA?_~CGF72!@2%vJ8f5^+f!z-o{bU5iCh7k7Wl-GGc zC3B3yHbonFh*(ZlDt|eo3hHSU(kzNM(0RlKIT!q|QId+wYpHfim1#P5E{y`oE$< zJO)HcB7C04v+KFpT)KXru1P%(+Z5aNJIn+}Bk4m;{E6!30NZ@f7QOW|4Zvg(R>Ud=S! 
zq$~YEThyruvF+;ZZsqaJu!ZC{eb(gVJ_y-Upiq6$kVv`NGb;iou#E4d z)&^fSp=2egSoPTc7i*#6r<(K|8NPYeBmW(-j#149j+uq1Ktt+B9x|aiJ-U$~jSI*Z^N)+Y%J-F$HoLc3`@F zumhUKPEguPR|7o^LL_GBLNLdZ6YV5j$R;p9l$9-nRheQIIYZN%Lp}jL0Q}*BJTcyTo!F!cpm`@J&6aqR|{L_Ou5K zMxrl9ZXV{5?XY<;re7PJ_$IVkM{qpBh-Jk|d@W8ON(b!$dq>O71f2=wQa*MM7!&11 zU~=vP3d>g7X7^s{@$77`6(^a=fx_g-1Wt{X>VIJalR8%pr5|z_-CsOYj9`2{@-e!B zDmJ|-sKRcw2NGEU&E{sQ-u|LCS#@W+-lrPr{vgUfyuaC4M3(ga|FUGrEvDjJm3)@I z+^dTdtg0(N1+d6W1>`?iDU^LOy-~4Z2(A5U9$0n6i|O4?QX_RqV(BN#Z!9fRT+yehJ0rD z!=UuE?Gx7s(!#IJPZU!}UVT_AbKmjn$Y}eLqb4Oe!otf_A2?5nQ$JAl*?0tF|4p#4 zcVZ#%nzuf<+fnE;@*#)gWZPV!1#2Nr>V?<+g9u^)!Z$l{)!rGB8fH%@M{3v`6NMxe4&jQS+<@_c?DmJJi3TqAO3F?_#$Tx~< zME-`myR1sFEQnvOeNRm|r`VL~K5qTvTRXk$l&T!eEXH!ZGKOh>Cb7b4+*qm> zLebWGa#|Hxax&x|X(DmaZ;+9J!N~6<_?~12cjE$wD^TJW(I+a!>LA4O&-i=G?@^%s zpSI$rsb9TqEjLp%E@#EBvMPs;*WQ=>Jfs<)S^MxI^m^VY)Phmhq(6>PWv_6A^x1IW zCGCi#E!^EQc7>=|O@YhuHYX2&*HY>Siu1bZ^nIAve_~7N;zzG1Qp=79P9@VE( zyeN3Qeb;AikGAva*xb_O)f*L=a7Pun1J`m;Do2o8o%>8+BozLi6GGOCEf28{7>}O7 z{&RKxXAU8IjLcavlC3Bv8Oy|oFjs90X08!A3+(5=kOsEKjDS4L8d3<7X!Q4#>dkj} zy|0L&RU&2q3(Q^)A%wQDuoEFmGI_Vm^)~ua!F$zCeZ(5tLqlNfnM*&xQGh1kwf!!8ml7 zzSZD1hro|bt=)zS?=#)O&^Y{dY13VqDc!O(Gku`nxX?t91_#?+R~?;8ea94Ez}h*m z{0zeiPbJ>KvW~e})~ae3>Iuk-Rs*yfB3^sWI0M5B1n~uo)^pGL9&s&qMJejooF+Zc zFA; z)Y6SdFb>+62q3HmX8%OSMw1p4fq4>`-}@6O{}sHSAg7@Fr&ljA3a^|7)H{G_rsgMd z*rY%kllfdK4mU>o(cS~e#XTbtLqHorn97?2|3OH+Nq3A4*r`v+vbrs_0=xw}*haMs zg$KZ7JmzhT*q@Fm*Im`!I6e~exhko^TAiXif7-AJ>lKCh7<6$E+~CYk3S>F z(rQ(Szy!)JT3kK0=bg?Dc2y)&nd+iCwG{?3B@#FNC{U)HjS`cpKBx?cYjj5NM?UP+ zk+&_upfO--?dZdGF`DW0w%Z#J=}h1+9q0Fm853I5+rP)ri@0-t#{UrYNz}Ssd{%(# zlJOzr8)8aXlu*3)^U>3oG(Rz35=ola#fUXjKhMHWZJ`pA4xFy#fyK1<0OKF++DdmE z?jyG<^#AXs2!K1$2yka3W%T`@Ztf2b`}=MdZ$%;bFpET)=?V(kMTBsj1oGZnupVLr zmZ(dBmIaWh?+$^Y;{o5aDA%)n5ndO#4I~_Z1@!Yyw89`h76JmrkBi$$B14FoYgSu1 zY;&(b>5CAz_M&9*@Hkt-kGuhD5b2<#jsQbQ#Ro!K1C3T-yLG<_mGto*u(ftee>;B3{o{K_cJV1KO_ezV09#C*W@tU5Y|8fq*f%c%ncuBlx;0!!h;n zQ4GQuzOybCTwX2keayK(O__tpd@vXVLPV1&cbU@A~EPGLZfK9Vwr9D z!{7hFR6~+Mv_?uervN6jn?3v*w()+F4TDNGlgNZeR~ANYnnOtE`B3~8$I_EQ4QGvG zfIbGL6@ErMTCxnBS@lm?k>)>~ROYxot^D4k{L_61f{>oT`t!5@tlK@6#PmQ64y1`; zE%+dOk_)$HfJYR>lHD4vR#3Cey#|7^g>$aC#-#TNf|w08;Cr`PlC=orS3!&UBo>`a z+#R6Q8P-$PpNC&#@lk14zY9mca}VXyYKuRvSKJA>vm>7==X3xhEBLk@aMsEIvtsBz zr|s-V2)Z>K_6s&k(UPxIR}BirZVRXMhOLT8Dv#^S{b5zzJIb^&=_m>;NOz0QY>BwM z$HUEb7%Slve)ebGV!uak*q8v)`21R?Qi2f$B;RMoh{Y37cQS%@@eY{uB(jcyKLV5dZ{7U7plJnmGF@o4keO+Zn=44RfEzOT z2|(_GrS_FoweiAT*>I`D;AmjY{{R@)CNA`1h6I9E#l@#Iq&vTXo!h#K=mH3X$AXp0 zHpxDxj|bGaRHtUY-?kS5JJ6FbuO1TDL-4yml_E13xWe#Ew=6#5$qTbYkZ#2FQ7-v% z!AJa28w#a4n|s^sg6gIsp`R?6_y@5h3r7QuLAapBi$9O?KhKolJ4hU>c?1H6;Raed z02ww*>LUNi2#}I8%kcqNb{nmZ(-NK&h{qr^TqKac2dMz%d>?M#F9Bk8A<|)Nl7h$$ z7z``{SCA+`!Sqd*GqB=B04cz*YAL~@0MP+H}@aHXV{2&T0TCBp0*j0sdJ+U&|Qpe+|7(DG`_ksbnLEC-Zd@Gq_@3 zpj*>V35t%mD?op<$RenbQj`%Wd+qBYT6CJ$(tDApK<+fR|5;}Vt$D_uwC*DcLh|e0 zz|jBmAn#H91g;e~AaoHR7G!nWa+|up4V`yN|a{|DZNRW{T zUyvG10X&nS_!z`qhD;*6#E1%`8amY@BpSL|nomEY!L-2_GQAIQT2ljMN-_-Xse}?@^P%;|L$wT#1W52{7d=c|K|ufqm$JG+v6Hn zfbO=3@Js+D|1v#8|)-2EuAouEA>5E}#SPkg$cIyBfA{T!%~52FP~vSh8S{z?lz zji~@;fQZ=^HE3yAg(d`F5z@*ePyy^Y8W_4tNJi1=`*gzQ=i|B?N&2b@Ijp0=dbTP; zH}D|wGmiKgift@0@Q~!X>jye8(d5&SIxM0WOwIbpU!MzU^({R4&*LPBp8f!N+cX+X zhT@{%el!2}rO*`5be_y1qi;z24@Cv!_-n)yyFB??@!ya4FQ%LywkK%wexjK`Wm`p# zz&9cq1so&$!=$ow8|ue10nXWWhvTaOxWxCzO;bC;a*#D#7t*Z*vu%3lxI6o5VN-JU zH~>2zoWTeV8=b;ob&!QFXKt=iEvIYMvl>y?F?6+iD4E02`F}O1v>&xV?3?A zsI_!~LZm&W1XI&MA;24Bfe3WsGtdtR>o~7vWRXP4FjDnuxa#S$?d=YoGXAd3f})yyY#yThoO;;HD8qpdLp;& z4XX$6smTZ8GqW>QBg#f#A%#+--mzAHbM2xiTkm=%?M4sGCbhImjXDkX4L+{-@$9hP 
zRzQ=uY6aI}P#Qx)Lwf29!_Pg59CF}VqUM0xdUxWJaL(cQK_R&czQEkK3pmj*fkhbS z4l6X%+btz%n}B;_?$ZV;Q*oB>bGGY+lYoUn{qf}?-wE8Xj~MMI!B5#frV=!NE>*;` z=YzK|e^GE@{1meg_~i_4Eo>)bd;_MA@R#jW9x6My85FwTmb3mePfn0o_1|MEzi3lD zEh9i>Y+4*f%dOZp#q)tle;oR|35lQul^9L`Rc84=YMhAK4 zHd8p!l#Sb-UCsQwUt-dyR{tsHGG4?7OKCirjOoIi2@Na7@0TcVjy4i1jim}smpJt* z_dy}Im0+SiyAJ%!O@*e_tAtMHsU-E$6pzR@o}oi{4Yac9ekZreQ1y#Ty-*{`fG0`B z+mljut-n&XpdAe;z7}gw%C`UW82$-CK`)5eXhHHNC?g}-tu#PqEg`<7Q~-GLY(RJo z3}3}3?j#?20xoO}fuiwW9V%9VlG$js(K7<%?XVIpaH(gv zhmpU==?T6&Itd()2vwz$oMylZTG)CPqP5(Wy|tHLnojRs|KK{aOs{hkAT#kMM zxnEHt2tB__>BIo+fOWV)eUoNQyS&trw@f5O+b%<(L2E}H5E17VN5Ia&%%fCZvdPe` z{pX0ay=>*nE5iB$1 z`^r(DsVyYDaoD_{QS$p^+cw;^GpHA@aGbf_ICD8%mqqny5BODU>P|`*^sCbFk8e~U z9-$`{bz{pUP~aIJngbn51Q^Y6No+)7e;VYU$Oc8mE7b!~nf!jdVVsDB$C*_A@c+3Y z;vq2GIj`UL+@SY41Cd4La=iJfTo2@=Hpp>WD1Stvb@kIMDty|=%K{@MF!E9R_?%RG7cF7K zVZ7X*-FzjkIVx`VZJ&2%IjNTXwZj|BfCvydO2}{1NAUYj@mv%oW8=#$qJYppSSyvZ z+sM$+{qyW%!CXMq#p~Y}@P7;izM#}%3knBXP<)#Jb6J#LGjL24`A}=yR4v6k3^fMQVt_uRNeq|WbKy2-FWOq+GXi`= z>4o*!Eb_u3(r3BH*W}3!&OL*(a+)$x3RQ)&@wXJra+htcUZrTTN`j4$s6UI^6RhUn z>dQOp1m-m+*ShOpL651MTQN?7bP4kEuY5a~HRW=}#&=(7zVx%EIy7u@RWZ+I+Hg3% zv1Za>)8j_;@4#;35puS~$BpNesmdQtUN~;U&Blo}u>L$mNwQlYPrk+fFG=RVa|1py zBIon&cTgg{!<@h)^EMf(b8)1EYr|~y!J|yD?TUH|Cd!nc)5CNaR9m|_UBIG@1GW_> z2rA-USNHfSILMEDI>0p)^GzgBluGXS2WXn8UVnbT;;^pN2{McD{;)LIiJ;NDZyW!v zk<8{TI^KL{3}5p8#Q2^EpzU06#C>M_YrPjX#oJKrlJKxshi-|D|5N2^&!ld!f$yNBX`X8ayd*Z-mc~R>SsKqf=Am_%FdW0LLg>z zk%=4u#HQ#9JQeFFdD*ZaT2uCes1s}k3!m&b-Y|q;-~a5iZ>dyL!RsVtqd58r21VIU~1O?6&>s@~vU2f9@;?0Y(H<<%Fh_p_|?P^c&BK{tz^M;4XmS?3<{0k>ln2XKzo|_XSH+kr_J@KNx;Km zl-)$k8)_?ojk!*HwO*uw63fW2F0?3Ex9XMA()5f>37ATCf?+3{TcdychCnQSOz6R- zhtA}h0;gt^;Q0E^?{`8)y}O=U*aaE}ll70dmTwanRzHrKMpJEhX~6z^51vJcSHxgh z{dX<@AE_j40G&*lklz)SE*0B{2K5d`l*ZTB7dGnku1-aXR1)06`JhIK27L|Mc6oRZ z0N}*I#F+{-E&BijO!Qdkil$$r5Dg(R=Zu&^5N!dJhsA|8g}6M%P&i|3V<;3hzEr$! 
zA%T+so*VD!toMXAK;Cc#pZnQ{!78<+7Cuy6oidrf*g%r2!>_zivNP%-$t6y%-=wd( z(JpWo-l8>X87M9}T7AE8&9$59yVyn%y%aCP+?C+ALFLkBIgn%da!oxW&8U0#W-}T6 zgN*7)n=W~#xHMZkZ&ch<0~P8P-WFawkzx6OgzT?b>Tf&@rDVu6pLl)Cid%`pqt1$_ z5)$sXQRkWA+m2eKsgkgs*X}>y3${N7Q0L6_iaS@;j#KE@gG(%56TOXK0p;z7oe@9y ztN8XgmxZe~GvsM?9e@`IuaJA)dyLkGYxnh9KOYaP=iR~#Pq6oKv@5Y%x+`DhXrFCn zJkM@>o}1r^)Q}eUZqYQl(EVCtRXvmPdZ%ZHbJg)-4zqt49RkmHo>IrlUAt$u$0F;f3w|2%%&$)Cb>P54`Kt1AM^G4sQG>6{rUP%b{AI#^lNpT{M7~B3hCrskEy+;#yWv4 z);lj36_H0)h3g!)_MWd6uTxxjoUa&4S8vqZsWD;-8kTYQ@52tSh~`r=wIa;#jmx?H z>znv(Z~hXoXIIl`JJSt{?G4YSNZh(xw`HEFB;LXe2d<|d36RfEo&h$2Sgr~wQM&-U z9{4Gsfi!~EF9CLxq~GR~z}S)M8S=w9nw%wr;(t3g{_h{`N$3ICP_4`iA{zAA6ChWb ztYw^)0rv~g#H_fkFAwAp6HMTn-qsO5$D}tB1O%T90da*+DGM7=wV64&xpPuMI~HO& zUeeUxk2TNEU@*JC)cvEW2?;$mWP!`iZUKQ0i3UlMs=q@bpz zCgGy}I-|n+_NvFPowYS|Rn^f@I~&^pfA7ZGhBm|g29I2WnkchRo_Nmld%54UHpW#F z8~6Hnq+9ZbRoH%XhuP|H(3-!T>&(SMmWOD&$K*c@xiUR%3`bLHczW>)PVXkt<8u6~ z6-{5cPnkr~BA5k&B!!ylLi^M71TEiYDv4LwmP&m9|&Rb2^Jr=e~COex2SE>NU)&2jH&K_t{!c7%w%mq-+a#BAbWAI%tmgz7B@as{CK9E}4n}kz zS9nBlD?iJCokxNXPoq~ zGo5NT96O8|EDh%8)DLZso{#JE~1c(-bBOP|YhY>4Y@y}Si|W|6 zMbbbSYk2BybPipfyYmiYyQ*F3(J30t?ib1FC%tZu-StY=nT|--vFlNT4V&VZ)IPk= z;MQWbC(pAgA5xXNLyL#BmQ?P`}dEF$>qV|!^g)*4`@Zb;SLQVkst{BrL@7Q3pzY770G z9VmLkz>wr{dx{X;@yZla=7Y_xkO#oda}2UNUi#hc`=jrwP*>x}H~^kP1uM@o{P4%@ zGJ3WB43@POuZPD9T{+${56hxI)Aq1QA101QQ>D#Gaxe&AMy4+;id7w#tROSm1?pkMJ$R4jb!(NyieOQtAPd8u@%%VHc+jFA#o9JSUpqOQ+S^ze?k zQxa_sCPYynODB)1Ebkp(Y399U#X5Vr=;>$VvVEz8%Mh5hnbUSakrV=2eZ>*o>*Wicnhvr-|mT(!p`Z=k}`R;_X?UGMs;VH%3*8)-!*!fbA9m9DlonnMv zS_xXpRs^-81^Ik`2zmoy3zPlTug9+6y{msqD?8>gvFmutV%HQC zq^k{_pHsOH=~blpZuLH=YIMQdM*Ay?6}f>cbK?3*vCw~KHAIs0M4@>OjKcuc`jPoS z`kTHyzz%y2D11HD)B$6xpx~42DTgkIx}L9nN7t%5oAdVeev>Ip!1{Jbyl(vklOga! z87tDi4S3t10Zn=VNasloF%(oPc>DO-+b?!_0-*9~r7VQSX4-Q47d?}Ou{Dl(0Lr^< z3A~BO>?Lh?I08F86&2QwQwnJ7;ooWnTnUgJREp^TDDWUK? 
zs)sPeJ%oJGyiTAg9dabN`Z__zO0T7hZtB5bu2fE}dBj6Kb}>APYcI?6rrnNbsbwUl zn~~y}pW%Ua#fk6E0)38~OYE}bM>8hjvPtIy)V*&mo)aGq*U@G9I13X32{$l9XTr%H zoCDiJWGM|H1!$onAM;2l3@kT?MR=%~z+x*qrW>RnNpqcFu_|+2C<*G&ECy-cep|=g ztdK_8?DqKZ@zF^6sGYxN9<1$2yFmYqNUDICo^5O48xCCz@4;vl)*+PYHm14(`D3L> z4Bckt+4^-3csc3BLkj~IQRx#yVKU(%24uiGJrFZhH-x_*&X9?FzlraCeU90APAgnn zU32tgjJj0OS7a4_(jTsY3uy=x)bZ)84%?W`9oWJkvL9vB_M?cu*8(%S3IP{;V@@F9 zFnRKi{9-}PV^O^hsooc=K1<)L<>4mLcl2<;naG8Nq23Ykp+9*>(U>uq`&8q~B^v4W zUA@^xET#<=O*EQ+Ux|KrNCCEnpF)!*KBD}~^7eoKpZ^hZnrLt{@GF-Gki-(rQ?CIq zG^3D;VMzE`W;@s1^{X7zRqNepfIj%e>0p^XP`rMX8#<2yE>v6_Hh%ZZ$5^EN!a!&y zI!hg&;-2+r0Wm1PzN7R|G~%WFD?x=ZTZ)V^-DsDYy@|4(E+%Azo{XU1I?WkvsSC-Q zj`~=ScGCXj$Iqf!{JTI~XShLZLCC?7p*b2dsyqIfG}NaiAf{Udr~64yknL0VgQJ$s z8-KT8LdF~c3_N=ufiH$z&mYZ^Y}9I3%{>vD6mVYX8uS>_hx!pK?RlH2jgE|=p9o%H z+%@kLsP;KCZ5HWKA2j*=JcRBWW%E&$BeL83(4PucM`|JP5H3d1qxCHdCH1pc@-DCi zQprejii&2@&nmFTNK!df3`iD+UU2@Uu=4c z-L9z}ly}Lffro)_baS@z>cN?X*LzuE7%pai+vnWLa>I<0YCX?MBPH!|C7z1fIm@U% z4fr$G!(>0>JUon*FW1$WAFvO_yKiL1ndstPklx)QG?mX)zW4j3;}jtyUAf@#qv3PO zJ0-pWb6X`!P)h_v=NH|?dHwZv^CUh}Xju#xG5urappp^vfl+8|O_bAvQE>)bOC;`M ze@P17;6bWdsM880LU#fA<=r-1y{gKnH(}T$K8e=}2jJ^%;H|!?b3Q`JKLE_PCIF)B zfxK9)BrNxcjfV8d;J@RsgaK)QpQewz3RjG3Y+OqSyles zheH}^L;a>CI)bCbq*`cl@Ag!fhAC!8TB9^5<+%gz!$%#5LPS(qi3x^u`xx54mC`Fm zA}$ccKw~v%)BTrnK7rq<-&d-8zjl<a!vBF7PLK>=)FU%-lJ z8Xm?=NlEelK(nVE!!>vJ z&K+_F25GKp#B8>1zD}*!Ks<$)>+AZ=12>{iQvtp`b2C6eXjb$4a$%0Cvv}VtuY8`}}?3a4^rw5DNoegXq;D zwY`3x^?sm)#5o1LfO>LvvcEn6I{phXC(Ff+Wx;Io#?V+Y8428>p(8NV%aHj=zO=OA z@2G<@l#T9nG&pUdt6EesIJz9v{#(r7_+alWX z-3w!$+l^eyh|K4Gl>Xg#+S&Yj31Othus{3`8wgExdJ^vm+;jbyaL&BK_$(}g-fKru?W*jj8;N8G*=Ex?h zGVo7WPh*RU*Z=|Iwu_5Pf}z#ny84?pZ}dPz)eJf|*#&dgiT3{Vp(nH5Y!kS41= z(q(BYg;nNdV$lyB=%&fB12ma8jS;k^ecX%Wnd&WT{6ZatA0CSN&oa)UeMp0jCx>>m ze(0F7_#AB$)!cBZC~NW)e+DCSADtJ*9%elO9@XXQS|ATiY(`xc3v^BACx24Hv!Syu$bKA@#gLZRBj(QOk^pB(hG=UC9 zt#co+hhVl#7-RzOIXpYO+}xku?92i%;n_PIF4I9YU^CJJ@4)v9d`$v_gS`M-fpFJ2d38k`&~k_pDim zn#XVR3Q}pYq1xI^r%Ga*i#zmjN@T5*yhUr8^LDgtG?%2Vt-bVdEy}Z2VtkuMpH<-; zWrRHd>4XcuL)q4$YFSniw{DECMVC$HCk3u@LE4WFss9w>1$RN>k^A<&lj0~%GpfJ{Kc&2Ur=u?!HXo`TP_J!V-;$2ATb=gsV( z1iyho0QrP!v0jQeVP)7FR&5>(-#n*sAJFf*dr5Nm#miVV)zb}I8|L$p#Upc3%u@km zB_NjaX;!ZEVgu}G_cJy2kHzrY9>wRffo{2^r%)|xj3>P##17vh2+?pzPVXqIatt&@ zU$i?4A3fiYk&>i^}eUYnPaJj(X_}J$X#4(T7+!_HnkdVO)0bBt`eyd?jPcUT1)IZ1)Bb`fvjmR zf7v+)Ysnz3w|fi1R)9~10!GUnc$CvKLU`i5|I89HOU!3*#{nDz1drdafE(o(R(<^h zDddo!`4H2n#Yg?Ov&j=t1_ex(y$KqCjX-Pw2St=}HE;pbMY&;odRh<6wp1z~2fY~O zYN5;hl|Csm`i3#k-p^7^jb+v)1IeW*&{{_!4^2&x1Fc^jfUA{1f+3(OAc7tPsY}0; z^c-NSGyxv|^u|Vr>j{ISQW1NtdnLVU-X}m$IR*%Of$ND4kS1b+VXVQD4-dha$|5X? 
zc(%E@X=P`JsBL9fD!M;mwP4%Z+)VW{a5xyz%oDx@{-oS8KTz--r+7WLj^z7a62&8z z9c}7!+iXRd&TgWX5HG)zjNVV zekj^yH^Q(=jvvzGq~`ggqn-rYO zPNI0gS4=ybcfg7VtH93)`Oe`F#*)%*7TU5j9jN0^=Bhx&AH19Lul9$rNSi}flT=LE zL1c!k6o(8w*Rp8dP?>cq5cpue{uO1sFWsi$@=)jf-MphI9<=gF`@*Uj64waoNRD1M zd=#+ZGiue_ri9+)V^?;5vTa;Q;4^4_UwvIs)M?8U96>NC=jE=ktYbys3`8U z<&;4=8}9Uu&zBof?GS;Xcr5*iN8Lr3%mj%?9yp$wm+qs5&(*c{(wAh$y3z<&7xL4R zYDul|Wx4D)2!#imM_B!BcF@sK8X8r@loNzWIf;v-c>o@B`v`%YfCPWXPOjBVisS;y zeZvu9^Rr%ytD2U_2j!u9t#7AmyaWZhy;c}L!dkwtan9XU1D3DrBPsEBq3r9VB6`_l z9nZZBM*0on18u)q?W4l-qv@arAj_Q_ElRj^(eaAeH*NF%)m7W%m_Lj0l)wMa7nco` z-`+sh!!%4EqkVYc*cODAnPj!FB5rqxMylmuJ7$M}X$)=~f;nphMR7I8@8}Ni?bA z*U?ArTNA<$|4M0j`E zL6qltuF$6PaVd&)V7w&_ns1sHg z*H%&kYNv(sYSSSms=5%`c}heGmIp8|rv>#1UtiCkyPD2yb8o0GdF@?mQ=_en3107Zu|_J|Dk_>&;A-{?qQ|*?^fy3W%aQuf0u(P@3lx1KSeUln$Y+L?TcGhCrjDB@R5MT{-ScW@z%Sz)tj9e*SbS=m~l|Lp% zU0{6Z6Y{IHx{F{V&{OnE9>An%e2!x0Jwdr|fh6GJV4k+=GaWEMWM?7P56l}^y{O>& z1fQ``JreNrdL=yww*n`VEcYQv{C`haX5ZBTzO>cJt`Sg6J`G`Y+I=zqBc83RCHSE= zXfeKj;^Oe)M;ruj)up3!VWJ%3A8BrCQs~jA^d4F! zYj*sY6e>zT%ssFurTTVHIsKg|3~AKzS*!45b+bUXC8g-@J=!Qr^7V+35p0`a69{Dn z3>~S~#rs8p@P~Vu;ZL+x0wQy%DCL@tbolF1DbyPsB@&<|t5=dAWMb^fq!O0uFW=yu zYXM~*Ujx>iky=b0{^x7S0>WhGy5i|E;`tw^FFX9(HsWyN`(C`0Q_bBNc*Qln9(P}C zuH*3w@&E$0M3=tN^gtDKI7}*1ryFe%R_*QhCd*ynlLD&AR^qlW$yP{D4(vC<{H@XfeJz_gx)W~{{0I7 z^%@HkaUL}fe7c8aF+h_KN9B!&3qCtHJJ+~cudyF4_uh8}^WbO|zoI=)mkR+2uYPZe zXgeeptad#&1YVj4s;a7QoSij1ZlFHmf3tl9sC<9*(GoG7lvh?nO_6d$YH@itDlXt!_wOt(?GfZ8;KI6QZt@OmI2I3AUqXO`7zmfdQz zu-x(MshL8;r_vXWR>9?t{7A5#uUVqu2m`Aeqw>JE{0^l#e-40i6WCjjz<*6k6u4>gb3M4iQ(gh$GYfij=j###v^&J#@ zGmD)GZ_bZd0mt{>WrL+yv z{&}M8>oYT6Kz1^G&adZn{dB{X;Eie})tJ4%sQf=~fCO^DvN<3@0NN!2FysgX4i2Ay zqR_bK)6<;tK+}Z^`!ygMKw7X;1YJd$L~hnWr+7H-WP<=I8yMi$+SKE!ruzi2z3R#^ zol49&1a+@+gNM@%L4hwD08ZKlbyFOp6F_}4fLv5aMm!|bb|XS{vE3UD4$(IO$auV| z+#-k99hqdGFX+-mEW7@?6*KorFH{KO!FDfJK}nf(TjO`|KVh4x1$w=Mx=-Z=;AxLv zeRreSQs)049f3K;$yJaZE}026_dl5!PyZE|uF=6dodf<_(#LVrO zUo%#D%B~X7wjc6=0oqd&P`^h_3w#5>v;dH^82o$zSeNkX&_E{~*Ln1=CcTU=9e`4) z1R=zJ3i%a+elPslWRS@0bSK`A_>7sh892OvK$D)BHm{rqO_2p}iJ}5_*kph&%K4fmnF{oJ zg1&iZarW>yQhn*L(&`E;mz*}F*q%RN({i8*hop5U@WbJMehDHX z0(|c_WFaV_9iRl!*=b2hNvBhw&eyZF%oi!0{YpPOo~4`(q+K`vaTXje;Tgzq1$Fi7*3M3J(8(wb9L-kE4+T6Cf_~@`iOm*7VK@uf@j9}Is6Dot z7EGA#fL_P5X~X+q<+PU0{$)7*$8|Qn_2eE7cqS)=2p0=I%jYZh9g=J@&{&s32>e+0 zJLs&7Tr|UND)nAnHH#}d?LRoLzuRd8OHq0M6Z{SH9Ww;zI2TL<1zf(K@b0TZ~%%>Jk+fjeQRna z^A$c99kej|zpMdW)Ihc=4)1ZZgHZmBKga2>LscRN(QHFUlYIuKy!}v z*(Xn%qmr_+vc+)Lc2S7MyMjWTf2{8eE~(+4x4Z$HxE_RSG?lNQ_#7^`ef1^VC>uHs z$FV@e&C0>SFDwkxk>t-?1rgxwo|~U+CH}7uS3vCrqIi}a0H;e|j&gS#oF15h9uNee zq|HF)`Rb7zn6nne%>~jVYZYNT?6sO<2OFEW`T3T172g>D{w?SOto_62tkHUYb7;;- zsz^_BNC5}L0^BpJi&Ohs5+JlUg4hYY4d2ytyoD2(qk|Ym9Df#;3w&XHq-6V#)&7qk z1>a&9958ENTwFveZUM2W)#?6g5?(vxr%#_!{~#eD=>cA1pu0#2Jly1;eQog`<26ip z+iU<|T@)xd@YbH|W)cfuV?^@3#vEpMiH4#p?ysBxB?H`~k`HD8W$Q^Fk0ozy;*rJ< zxQ{?39U~$r=;<6MOJ*Y7_U2#}x9MPz8|0impkFPb0W1?y%56vzuCsr$1Q3u*tNC+6 z|9d&`dyS_&(Ca|x1tQp600RxS!2lq?IKY|S0~s0Z7`4bP&6kN-kGWr-;~dZQplLZB zh7rL&*oy8m4&egwgC{bAJ?q?FVcSC7lgr@XmVlw;sRmHRD~BNv?n3Q@lzNvtzYbDZ zt$gQrC-7KWn&T{Zr)@L*wg6fDt*KVke(Ryc1=i`J{2W%T#Q8U8{P#db12M0Dr1FEy6>_Nl#Z$f4Xl3R{L8^D!f12l~Wotm=m;#@WlR+tN+K=j)4@UF_(tW1BL)M5d7gT zi0saP_Xa6k74xe%q$}G%_ku7vCldU=g%edk*y#>;^@_Y3q-zU30r z43(KtYSd?duSJ|W>`G|lo|~N%&eNtvup>QfS!=jl_9}{V*>5pefkl|Vcyt^G8WEEL zAQrr#WV~8mQN!YJM@t23(ObE#;X&2CpWmQ@;vKnzIY+fMa*V1qlZh|&{lK@cjfe6L zpIur@S^%`Vv*`vS;ARc7!cW%pJxTXaJm}yLzu0DN-P7{`(0b#I`b!6$w(!wXlGBhO zTFU&uFsE9p%@_zU-u{*GH=sBIE)D@u%}LIHGhGY}j=G?HGm^;3&-i zXfEs1s8hDV%MEg?Uqm?8XY@9TdueUSlwlk9%)ep>S>d=lg*T^;b?f 
z;#^GPiTt7C+^wX{w`BNgZe%lAC>h!_Ok#V}yOT1{wK!mDmQx$>VIBhKa&YzD;1Fr( z>%##U3<_^O5GWfgv~OP}_lSbIJ%vCuOS_hW%q1ma_GfXCGs4q{(X=Iylm4%pG~%uo z@J^Tpjic?I9j~IIqE6XtFiSM62Hd3?kpJKf&@@bdN(xS6!7B;t5W9KvCawCnFGRCo z{sOVJ%;@(sH&z-zYomJzY00!RF~RDta9RYSZ)GL>D`V zb}44h-IJnDjTe#$gcH?BGoX97`XTsWmJQ{G`qUb z7zuN(7$9Ks%pwv_12hFVo^>sfS{=*`w{yIwKy7(y{=t(m&a}nLh_LaQxMl8?*C8=(BY&lM-O?nA zebrNdon`v1tX146k~xehc-IXtyl0u=9V!49WsXZXH!DFESJ8%tEUKHSs@=$g!W}7l zb)-!}Z_!%T5Afy_jDHyJlq<)1oN_g1>iM}2_qY}||41>REdi`VKf`uA1&)EP*29WH zs))6*dj62pchVW*YdlHSx7IkFLU^l|rdrH+5(QG$R z0kO~+(Aqjq9sOnB2Th1zmF=Hvz$-S*OGk>g0)YTxQee`Qbd9NIC&J4yRAD)6{7g9P zJSRWEFZyj>>RwDMD^0r*bDEZ7#|uUa2M1I@V_96QEVjk;CNmWXR?Ott$tp6$V6act z=S=Q@d^V3wQt3ei7dvo5)foRxb?}`3Y8iVmQ6s2BK0o_JdWxtkZ#8-^G?S!ft6L$< zBa@+-1t2FcZL5(FTleR#{39{D=AQQ49}q*#5M{#^Dtm6O&{>`$SqrEchHbhyGNH_t z+(@Bxq$~@|kJn-4@;K2iK_g)%`t0+|&Fvuv=Es%T#Q7V~1eR=VS^!Joe&W)1q$QT! zY5CC;&_A!2lt*-0YbIr%>euG;DDLC@BKQ98)W+G(GmEo~ImRINIv{9B9WR7ppF3f`-|*+nmqnWFVH78c?`jdZF{byy%Yn} zbeTGr?$jPU1k`Cp@p!Z5fs%qz$~r?iKOmSj&o(&x`=_jG!KXD7JTZ8$iX-3#vAP7X@;$prT3vJbLQy^YwLr zH73pz0Je=&KK8=T!kgE*XcG?z^K&GuNdLe={-bf|1*2jC1^q_A2{$dN#lR_uk@mjgZ-?FaR+Nq#aCg5{0`$65N^8ia4!TX9R?8((-?l(KS z-n+Nx_nQC!t1wiQ8*}nzi4;;X`VoY8?@OXE(C84=kOabus2hXB<+~jlx2)i*jWd-l zWu5Z(L`THdR(AmyPMkZEF2CS&YlACpzkK`n?&&!X_N65P^oQzsq5A+-v(Hko586*AD+Rdw@=PnRzNsu4G|Y?)TiI9g8g-!`0@T*H zpeA+j08rkgrfF@DYRctFV|mc*H*gjzHmco_y$?hBMx)zT-{PV-iWXRt0>Mh3NH*%) zg^v_ZhyI#CCF|+dwrUU{*SEyP)QYM3tt08D+4gY^71;Mu6DXAHeo+j}L{@D{vN0nY z)E&XocZZ~e=5r#Vqq^&tcBZPz_g5r2Tk-!S65VDRI!KSdJNtd@RYp;%Y;o)>t&)0U z4cN!Q{F4?7*uvJUaB-0$oGcH^>EfKJx^a7|*(FPduxDM95a0FeY`;3Y{*irw5E#0`(2Z0t=ZF-VnhR@@`_7ZEq1LU?6+8G{O^ z!2yoe7u!!PND8Z|Q>y8Py$~55=mB!<+E9Kg@U)Q40>k5pNk~&22FN)~?p z_vdMARtiV}bO~T?uPJfYIL1m$@tqIXQMkCc7<9~@aM`XYx5->5w&DA55O04dj_tvJ ztkZw{QDPP`B1obbn90CgM|zayV}Sfav!LjV;$`sG0tuG|NerWAOonLJPUpKh^<>YQ zxRO`#p~qj%@Ts`7f%b!Q_?PyhW#?K?0mZ=aO6Iv~uu-MVT6Bnc=e35#DkM`vjwo1q z5s zXJeJ@tw+%{ti~@H8u~YgA+*6P%Cj92<}9GJ`LQ@8K-437HTx-_C=aru5IIzWgdE_p!MJrsn;i1OrA)_nx*s8-Dq=B)?@1z6M1^hIMN3RT)%(%j^om`2fvGW%fFxQdA6|SRIPHdeT>do zZ#6XoY<}1W(LhF!#{FP>yY3y~BDr6n^{%m9l>h2{4P9Q!FNlEc{7QU|%}jKjB-wUR z?RvH8nZh|0CAj)e{-dt?$B$UD2$Bo9UjT9l2-v7HFFk-$6{2ElO>KWW-+X)tn90z011ig7u_pL<3xAC0^dbyAY&U{N#n0Mu^v(5Tu( z^-$4QswU>mC@>&78m5x=8~R+21*mAMuZt(<9OrF*U&*AizUYGP<$8HJPAGBSm9wmEktF`Y& zo~)o|i6S%11qT-Zc>%K15ng2Ic}}2yT9JkJ)zlKel9mfU-MQ63M15mW{<>dJ2Cq7O?Azug5D?|dRs6D zDOx2@G;+kMLLxRTs$1$t)C6A3h`?IjT8vl%;zz)^+zdug>lI({O_x?<1l#Lk#JfCT ztT2Gfp7P91Be$3KP*T4Hv$@>)ZMWGt1}ytSz$cf$Gc>Bh!La5f75&SVss7Jz33v5{ z_m<0PiGnMPww3?^w3Z_-x259pJjSMF0L9H76VmpE=8`X)M9-d5uA&wuD;856Pw7(b ze}rWok`|&`)Jz-2nmnijxYS4s72!|R*yN26{Sfu%1~%>py^<#V7@MdW;*{Q~9hHhb zQv>pF=Z_ogGnM=Ad4defz1=jx{di~2b*ML)iP<%(Xc8iLah`0tm+|deVCmquDS)Ym zK2i@(u;=t3$NS2E^h4AvFK}2VY+WAVJp`y=U@%zS*ce~E9*`Sg)5w2(SG0p8<&#~h zst$Cas{?1#YGki)j12x(%s3#^wo&{C^z^$D`uklFN`r-k1!sa}sgKvV@%45A0xl@c z*BjRc?QtZHcd`p`TYh&5rHWnE-^@~59gLulzahJdaV-_0RmhY}G(lm7_}Gt3>e_x|U#iMVrUBszGaxrqt*Nqk>$28nF@@n_o0^QFqfr);!KUY4C-^tjIXpKMqz`GV84 zQEi)07PjYKjb8J+4d(#XtKJ zfK9G}-Eav66BHONp>%!fKbs$#hmQ04F4Z$DF+sG0rWLf3!+_{G)X26iXm_OXs#ZbZ z-3gO*a^AS+f$+HI+MCi|4D;6~*%gaFM8UF7PCw54!v&aiKkw^3d6HSCR-$)F5)>a0cNaUeogeg9O94FiE*=9xXIARablq_4@`uLM9$BB-Y$e)=+U)k`jtrVQR@BFWvlz$5KHH*Q40J$x8pHA|b$08vkZxc|aht^$7O2XzXiUDU zEY0Fm6=|Ji99Z$wNwQ+z_Bt*9EpvB?{OPiBu49|Y6g_a8p2*K*P=X-XD$H?Gf0m4I ztqICdTcA@?*1y8V3mso+y~3ntws>|Tisn{u(SocUd0!)Tw`0vzt{y6e2eI@ICRt0TpSnq z2dk<>fI|Q`0%iZBU-FM1>jqKKh}e;JbaV*VUZPbvZdt4?eCWr&cMtKz<`Upn)Jun3 zQ(?eujixOTOcR2OP#YT?Zvi_PiEturT9hU~&wxge2h^0ad5YQ&EwtJo*E54vkn&k< 
zHhM6Vo8;@@_p0x{_|73NXN`(G)Dr-u22ACf7DWrDj7G5$xt(8YLFoe5&vhyP##=L! zy8(|YkME9&TbJF0`78!K&8uXsw;;b#<}(~ z0E2pvz~rYGI5P4-PW4f{%18@o2{%7c!aqgOfm{U{E8@={({&|-X~^GNz71l>KYt}S z*FdfnaiAHbS%3_q@>5z5!D!Zu6D7ofoD0L=>9?XkMWxyLuCfbm;_$`TT{F3bYGWPsShK)0x|I~gaNorC8sYIO$ zGF+((nEs8x)R)C-+W$&EH2)MdSKh+6x(N(y_5@^Y)O)m^FFPA1@&W7Yh7-#pe#2|F z^!WIA0Z6AZf@@B z@p9XOvNHK_Wzd{MnnyhXJ(E=MF;Jf0LZb2-#)`ACBOBD2OnDwKZKYdjCEm8|6QEW* z$)RNXCM3sFE5Hi#n6$afMNB;X1!(Esm7Zwy-J9gPYS_Lt^L9MI$JvJI(M*yx`q`M% z>&@a*-xbu|ebXMxROSZ859L^coa%Zz;|*ATfNC~pBoV&4r=01qtXY$Adh%&XVgHJp z2C_xERjiGYPw0oZpN}FvvQIySJ|SJgHujhrI^IHDQ3qs_X^q(q*~H!c9WiQ(`N8?}= zV>*5r2wO%rvA;;^6Zc>wzITogX@VAPv)O_^S%c}D{t6C zgS(d(E+miNVC^k+w}KtqS-Lh+$>oP0T=S_};|8$F3H0>5aeRDSH!?D!@jBigaW~XLxKX(M^jPk=7fAq*M&?`ssLuB zZ;=$3m09Cr_5uxxGa1-n(Q1BC{JD8Ub_;6{|B2n9CLnK<8C?#x%Kl9} z)p$ge+j-tsOIQEV2O0CI?C5z{jR469ug;;yq)^kP+@Sapj{ud)u(ZM&oV+w!vei+f zjUZn>QX+A1vYg7nVg?H`n;$LoFj)VAcGud)Y!#r=3et*%#YLsaT+mo!+vXKMLrljF zf6nF)aqJ0>CWJAGx~UtEZl7}*1FT%uN#5dIB`F_7f7c+XS@sjF^duDG;6RuQ>mK zFzB#6D2hr%{?q#Wn-%_jQP(PW9o`=S`J4}!wnv2=>W5BDRQ2@$_*)ADZWa944ahcE zQzIaI{KgZ=P_mh)Eho`Dz`U|T%PIbQ3g62B-b*L{*@xi!XLIFG{)_Gm2TC!bYPb$M*tV-yp#_lld#!L!kU4( zgnCsMxuUR}Lh1TS?Whwa+`%3i?%?o!{YPmp{(*Z0C(dDU;OLC?YN2ky=}+ot`9TYZ zq#Xq&FIaNWu*R&tG0VfNNwqaL*zC}<+Ii#UyGQXl-bPD9vrCH70}yFoe18SFCH?AS zVtmB|hCDWWi^Ul}&TFG{`aOmNDk5D_2hPyz>a!nM;rJF-NFFb}_W^6;ASrsmc0unf zB)ps4)W_9fqtce3qbt8YyDYyXs+Dg`uzen7&+mrmk6<2t9fZIObevM^hJlIfHn+i< zMt_0HkmmU7j)^Ba=&=vo!SkZ}zv=}~4&;B3Opra$U0lT{tDJH<=K+5V&cDiD2)2OM zOmb8-4Nu~s9N(T4H_T_!c-?Dn((kAmvaGyzn({LP?2QyY2zt<)o`@d&vLKiB@d_M{ zrv^$FA-985v(W@Q%KXeK4}(x#Q9E!RoybVegh#(jSG>`-dRN>BOKW{;F#dHG1zL1?(j?W z;$JByeQY<5o%^^DGUISW=A($&m2E9rcj|a18}ym!h3t!4FITL%*x5G`|d3vgGU@xgE%-uk^%!I=Qn4?FCl># zk6DMr|9-Qqi6+j>>ZUT_O!xt1PfO>Q_~<`pWgQfrO!)-kd)It6lKH z!Kmy@-DIkhEZ`9eVzAoneqi?40}M8@=Yu4%FH{##;jxy-W~ejnT|NH;py=c^+XxPp z&ogNZ^adkDyx+6hpVCUlIk?_b(H_qjO_N^vq(yc>`ZVOWD}0+ar$S3Ll`M(cUtxMY zv48P->pA_#3yemsGw7=dGX;C)B~8l}U(@b}q%yde<^y#t!G1N6Y~*qs{nWX#kn`bN zot7VVFbw0IY-^uNQSKE3P8jR=y1L)$W_WarH|Y=18SzG*=1?~Ll1d)E8?9?9 z&R4j~iz*_)8$A0CWhyk^=U=bR5*{~d7PW^X0|~hzW_>MAR~CFVjVt~&quftA>_lR* zb5p4J2kiL|>hLG3Yx`^tz}ftbtO6h;Dm?dINhYZCQO`cNW*aO|Xm^iSyKw36(4`k@ z)i$KvlfUu=-CP=~rB8R{wySGtX$kOwrXw4h z$A5~fn@`{X0N?+$p#`y_pIkb#T_wTtIC_&b0tD&3pPqWFg{W6Ln1g-=QnK5jdd_ZB zX;GIlcOkERv%}?jaxrp1UGa)v9(80p5c+k6@@VW+{+=(Lq$@(u;srO#zZ*JLDK-DdM38qs`%G_!s+lwjOPfO#3WnFU++7| zUQ7!WkGD8#HvSPk!io6&$;%5t!sygo1Vz_1Z&(wXeQw%^lQ} zE@xQ4Yal)3x2@Cp^j#um(I&lcq^Ds$J=sNz1rO<7$lv=Bjz2>B4T`4!=5&?t5*-$7 zK{fVJ!AOKsXmm9@fp{Lf-kGd5aT^mw$))r~Ov-UJD5LMSY<{(QKCZWy-tBFMkt|4( zNPhcCbL~xcwMjirsTXu6Jy#oN&tUmmo}dLoND{loZ0Y<8p6mqhfnQXZ-R@+Jq0~Jc zr|t`1tDQVOyyIlY@aU^lSgfUPX7`=)O&X;uorBmlqs2hc)um}iOStq%V-k3Am>MjC zHV2UwQj0h0l9)~sdY)-{jHs5!)q?OX{cIk;L9Ce|mxF{Jt~+VpmjY@&)y&iQt$So} zws|hV`!{(rr<7<(eG7sP{tRz$Pl=c(5i?4#-k*2d^TS4i+Dd1G?wDLjj-h4#ybnl| zZKwR0?w|RS_k|Cax(3OchFFI$*M$&Jqy(2HdG+dUfPp?NPfY_ir%N9+43yuktf&C2 z2(}7EeV4y0`u{KUjHm-s^x%Ssh4F_Nc{_waFs`hNkj!H%7D9UO#X}4>)i7Z9;>>8C zuT%XjN3HOffd|ZIRhLTi#s8J&%!>bbiG%YBA{kS6NbJJ8*u#G7vz|!glSwC}=hS7Y z>B3X~DFcfisfAcBYIaZUA)g6RUx*@WKJ6}fK9qhzJ~jQbZ|(gzoojut@qL40z!r{2 z)(OE0WN`qxBI=XcDFMjrII%(iOtvW=8%h?VT0`t#v&@|G*ytNx>sx2VaB5MLT*+Tn zbhKLYB4qp9eo$6CuK=(P=)6#=ZLG&g_`$oa5yk!?Uk^LPsO7p=U9krE4xmq(f}7j(F{pR2d)GB6e5y z*?l0oV&{v2bsi4$Jow2gqE%U=4%;3azR8yoYI8x1IvX;k19%|18zdwvyvVgi=r2D6 z1msdSFz%U7(4G);g=@4MvDs~3t}MIel%LPs^b3d}(!IZ1KIm%56>Ng_IWHj77M|if zA%5@4h|TT1PYXCOw}3M2U@^4m9(eZlj*hK>nZn|BYF7-bHd*ywbr3bOm zk^h|}{tH|>jzdw(e10343rP1c8#OjGSOH1pW9NNj;O^{=YzLnF0RE;85d4s|eSE?P 
zT-p7Ar~h!j%}Qb#@XhuGRJJiCa|S;|17X2LQkvf?A#@Vz1KXfBQ_YTrI~F-VEf;ru z;pf!%?GV%fU+(*qP0XNiq>K11KU#Xq+xFy&FT`u@+ zC6;RJlS^i)gx`xhORM>7pHN)yYc|?<;=xm8JjS8`XVLL&@@snn##PG@vHH)JJ$8nw zvVS#2A^kfM!iX@q!$D|*OJ~JhX-EtUZu+9G+?5~I(okHkrf!|dcpdXLKj@1Srjs9w zz0p2oo+6!)BCKFp+YNZ;J~I-n=4eEJK7}aKCaYz(;2c8RG~w{GxBO0C>8ZJq$C}o+ z5Yq7b?R0thpnZC8K|^ZdKIWy*{UiB!7|zglO;W>b_jIkdA;MGYe9+H(-}X#*_A=A5 zvdit^xR+Do2yIdcO!5ib9qCQ7I&6168JYYI*3y=CuBxeLipb-%I5mE3r6iVD+2}Oo zLk;fDWPzr@yxk9r_mSEAoi-U{awHR14=|M?=wW*o%(naz%?{FN#aQz zA1i1;q_JiWonQH32zxi66InH0af$?d82V1QqR1o0lUG6<5U{_;LH6N|?dVb}+?~l3 z6UT@jLe9&#*mEcxMQT>ywZ6r7^C&7@xeWi{2u4NK@v(~33R7NJK=Dk#O-h`bZ^5Vf z_R!mTOrWA0yL23|e;xY+gYY>|{RnINq711rI_1{ntV5l)?7M_VPEs!L4ZR*sm8kTHdK*Mp1BGF3lRxXb7ALMrm98#P~=)GO$ESW!cK9OA5( zr<_Q;OLScQU|<{=d;RJ1vf(d7LgEcbd8p5c3K0Gt8UJ`8v5yES;h{h+uMZ|A50{w6 z+LS&9nw*qmd>TQ7z<5ta*bKFpyIsBPyX=+^k`#xCO(DgFs`!k(zfrWkVVclJG=1d8 zo(lnkqug62_pJ9Ph8(o3kg4XZG-Q6uGq)jFJt&gbre7Y2Y^7cI4z#TnLP&RGAb0*$ z+ky>n#`VmQC+>gG&jeGbr`Ohk!BF(4h#p`Thf02ed;%vUn5&J<$5$!xgbPm|{(0qO z9_s!8S^GAs1}YvK1;q`x*u~|DNjJ>&CXi=%eGw8Q@jBZiM6sUkZw^(VhodA3i7Bub zr{hn&rkHXXGm(}`h&*yw(cpxINRA@f>-*J1^9d(%SBJ{^Oz5ZuBPcetH6-_%Zb*>_ zb`$ZCPki#9BIKWSM+CPQu!YwL?8UZddVHWz@(1yF1_&7d)td%P(2Z$&)xGlG(;TOV zM+P|c{nDRr7?^*m0FK}KU}T$>y}fUGdOEsN6E-Mhazq7n4r;EtAc&yzt_n((D}J{N z5&+n+VNJEa+0gzlcP2HZW@?;059gX3o{`okdAe|0Ed@9mRl5%xbgIO2NB+LW57dEk z3huRg;rFgS4V*qu3QwueZ!!LDOxzr!6olk$BpmF()KRr1D_w8`=SSb!od}P zW}aU%0i{~XQ7Ahl#011{s&DToR&!$#Nd_hZpmXtnl%}8Jw=XvB6nN2)p7JRqU}_85CauG7Jhc4T1I3#!TVgHs_M|O`aQTf?f2iuVg&I~ z3>E*ED)pZ$=E|hw35WAV8Fx(MKj?jsrVd<;xcfDeEy(?MTi z>&D{5RhW5k@YQR7-_GKAZJI|41)0M02EwmzE%&BFtVf2P%$Y`qhhw22BeN_H05fyO#%l~>C^0I;+*uKCqG(si~%;A;`B^Su^J?lqL1#*mb`uwVt z*4<2$o5FVyZ{L=5b*(ytd=`Qjg7^tJ2*Iq4hTRN=BF_x0)OY%9|PZ>Ap7|G zO3KKf`@8*F*er9H-hMLZ8Yh{Wd>QT7nCEIvI9}E2EdJ3arHP|mNq`vv{9{5fDeX(`?2&>dR!-a-L+OFHv z_1}PrfH{{I2bssY^$0jq-Gb(`kSW4bJu5QDfOMhRrM4sVo;Z>me{nY>M zK~j98_u_Xuh0h2q`2ZRQbX*qWJvG2)tXQk+jRGE%mT}$@d?Xy;U9M)i2R*G@Mo_o3 zVAd#Z!>W{l)N!1$OU^U*7LGtHn+o0h)B|)eC7zZn) ztBQ{k-L^HFM^xi3f8C2jbx*83wtS{)%0E}|uMfwGbv)xw{sq{pEXKoU-U97he+@a% zJrtV`$$^^3(A{-}Q>x?-7XU2Cp8$#@R%(a>k2=t^*TN$G%a?%K+S-ho5#{!XR~Sp9 z!W0YhmNw@gTVT_WJa&~EH2{FmYf|7|z4ot)px-qn(G8l)RRAr~3 z(Q=xQfO$Z4KVPMGZxP=xtB2$IQ*tMGI_IXm&6Jds)G(cynUOoupkifJc9a)Lpwtp(PlHg71-YG)knXW#aT|2H|7QfC2lccvbdii-63D*C0D( z0>XqCAPF%M2nz|pm6z{^{y)~v<*mdeOLv z^zUv)y*9!-$t@rtK!4(d=t|nF{qqXwIG;Nzf(fDX%-24MlpWozvy`5DTy{g#*c0S3 z<&@x-sua<|sbA1wU}KYkCUBZ@ZZ8Vx#f=uyf9SrY9G^0Y;J2M_^G?FP9aj9|#uwrP z0kaCsXx1-T03IQPi*JM&1--5Mz;N0zIKHQGd43{0O@$Z**gBiUV6PKyB5!b;W%{6Z zvljYUKDhd<%ukHU^UEaqqkJ4OF)yC*w-kLJ@XEOH zo{-X3+z@WzA$i_=baZstG|3bw6sm(gV|+TdNBqVOgm3FobG@Qb!e6f)#N0BSv{HN4 z64Soy25GOYmWt0N+Mn2|xu<1J{SNu4Bz7@wl(^;PKJIKL4QQOW2o0 zgQ_4#0qxUGFd`kNjTq~IN3#~}_z<{D{yt=~K@ku)3+^Z5dTg)viKA}pImyh6|%M^)}+cjJ{=$K8$k}t%5jGrxF;QizCR<(2Y zv5&ubxN2GBJxN9OqA{t;ZN#t>T!@QG2Q8!%pU}p#cWax?*}-$D&yKir5BtTR4hR0{ ziz40h8ZfrS9yR}IF7BD84s927*;fN4O7<*Qgr5J2U=l>VJe%K>OW3!(v}8!VmUK7& z61Q?+>m4yoT#FXY?vJV)?CoccXesZ}!gKf7rK!KF)>KD4Tr0rrY`o;9_I{7XsUYmV z#6KcI6W_)RGTLzruO%{?qDYmA*$}ijN`_F348u=Ten`~V|W=T>{FC{cR1VT ztLS{LfYc^zB>Af0FFv4lc?QJRMwN-@dmN_)yXVC(;M17f;*7(R--OuQQ8Y-VjMuU2 z$5kl2uIbpuM4F}2rs&#SDrjhEIC8gTUK#(Yr~6S@Szkr!u|uKDuT4IesxL$?HFYi5 z$`#W$=^i(rCv>=k71j?p=RxGn8kO9&<~uH|DWqwqaDQ`19hjOz7u&0Ahw>6?z?88~ z*WzP5XnSWgk;)GCwBR8AzI_3Z84Q_F;Pq`r=fuIws8KdWUm{u>^bwAM5zHCZ6RO>F zmuTVY2D^0(OLPyZzWEH(-EM|RcYj8!XU64-p_gBw_>v=st>0>Pm#JDMyvs{;z^~&u4uhJM?(8#X*iC%u(BJd6j)2k7u;XE3svd7Ui%Czm>141ow$?d$ zN5nnt0ApL`lseG-Hn*4H1+yOVY3#ZV4Ke> zW+X$|k5vqV_iirn!9lQCoKlxwV!!fB^hG`q(bLmcL61Nk`4mjWM9BY@XqP3NVWQ}4g 
z5bK-5-D5(<(z`MH%I1bA*D(FdT1lpp@26Z&)Ngv2u)BLtKASnrYnOT>{e8Ulrcj%J zdT+a&F75SeaoA7i?up3DKYLeM8LC-bK!kRnDJ7nIbeQp7ZLJhqr&tod?Pvta+z$`p znyovT@#@fAK&A%ez45u{wAPND#U}}WK6lH4H}D80|tE+&sPqSPP88Vbfb19*S-1cq=xs5a@30aCZj{ zNplSkl#h+bsNbItb)&;K=`GQAnEP^_ojYFJyyYG)LI3{a=j36gS1fQQAEKnh;A52N zHjPIC=~wkOv8Or$=oa2Q6vt7mQSR>0$d*q$$~Qj3q2_MQInqrr(j&_9c9nS=CY(gOH>pYKely0@gt%Y_ zXLAxcO4+YkWPjd}kA))8ua3-m{4w?o)O2%-`k6{UqC@53Le2${w-DhM=H!&W>_e5h zxR7{Y^B1}aZ*~#bmSqawyjm<&kQz|_>l}Z6G5~uz@gZ5tY==$2#Q`#)=P-u-0*oSI z0kLkdj=Sq&?g*e4UFZ1#U|!UASe#-LbX>@a;*#A|Ze@8mL1^8;%PgItf1fPk6fJ1Z zn=Q7`|MD|-_$5@Q9N<_lnbni-;9qqUvFes+A#OI%b+p;MVbdwHLvxeUd#u1X;##Qq z--Ovbin%E5?nN^UPJn`jM#xeF066M`;e+)Gq%UEA>Am0L(ew0jlTF6kPW<-AN#E-{l6G|`NigU8L=G9eXeMZSyUn;+bTw8BAVt~O~o%hlX7NDLT zgyv`L_48PfUvn=cIU?**l%i9t0o_SVkm#yioLm*ni>p*|J4;JT+q-mp<}cDB8A-{; zb^4PhZyCPPCFr$z*0l3c>${p5PblfnT=R;o1@F~3W;sh5$FspCllG7GXQ71uDq zuE~cDL`H}cl)~^)ugSdK(NZ^OY$PY-q<42jZma$aygzqUq>mjJzrTm!$b+Bp?JvQr zXGaQL?FH7!0I5+1!}G$d5cagCzQCw{G{R99{PbtjPEYT+5mr^adv`@kODirzC0%t* zp|DlMs!~HgRkXzDLHCoFTBKzv?pV-lx{!lYzqU|<}srOIFD^k~#PU7)?(wB>6Wu5#F8tZM${JZ*h)Ya#@N(-9kc z`cH9-y1n}eNqHSbiM55rL1AHG$myg{?9XC;mMr>)`MKlzs=aAH!5~@PY-I8uzl(j~ z5j6QS0=4gkaA`a5a@d8z&WYQO0k3H-F)E3Vz}|$90zfC2&8h6C9_9zc^=l~|D~vZeadqL zm2GQQadf8@9Nis8>k%3e!OrX!`r^eYKv!ni&D@v)<=0`YdO@!xjbgV&xJ|I(mV33s z>sNq}Xu9{q+@HVhmPvHZ0bdQ*94KNK7#J=Q^`SL$Wg+Yjb8gRafjPk^-~<>SPRu3C z-23y}TjG)fJ8w*+GV(Kq{v~L8>HEIWB6>sc2$P$!gTtYB+#hw7HGVqmzy9Xtr}P4l zm1KV|+CTUC8tdQU`}_Cc;Yn#}h0*THi!)%lpryCw9IN@~8*h2c69mNN*%1t9cF^zl zNz9vxQ8#bKT)cGYC?71X9O7Ew=F2hzZ2$X)hK6Si`XO6^=+B!IMT3J(K?mk5DX8$r z-WhAeJ@2Wv^$ukcpn>_u${Wf4T)Vt7&eJ1^j3j|8^>8!S5I*W36u7#8?vn+^KHv4V|3bOBJ8!|U<6c$z>#yxEK`c_n^|v@tf6`wlNy*2M z<(Is8(L=wEzrQ`M+X*y?J{0d{Khv^pU%Wi0J2ER9DAGRQ39n3MDgxyelSpNeRKvxL0c_wWaZn*Cc4kFBVG0<3`QS7k4Kgm21^8Bjt zgoMc&N6!<1w@?Gq8nd5e5UM9{aBcrSXGj?|A?U-~D<-S5fs5pmzs4A0CyLBnRL{4J z1#ro`+pKfgkmV>u?BDMEQZm0jrC+%QO%+HA*mS?(1LW>!Ya2mgjS*Bk*}RE|zl;vF z;vG78@VZCf*uiaE>Mo4~E^81lfhjs3~aAq#FO1_ z&xoDuE!DkMAI1b|{vg0p++JJn-B^nZ1Kd?0hb}v;&Dv;J$X4L&UIZ8O+uqFt|4*^_ zu^Z`EXlQ9K>^VS{Qunfr7!7BUAa8i(%0oy2U2RVDm3{i;$@7GSQ{Cuf-`2Z2+id(} zRb;TGB51|gCJq{trUaH6sMP;dzBQ}S@sk>B^hJ(~FY^ry(7 z+R~g+`$Tqb9bZQ3RXK&{dO^lvMNS%I>v@tw{9j}3gmmN}#9@#V{O+M`R6QL%*2Z3S zms+g~5^q^z$Aom$Le}CHT;dIV_!T2@jjqJwCHm+qQ#T~srxMGvu&_9E{5UI+@-^GM z(I?TO0q=tX$^?jYMV(8U*Z*lmx9-!m-R>J})2`n@R@YZn67KIC1`5jnU>U6jTE{vJ zkgh0)@S34^?|0bJ`R-bXur>b@u^*o}iH@Ihl$X^O((}E-&{i2M8f-~ccYVU&z-6T* zMRw^_L+!<464kz_`0Rw4~ZKQcGMuP)bn!A|(f|yUFsJ)$Ed7 zU@ASe^<$qicF$36qvxvO@al2e z=9>y#F8=W{n^(8_d?AlA_DHaA%kK`Axte#64#zqzeY2eWd^%D7<=x}LE?ujaEv$!u z)$9tCB23hFpV@M*-KIdI#$6trBL3}eK)$6vQVvPosNZ>W!P=qbSwS&d$lF%N2|V&o-^ zQQgG*%1SB7Ht@vuh~`ySS9eri${<4Zs|gIE^7w5gp`?~uE>{t}-87$jSO$cG`p^f} zG0oRJO}@N1o#u%GiuPIlY#66vRBC%|>&V?K;8iGWF=M|6l2s?KJtv-ucXeVSz{|Ng z?z#~yJpB3n;gQI8E5@$7a8=(YaiUCCmMk{}V~JK%rsl^^EiKtSUb3^ETA(@>WA}Oh zH{w;o{w#Wr2{@I2cs6ta$a+zXJC-Hb}cF))Zj@uLBg|aLd}&Q z#^GZB$$r+~?+B!5uK-qfPe*-j+(B}3Mo4$mk(6UL6GU&_3!;^`{(gE6m_lPOF6!UZ zcV8J!D}Mj}wJr z;rMN>Zg3zjYx8O|3njCK8K9Gm3kQVeo0Xzyge_vRVhdHTZx zb32Wkp;hh$5LT4f;9R=#!68LH@R zTe?(u^&49f|1lRN-Z||+%kTbE!NK&)_`GW!xJwJu z8`VAjr4r-Sy+faz8u<*l*@NQ&u9wFH(imJL_*spWc&l^`4%CjZ7{pqt^Cr&7tJudH zu`ub5d~zC9HqM`|avhEzWcoHcLt?r=dsq7C;CzhLe9pJGGrBRZqUlYG>d|%t>aTfq z&^<-yd5skXTl@9bd`rt?{eO{KhVz()s6N&a&4aqZ!0<2w1WDhZ82{OO3|b4gP+jK$ zEs>RQAHZS~KvEA%xzS%H&M4v@-t0Q>`$l66q2B}qlD7+o5F%%UlLoE3v1ogC-2XZm z>(wadRGTstVKw1SfI;M6oPWf-?-);(S~c`kZ=S~_lE;1eUF-LoXiopPlvrM&&pInz ziCt4sC`z{Y08+GT9ix38b2Z|uI7h^@gJ3z<4Awu)=aIA6*F1?EjQt?ENdB<>pjh4` zSCl_*&e}02VVL9ksg;7Q%sTrK|jQ~66WUVpRL+j!Tpkg>{}`s 
z4or3~R8CU<%FFV3z%UBM6&Dvbe#7><=3HgicH@+wZPp^27k3({zQ?-qGZ#{sJd%Z+ z8EI+XI3XErgoI6COS(CuK)QFa^RfM&)$%-;PVZ63JWLlnk6FPjFu_<-Wy}qqV$xw9 zrJ0heT$C*JpvkwNxK%P6z&6bMnsP+S6KAqw93r6>*EBhC!@|EpD?sDNO2kg`qdqf- zCE?_42res?7{AV0!-E@=)ZRMLJbY##_j**$tf`I|#w6@?jAk=!SM<1&X9(|PY39Ah zbe`s;AA;#1Ez!X>&fsy3goJ)(W@an=Us`v(j@gCk%An$Sycv_5KhAE#{fCuHRbi$$c?I(Vh?_lV;h7wv{oXp6}}2^L5)-9 z<9?5rqdwwz?(2^LXHx?`-(aIn`ixEcn-EmU?Q$wbiz=?fkDFSi%m*BBtnHsGJtL@^ z^sZR(rSy4;!mn~UBDa$nb)Ku&F%Ea;(s`Jhb(b(ya)kQ{GAio#`>>qB?ABU)$sApd ze(y!v&^nQ!J1{i3CK2r7f@#f`e(KpypoY0)OeogtDrcDZT?99^#7eE{jCL*PsEh*V z1>%LyYAb!y@e1k6K#ti20nK``dad(}!qjrU$-HU$6O6g$Nv{@zKPhozuFz^JaVoW> z2K{j2V`5_s3majm-_VMgTlN8Y+AbihuZGQeKG>{R@#K?DHJfgUQ(UE7*twS&<`kTF z=e^~GmM#J#us+6;NFb7sG`ZXPxnTvRE1|&Oc-zxfQC99iXIlj&&jgkBQdigDvt#ws zWHPi}09$zE;NA0DPrN<-`aPS;V`fFPFc_f4-uC%}tyD=X5)7Ftj9_ben{pne9FvnRw4moRov7*aT@#7q!qZ*)EPn2jrOAIt~bY^B|HKl+M>ps`J0 z-zZ+FPrlun-;))i0w=5Qozhm_GUr;r^4s;B-ihkKRm0p*zE35Z$Ly3Wnuk@u2@|XF zg0@p(MGGCq!FTiYYo7ZevC+Lf`@V(2cwBeC7cX95j_6}XM}s!ro7!C*u7RNv4qy8& z9Mkbk+5ZwDpj2iNsHZ z@k%Ij-5JW!-0Rokx^?+gXML-^?`8B`x1YP>!tY$RI;)nH6PO$Fr0y`HsI6;SnwMR{ zsusH7sI!E1l-lqf)hX4@nJ~~!KA@{jj|6W^dBdgeYA2_scUxO^vZ-MpHCO4*2~cHpVStY-3|%jnHPq_AnXM%m(2Xa2tHnc85S* z3Wlp?Qu5w-hflD7fBmA9CRN|yARQq0Y&!Kn2Ns{^{oS&;y~b6Alm#%Hd?ao@<^+j| zaDo_Ux3-)gS264@DRu(o^bJb&)ljvk=iyQ1M*){rv<~kXY?kXBWLms4TkrgybH7S2 z|AKgm+F--4o_Zgrt{TRC#d^^P;(#0NM%4v+s11X{+OH1IXDHju?KKr%N;iG}$aJ#u zFQZmUD=Q+qTVY+WdvVJMm?BMBSUE)5=xQH#jw=4*1Qqq8iQ)q{3#YQQrzN}6B44oBR7hiV1)gN5vFLbKE&8O3$oS#{HrGntqdg0Z;sinos_bC=2NGoo@;VAOn z4>M|#ST(Y)glZpn(^ge=<GvwgA$@XE8DKpWdR2cnqIOY?IBsEXYtRynJ7-$F4G4#l6^t8 zIOc53j@6T7{#GVywf8z1AVgU@ovn%P;BGtHESr$Dr%B$u2P3$?=4sbzF|*7tqtX6m zbuc4VYdCp|dPD*ZnT@1kgF4xf4zu18Hb(YHzg-3gs;6EF8;nbMUR8OKP4kw5wo8#= zDgbVN$l&nE%iy>1_5=1?igOg0b^m5*D_we||an`Q|e)M8Fa7A=fZXoi^R zOD2sms#fo$Tm&mnr+#98`G6R;Ssv$ix{?Ki{~2JIq@_lurr3ePI%n8&gg=*Q==hN% zrvP|Xzi79ATh2hFOksEPG&gw8)J$sVFi43R{oVQGErffYU%w)2RWZYHnQu{h-`1vv zjGw`Lg!~6oBvp>b>W^dd&qNRoY2a7=p3Zku*bTQ`dtIvN!@7%maDsT9EZ4#k?Rmd7 z%K^2qrE2_HJ#H#AvvKFf%FKe%s?CVoop#Qu)ir;8mTK|v8isYF4v;#x-Om*n zTjz;YN)1;(@=83GSfKQ48SGu3y|=exwP5LU^!|Q6(a0tCJnsW9&Kmk5McTs0pi9@+ z*Zso6Qk+R*k}4G{k=)=hm!l8he|-A%32LDvm_2%sPJQ)FaoaWj3nzHDqM~99WWqWc zEFA~On&gDpfRFyu-CSztIo|}{B%ASO31G7^T0a}X+v}v%9ACwWz2ZlZEB?Cv&ZFPT zL04IkE8exi=KEf_o4|0tUCx8O$!(048vDZpPlXGOo4KS!OW`gx+j=gobc-)u5h*`1 zyU0y#uc2DZ#oJaw!EVN)W~OsSxa8I7WhSeJwS21Shv1NXNUSx8nJ++D&bG>==02Em zl+U^2Rcl+EdayiIJ}%5;DucPwc$mDr?zF#q@b3d?y!|{K7INS8Ce|W?g|2$0sKMcya8c@h?`o z+h_IiHzFGCh0HLN<<7%zdo0hbnOS+;V>~9R5GS>%XXA=DCBG`*{Ne*%KDc%ZX18Q( z?57gAZ5AS`y8&;Zm~GTzGMXH2(=4vtVFel}UZ`zbwIe;C3^1d^AUJi_^cU0z5EYQrTWNUfr# z<7{irhKP|sm|#(m#UaNB=6DPxw7u(V!d4Qx-z#hA7KNYdMLs=V&Xy$P^=&D*DH}n0 zVAiAe&}vPj+-Ld{Vx6;z_D`?BM0lQvwoA0!f@0grqHqySWc6YJh)VpSF=ymzGLy%| z=qMwScBP_XA2;TPUaC_-ch_X|cHiE;B=akW_H6yexl#ylMs@Z0u)1s0VM#fG+_f1g z##n2UTo*$gb5tugXfJv;L}hZy#;=f|Kcdv;0(kvF z;7!-Z!q3ufK?+y6w9Qi_uqSHKK?yDWQcv6gTsUMX5xrJM?Y1gaX zjRx+VCUzfTFo-_*A|0fleDRjI;N1qHlYo~C1O~NMK60iZ)~)ay5Uv>jRu`==2k7hX zVCyg9QDCK@SC))PyC7?Ic_J;J>=t3a%~<2573C;MUOt8xap^lQ5m^<4_QUz35luEV z*aWuQEP^ju=Z+KG3LrO)OBpkcCqaKtVCT2S5O&Mu8g;9EU{NLPX62c5QeG?>sEY-T zyIWI|U73H7bTi3$RqYIYe;L_qLrsvqGC$gciF~$UozM2_D)0V!63KlIF3*Zo&satA zvee{=@iTm_m7n#AWYvU`YV0)@@!H95Bc~-Uh~*iih54V>EFW^MTzFx?qB5Yfl+zZ! 
z6nJ~ud_1n#|7Ig^<_Bvi2MSMRDNmKg-^u9^cR4LlA{1t_o)595S5ku$c~=GA3n8?E z_I+aX@aK5ha6fR=^h3V@)IV^#WJMN=4TejXSi@dIjjm%aKxeYdPs(mD!{c%^dlV;vg&X_Vy02?Xedxu)!D!Nf*nyK&2~tI$kF9B!WOP zv(nls@$EK2Ffk+Vw0^tN#OC|m%RS6hMj=sL^F1xjKr})3WzP4u+Z?fyfqon7vr2K= z#x3puT?v9N4E=kx6vtKY5@@EP_|haG#a|T(u1Hw->0q3^16`Vzb8NhV93PKTYVeyc zMBQ0VaE&wOaY{jFC3h$$3C}$bD{)eF;qyHqOxep~6x3t1u^3ZF#-oRydvWFvR|<-H zHd{G;sjKDA(u7vXEwNXk_3MoG%KB_nwp8`aOvfWb!k3_<}R@A=}yHy8v~ zi%0GB%rqJXFdsdA!(*5{y?n2Sd8%6#$+ml7Lw-e0jvS~qmR6=qvo(~fYyifY->Uz^ z3<3LHcH7tP0{wORbii``i1yc4rh~w`NkUwN>0!jhruQ*Wm+7bv4F26+jX!kC1Ygbb za_9R+Ke|yLvd?;A@kfw!;BlYGf?4fHy+JD|5_P$d=2P-Srert2n;to}(y$sl{jy2t zIHPH#15@uHp`%Cz@l^MRmo{(Kn$0`~6qjjd4b#j*nT_K6%lDF&cX#v8%HSD$nyUyx z=C1}K`B^rZ7T?n*VM>6IT>e04!OOos;&pp)7gBgc5~en!qIrSW5k#ks%hK?1MuoyY zDCYHK5MAw`92&Z5S{b>DlGf-~l*Xnruv_LPg9{9O95DBYgqoU~&1nTN$W>4-wE@+Wf{nQ;+2vn%vI+o>k-7$`2N0LOJ zY74x?6olxOsd!#_8~WgizvMov)Q#~vV=ilh=TM%Sb#goob)CHlO=6EFUOV}y;7#&n zXx^vyu3h}RRSTLGM5p5+Zoo(912rilw2*^Ml@4mLI;Vnb9FK93Yu#8-k5D$V+wcEF zS6gHeqb{tNEL}XjG>~TY!P&-onr2Yo0r`TVa?I%bRJUf1X=j|aKQXF*j6=611=sw* z@pI2i{a~VURf57RLg9SUhO?gL^XaXsCSHl#YRP+<(+wJ^5>=7|Nrsek0q-#c31^M3 zs<@`m0L%_at15b1sDml+PlCIn^pr}F8A`*^J!K!T-ufsBK)AvtB-OBouvv*{!G+pY z(hU^kfu=D}KiRx4HpkqETW*$(ZwN?ZGkVZ2K2@6OWvnAZzn(91%e7RV==&{7q^#$E z46izK*r+G_Z9}Ef%@3`d=k3StJRJ>${GHhN!XeUUSuM9;SIEigo)4X#t4Yd84n4cl zK-Ad1rlRmvz0xDCH?U#-yX}LY-j(ZP9VQP>+6jlzXId zBs+p#hxFKx-6SYs`qe^Q%PAv=xJo4Q)Hc5JjEb9IqGDo%q}=+x&@`Jxnsnzpy8W8; zy$g*KM|%skc?yrGp?FLI8C#e`bvM{(F{HU!Spr(ytgSvAtbkC2{?w@}t!cWEsa;U( zkw&^e07Tw;iN(X*`t@FtZGwAE-yK?3j3PzOSPZ<=GT;G7-n1wZodcw>sXXVrHA@U8jZVG8%K3oyh~7Mu35FK7K@}Omk(LN z%0w=zYT97ESv1)vfQeD=c`7|*nST3q2 zb6DVm^JS-85!Fd=UOB3H#TRE+tgLtu-b#Y_5SFF|j*h0^0tF{`ocg07LX+@?Xx@1C zGJpdv`J{E4whLQrrLRS7z$+GD*>)F^f8{6o&`n9_<*K8Hjs{znkvw|RbWvCQhyzhS zpX8M}n5BjkF%p^ z6mmXZ!1x&J9@K(e&={#~-@d=l{q=G;=sH@aco%;L$JT1Nj$C!8GhE!}lDo6zXFCSz zi#^Q`A3m%P$-9BEF(yDhEYMUhh9K?Cc{32KzO7ZlC;C*CYN_ZGiu@NCf|m4fr9Od32n&YHzk%reC}8Q4;MVb#Ycc zab2GD!HH%~e6{{y)Z;{AG~wX*b(jiz38b6>Mt9U_A204Ll9H7r%kBZfg$(=ITY~9| zyS6Xs(#dPf&i4p=5d7{YeubocKA(8&uYHk&C9Su5JpM!S${y_&&3#Z?vG&N`Mi~1I#oG3SRQmq=pXS!WQ+9M4_z?06`nTV6;Cro!kG*QS14DAj`>^}xTHb*3D*{kv%y!8xy`0y}cON}_``M=>U$E6qF66w%evjWaFZMNG zHytr-l4?txdpWR9Rzn_)h<_UB1!Jziz3(YlEI*p4?E<=iUO+%=b#;}H8;|k#N;W=C zUlk7I_@O6AAF5x78iXmH59x9*VzzYGwU=-ZM8N&B{p@xVa~2@W*a|vf=oti^`!S`L z1j86Sj5Cy792X{Ed+95rQMO^tDptqPB$q)n&jLgiEs!yD`s!XZQ*n$7X5L0^ZFN~0 zB+2#_eb3sKJwi6)s+b5M30guEc^ol4^uWtv;eFOMmKWw_@D6~tYyYL$IC;K4j(d)bz-WmvesgiH1;{{?eB2p)hc9v>PfCv*y~_#Z&=uuWS*^|`={|C-FzN27axKGE&xu? 
zGUGX8QC<&0x(#=hZV{kNfKhHPpGJ^N*76>e=c#!E+G)a$8Eo5`bMYboksO9xg~r!~ z;>8~SE&n_$Q@vozWV_=)4S%!lON$~w&$8fE~JAjAGLie1O6rq7biPQ z{kP4VVW#b%eUV#;;m0+DgU~(->;${_>?!vEsYHHl2Q220zcLFJ3xJjWG722v(D?zZ zR2t*t!eWRuU?9>7_6}T{`I;kJ3vxlzVrLuUfpCR%>9Y}{_6qj z;TC>RR5uZCyap!z5!*Fh9*a99N2(K*;z4#N+cWi{KRmkUT@BVJ90Z?`d#a=9{C1Ip z{!LPMU>dH0k-ORXaGO--J%5mEJRw8k!^<`Z8_3MJZ-?e}ZT5{E-M-spl$*OvcjCm& zcMlH&%yRF+tw$WW3i9es9@+YP?EPbtjq#h)bai1)Y@$r;A{hNZfIdM2O*}@o)a4Kb zh0#8o_z=C#@@T?J_8sOsTiNBGKAt)L-%ZmZ%3pavZ{9yZ%?{Z#>xUgohqiQ}3DVKQ zgZ}V)mF|Z?9l0C@g=)*GuJft7#ShF?yqTDokO4KRI@WJL0PvmDI6(K+R(Sz3n7$wh zUZJ6yF7aRgXui0AaP#^gezM7DPW!d_wtY>)-dNl8KYF`ho6K!(gQSAk4Xw_J!u>f) zP9E>96$AJ=&>D{c4u5G+y}6~OWybfF72{M^tvq=^iif{sbHEepUtxSD4kd&LC-XlQ7+mO7FkdbMzDOcf#jve%;%N2j2fh6MM z#fwM0-CVXy(Wz%{OdkA@%?LiNcYiOHz}4oqf8&~v5g~{~ASt;OQTa(U&uFv_Cg~pG zc^G=yZQ{1E!Hj;Z_2zqcrQ%-ULGK64*_cRL?G*Ot^VG=g2KXDWI%KNVe?P5R$KM@p9>Ht zz5OxD9gw(ML_M;hJUJ@g&S7pB{%WQ2w^|D#o`z(l3VYpIeQ$G=Xe9fyL|)%NQar!qdT@P(7gi9 zPR7sC7^C^Jp7Vg~3aigs}=!%F}Erp#W8Y(_c3qZUIDX+MI# ztw%mSZ6(%mp@z&i1)yfAK&!=#D8t?lB}aQ+tG1~MJ{sly@nwB z8)Wc@^8)`<$=D zLfeJX+BpH3QGOmXd;;PfXE#6si$9~B&69z|pZe?s?DokN>7DeM*tOmg zXT*QC%0!x^0iApP(kv)7u3I0DAopn*v4S~oy+OOYor||A^@iI2moK+dZ$SneK~v`C z=bx88K(hU(-E?+S@R*!}E<7dz(O8E>u@kV8Bbz{lp= z=@iR6gIY4iT(5&vt>L;BX^G04{;aIC-k+M9l%OB(^O~xe9P{`#6=!}9F8?A@&OyMF zKx2E+x$a-dp&McR^2$o+sDir|>0j2ZfZL9rw;ifFn0iU+NG?EN2gb*Za&uZxxy(+$ zuVT8tR}~s^M8(8lfsb z{EPd9y^!=#LB8AV7zM2{1HjP z`vB68`7eZjcyX*J3B>%u=aCD~*=v;S05$hw|g>hmPmc~-iyt_h0r1?Pn)-)`NEz~2(iXN3?T zMf{OULw>}Q>@$#~?4=g!a#}~adAjl}JR=$u_x46SNO*Hx=k9w`GZU=yPn)Kde8~=$`IT7 zx;!uToIf6ZjoN)H3c5M+A^00qG`*mNxxkH;kpEqm0I0?nVbK|-^)Nooba?y8ySd5Y zO4D{hpI|kRq(L&3J-P&I$o_}kjvu!n{pvL&n%1k>J1gK}&UR8CXZw94{+Q^7G(^c2 z5ga%Wgk)tQFZwTkyRX}}Hvk&c{y$a?)OJ7|<_Gx=gXQ9fvbAy8zK?Vyq(3&mh2`)1 z$6NPJ;%sJ_^ilrA=*+ZpYulqHP>e|Clhz*CLmCs67!^v7UElhF|9wZwNXktcV;)U* z=E}iA(Lui;Sp(!r|MBq6V9>2+Hw7<>73NL1f=GE+rIjoj#qlyur0V=so}EyK&MrQ! 
zhSik+*xWExc#b}vR=oBNia(^?eS-tZEz02%?%n$wAb2%}(>&UE2j0k4Hpb@*gYQGCpOekZ>)daqf2<5V9Qz zlm?Kk)zENK^?l&Mte7GEfO$1gT zL~>o(@m4#m@rFB;5|F@y0eH%soSoSseE#rX;Sh=m5xOl)NazOhnnmnA#v=_i)cAjA z$w=KJU6v)1)Y91DdgatON$=q1eo=r!b>3K8)Q80AH)uUT>by~via_nSx#-7N+=x*J zFIXM@^Bw+hz<&Id%B|Ey)iYWoIzu?b26-+HV~_1MTo5mU;GMtw=QGOMaYJ}anO)cB zgUE^~p>YO*F2Rt}uwv8YEg^IlKw9|bc92-CGxEE~Mnk%vOT|B@X0 z<2E7oOP&*nFkr|J(hF+XU6B3n+?*X3b}C8)IxmNycJ>@14=HH(KzLL;{#9`F=Us=@ zk;uc@dOv*{=;rp9A@rX9x3xhTe;8l^z@tNl!2N@C93)o$jim2*((X7&tqCy^CC)34 zmtCNKf-4aE7JCdw;kNM&CN&=3QXo6-{iaU);oZ2YfGq*X2GbPpbJn<38pb2vYBA=- z*3r%dg3wiJa3oOo*w*NM1HB8 z^rTH64IrDR^7b5Y?cX>V?BonzlsFh-Qd#R)sGIQ{yKe|#$2j{|IHe!VU$GDJNH zdMv_q-Tsl6SEd+)7S>argQ!xCgXO}GuJKL+RIsU(TN@hWD9+xdfrg)g)F3SVQ`rJ6 z(bVh%>5gjYA4|jTz;Uf8q!kc`mn}i!Iu<&DzI9nO80!AS+W#;4(j6oj)TjueMnSDK zj%~cQrT(a((n(!u>mY~!H_l5*75T*IZx=;||BiOUCyTh7K=W`o@oX%!%|+aUBt&(D zLL!$g`kqLjDPBCuMvTuu;4!z+p*Ff6Z_r3n?bM*bdh*O@L}9~dR}qe|O@U1S+~mi< zoWVuNuFm&l8tf1b|MPzQ+EOaQtD<@E!U2U-1yUhr4Nv^C_B}{w?br~pQ(f)X5B%ek z-KmdgZT|LrtN0J0ymAG=SpsPQ9w2Ss-10I<(1H%r?U_5c57?9V{lpI7Ld^yO6jeUk zDOPCkdk$c``nSo!*tYRl1+e9Y0xApGwE905v-7zm-~wqD0{E}bi%+gegURju){RQXFwmI4L9am(a+fA|0XiR z>)&5}nSkuoe_I*|AB>9q!8E3#rp@r5gwK2hlUoZ=tRX;|`$4c?9iXOT_@Cc%V>NSi zrFSC`V&xhrvIrD~V3D%&6d=!m6-xSY{}6~B#SUu+JejW%^c#JJHRdSB%Tg{u;fipc ziWMu)3{oJ@)qMZntmp0hL0|`PCyFKPyc ztAIf`7T_m}nYUyH|H}`;)zAhg4ZY-~t*7Xs)1B*pGfII4om=}qWtus)KYE_z=%)5`yxKQVvdRWf7(1y0k$9{jR^{LkDfjy`%h$-+C*Bkl?` zh8_7oy{9D6I2{%X@WTh$eCLt6(BTf_v;bg2`p@qSyi=BS+-+BYdF?;lvNFgpvH`ns z09c`#7ByWqzig<0^_m*%HAO0HiID%xMWkDy30&v_L25DtBImY@~;?YvLfr^ z40;%PlaWC}RrGF?MuTjG$!tnRUC8jA2;HS`1II{JPAnK})6;exvb{;N4as31yc=kQ zsFr3Mg!Q2ej1c3^EH3i?i|FsMav7h;vBr#8=FDNGi+ zu51INYBBR;10XJ8dCFZhGf>Ud?-8)#XlYC1AYI8Yxl4>n z7C^i(uY~d6?OB9}dG=rX`YL*@d}N4oyI!0=)cX3BjB_yb_&2KQKJvM5wYK`MvRBth zRP>F#P@@TN9m&DaKNFfa3i=)P66cH0W)cGUJw=b{p$F)dF~IZHGu(T4)gR;vXnCf zd*>2(LM7DIEiDEwOZ4W*WYk9lnAE0ukzWd5jgEfuG`$4KPTF6Iquq@JJF_sI40_W# zg91(fzL9s0(W?tNrhW?S2-q z|3@e4l;YoVVtE=DzqRSsj`8+zP7oyMXb+6kXD3dVH^y3i%OBNhojgW%(zn<_VrbWa zhjQlWP4=$bhJMq6{K?fBpM#-h9>Y;$o}c47T%$Phh>NYE)%S2)$>2(A^3u``+M%|={PBfuFrG$OsSFh6H=7dNyr6d9%htmG>FV+ z<@gd}9MhGY_E?A|-+Oj;v^LR~t~(^a5yMSaHdzfm~#oief-Ja_x1y6Ii{S@@% zWPoqy+2^Nfx2PQyejOv0*Fni``gTw5gwgHHf%r(i?lhYK@6yE{t%*j0*Zz!BU+3Ej z!;&v3b{mX8CZ$S^_wT|DvQktUes@@Is6e>3E;`at_Qb|4wa>Kni^D+TUMDLXc-Tkq zRKOavpd2z+>^!@xwq%a++A%?)aw(M*U3C=*bcAyj;LMWX;0Zpakk3N z2O1Ezpf5dake@1Si0s~8FqXfGV-Iom!G-?v8X~fXuJ0e;a<1N3iH!LW!a%X7uEb7( zo=z;QDLL~X#2JeH#f!%a>LZ*M)T^s&xH#aZ)ZN;Lxje2J_oRt?*F^K_f_A=R8ii>6 zL^ei-_xkZywduzDAB3azsLQjZ8_jQ-dyx&0AJ+3Y-^7E;$`=|aS&5nKd$Zp-&a|63 zUe~!LL8|bx(btxMV4E=Sm62mXE4kCECi%fSdT-AbcUk z=l9H(yawX1~bA)PFz6l;!l>`;W|S#++YUJs@0_c#zJd zEsKb)uuIG)=O)8R$?@sZ+I%@7-w&_E>k|953nf2%?U&`q&LuQ$x*UzSFM~kRaXip@ zA?0|<@nB(Y2JHH?MzIUM+aw#$X?8zD6sa=;3(=FGbJ?DaS_WKPoa!R6-Tap zd;i<_;=5{3RIJOk2M0MW=@S&L*>o;jS<;sn$k9!j9;Miu_H}(_00N{L)#a&fzpvj1 zdEWGVW!I*^HFPDH%)^`rqi~(}r_=SLXy>j7;H})})SR80DMuP=2!3YWD;T!<8h|7z zM79-is2;r%4<+K?ccLI4^FG^n_+oF#$X>PX>#0GfYKYPE&h%aa)1HsDHC#^(`yl-YN+4Y8 z%eCL7DU;AwebXGthN4&Rn>RQwCx(Te`&49o?cN?m!{s{<_SZ=?#Z?Cr9A0rc3-;0YGk+Wh(9H$vP{ z2a?sn78SR$%Dcp2A^_d?(w>su z!Wz2D2Mg{Erb1`ks3|@Zc<{*CKU^6@riE?{pD|d-kZ(z1{7nCV6mq73oYO&yp1Eg+ ztGq5<8&)ae7`!Xtv)z%Kyb)5|Y`VI!uy>@=E%WG1RI#{rFjC)r40zd;&>`UXZX6mM@N(@2EEWpnAGxgnt6)ktP%Tbn7iM zO3k=!xPX$(#+DFT)ad+VE0wwL|A$GXq(y)&0i z_z)EgXYeU~Oydtl?LlX?Mup?eb>$20IyyZalx|^zbT6lWJC;ix0uq`Z!LhZ!P${ip zeHN15+@}(`I-Jtb&_E0dBUDLANf`hv8ddws)q?N_uPbDK*+yKi2**dOnJ1Bknb!BQn0)HyXvu*~rX9hhX;JuacT>NG5=BQn z7iLTY1)N7RNuT##lQYfIh&1|n*DsE zfBWj@k3`yRIp$!PugBeVGvWPb58Ndm2butYz?_?P3kwXSg}x{eFm#Qno*vL$DW8IN 
zXs|HWe*C;l$YJ{3jn#OdZ*~fSSYXyb6k6MHz_)^QSo&T^f$FL^`@lP(Q$802BZfaR zHwl`9Oa3IFa`TA8wsua34bsNq_F&$|N(x5nGCpvHJd<#B2Fe+?6 zc05Y@l{ghF=}a`A*8FWOUNspZij zPEb)=;8YrJv-~)$XPZIpwqT4?OUq>^C%=gB>%?1Ybzj-^>up~_8u-nxJgp+>b3>@; zOu)5qUWw&d1|kT3$qyTH`38TQ)YjI#OIx=o1f?$T;^MQYLV4MsIK%GCM0?xPQhVEN znkS!@%Jt23R}m}tuG`NCF7w(iu9V1x-@N+*RmP=;8DB$AqV)2U%L6jO%*1Me=*S=ZlA#Q^~G386*u$N1o!2Djf zgaPzy%x${5ujdY2bT<=o_Gbqys|~^$eH=MskSV$XqaT6L2^^n}+$e-R%c;HKa3q-Wqp>#?4-@>jvju zlcNRW^jvY|%tHHX0<&EYiuz=st_4)_y&30dO}e(om5t6)v_ z8n?#l1FU{)dEt30n7#@VuINiL7-zyf%DcxAIL$I*NwWCKm z@vB7S_0JeJWAj&~_8ppaU)Wh%&n?!m(7}53m_w^M(z;qsbM;rg$0swmJ6XPSg)cgK zTzSi!p4%eno_s|4O3WMP^6S^k$3GZ%TX&$DDGjG(IR|nQH>@;#$q9K)hBULr6MkE~ zIOnyGPM@6vrYp^@Thh=%r31FX*Fpom!%pmFkTp>&ej*0`YmrvB&c`Lihs#}KzveqcE+xTe&&ZL&e{wN)y}m^HlXRfD440NRIK~S z2C|I$y_oX3u=G05<7IM@{^-pU-F|r{l;Q#ux_mWyWZz0M4MM`9gjjMKtM>M9zHJ!Z zsYP+VK@taJH+*>B=ZC+#-{iQR6&}G_8!oN&>Bt+q(f7}`9Q_8*&i%-GPP=+a+gk))wCI*2i5Svid@g|F&v0h|+SJ)k?l zpKGaqZEv;r>nhLH!?}pV1Ix7zn%Zk-wdQL3si-G(Qu@BzABe9%syIG#w*ce|iSWKF z`i6&JLWWqpE^)Sw@2HmA)rvEhT8KhmQ6#0{p+~)alGN4lPs3Q-}tvFcLV8(*9v~tF`JqFd&Vl0mhAsH%$V)8un64{opmZ%x=1qM|rHF$@_re*Tr{;qnT4HHSr0IP7 zm{;N;DllJHp{I+oZ@exSRSRbpsjFdKO`*+W!bm$YCdHkRl1v}6c))sY_tuMvyrZ7& z?Jbd;>yV?ZpDWFoFQuDEkYJn8mHUBICwxX+i#m87in*D|(ove_W@AijY-OdU!ZlT54Awo-t zl|k4VkiM>fxj7M#%}1(xzzIRb#->2o^y>w)Su{or+#UMG=)k7}$b$ysIviar&{Y`? z(T+4N6pG-o9Car(GFj*0=I+J8+uGO&DF^8LK1Lrvb!A&Xkj|eUZINy+wl-GC)_B8d z4e5v(5V$wMWL1#`q!D=c56U~+` zpH;C=a2LjVZaRTen__l$b{rU`R%O6oM{Joyp_^!HYpa(}O%Z?pJSenz=VZubMCynX zLF>kr(f5EQdmi`bv`dQnnY!(@^2-Vee#8I{_;$=?0tCq2u85#uS*NkD?9DshzSI9M z%sA)+0reN=D?&7M!nyt^D^=dLS~`6Fz_vYaLQ)36e&0GPhgVMMLeF_YwbWqXj6t(N zjNR&a0P(*9u%6AV{BEW#k#_0JsZ*sZvMr_Z?v6vOQ(4tCDRRsUr|>V%)`w*oM}_YU z9TQb4>suwaoDvzO_I>SakfvjUY3*h|0OQB|PS2My6seIxHR)$Mwmlm(sP+`5%tHmh(nV>06k_gdScQ;m%}fvxO%9F^K2*R{)zo$M!Qr{xKG+pyCf2 z(lS22?`rX$>vBnyeCq1oNlEm=ldaS`Zt?CdHTe}id({w{Md3*E`C%AW-$cEVv1lui z8b_D#fO9#46x)kVPR^<_w6y-XnzQJ-iuD^wks4a(vL(`rz!?K-gYYu*M1>qa{+1>l z>RmQHk=5>;m|@ddH+^3ErZBqQjug&NZwh9}JbOzhpwhzzPBmwDx7qMS)cu!Zop;Mm zR?HPp%jHn#_iHWfBsIB=U0?pb++Xbd(*nVPW>UhZAAb0~RVQ$YC=-7epP1zy3k!<| zaP_MOyKf#A9oz^LXreGjAK3w};*@4%$fr)$f!H}>-zQbN*NDTyfr<_&i>|ih`>{PD}R5tas&FzaN|Dx=)0M& z$`VLCvt#8ox`(EurxL&+y|c9zEKK3}M34u^0(|cusM?NQKsdd^Uitl2VZFE7hGzXG z`L037$fQse+7RAGwLo(x@AntoKf=xv!NG+}WV?Un($)98c3lsex{Bp|`ptbS2}!PZ zoQ*vpz(^fqZ!H?h+pCUpmt;_|Y(qzWj1WUVPo|6g?nO*!vC=W#j=;nVW5E%f37;QN z6SSm<(dc`q6%%X>y%f4C?cs=n@eh50DiosEn~+X*mXGVV_RTY%KWnzipwubf%C&t7 z&~k#zdZ^cjtz?3kE(|7J9oea5RO*qpmr{c4hL(gT&w?$lOsHvAexgz0Wf!Wm^HwhQ z8{R@^^@dt&9JzZ^NXmQk7!T9^s9*Spg;Qt9Ewj;1C0^CKveevST(mvPs-?m3-tGiH zp3YG9L-9K=_6bw@Q>i07Hui4}*WJVZ1N|?zI- zjxqnwEz8apt12&ImI_f1_O0V=m$ql~2gM8w((3GWi3Wh4-#aNk{lnyfa_2OfA?t(- zFR|ia#VcHI{NjUYOfgRZRlx!(_5mI2Gi>tH20Cj^w??Xq`|XAT<-}esjEspCTj4MD zy)r>p&Mxe2SI6Ga*?>v{VBtOIof!jBn@wHOogM3Zw64j`B++v%pE7o?ir}y>Mtjgq zy%#npT2k!9%%xOQIB|f(%HoUTem}pQ6QWbvQU@&ZsqEbXTVlJQK)yX?hyLxmG4&(W zV~~X)C6sRv6TeZP=8p=+cV|&X4~mJnr@0Kue*ln#0Et3$nsRr!Md<#xG|9?p=;-a9 zQ&+kg+J@iU*x4u{=J#Bsczo0GY)=uoMJD8sr+=7ixv-y>_b~B*aa>R>t{Nmge&U23 z^-24~&Jxr_b@%svBcz}(ei9d52b`1cC+s#qpCY6dQ1TW9M+oo= zskKdQH171boI6tNqjVDg(%IPrecqt!3eT3%Clpn@5gs87dXD~F05@vQu1s|yl}&N| z*13X4At=YT0XL9ZjbbubHHWk#s0kFot^p}15QZZWKVOY<6_L@GP?LkMH%Fh;-EuHm zWo=kKb62^e6D${yBdG0022)?6B*g|eI*`ph1{zhed{zwjx0CQ2iOff9`@effd4_*G zlV^SCB|c_y!fs%70#ji6JXyIg8wN_6qPgdu0}#zzFzG3K&-Z5Ce!29B()D?N5htG2 zru!>V&Zi;JDXnFVt<16s#3WVn>W6JR zewxcy`w*^7#(sak=DTRtTWr|{jyk9tUzlAlVsZqGD7E$z;OEu*|u>yERDySTkDy(3n(y(k{$>%29~ zo~(#Y=@p#DWEh{g!!}veUPHWsi#9Mdk^p}~SzWuII;su#q 
z6I9CGX*&?5NT_;WzkaPeugk9vGk7s;*@WPcJ15W2pu|&Bd)n!z*G->4v}`V& zvK||b1mMB>{(lbvWmf*qj3ph5<*pj2}Kwd%`eRK-#AW2eWdgaSLsYy<1o9I zEiLVhwk&rVt9$FbvG`0)($&bKkf~GqNmP~__CkvQATCw6-Va7cn>vnLVk?`z+R|>X z44b?aY`dPR5y8E-w&DKjjoJe*Q~bjz_+Wr)g0}n+YFm9G#d}n(&TbU(Gqypited%nVq;7WuVhft{1JM z$4I8r|H={RS%p|nwI}1B;N;|tiBhosB2x^68T*;|bLig8UDr37Jj=~@5l1wxG*ghb zDEyHSR=-wRQGtMQwnf33W{O__syor(_l2z@D_=|0j$n8gA1FAI@XhAV%l&Jl0mfidvIX*o)_WHMUyV zI$4l8Hwc6)1wAluEe3Nl^&36OE07IY+3&>T4C@^Q|2;pLu?;~NY#uwcb#@MPOyYsG zF0pn4Mh6iI%Z+YiKph5Z*ShLhvf#6{KFHlI>Aa&;7Y=Iah2x?62A#5Mvr|fZzGR<4 zg7_+A_AD3@7(x2u&l{g~`+FX*7fqvX9jgZ}U%>)%>fw0v?}^<(=O|d#4|wly*2E>6b{%$9tzo>D*FXrSO6jtC-8TFN0;UY1baOW88ZgZs)tcZd2RX7*tTe zfiyKiRIM(25UnrQLg6uDbY2UG9sDoB5b%1nFGVn5Dz%6QF$|S~RKgX2`7X>(=@DXxfpQ$8fg1o`E-c(=iYX7gkmOkDJ@iYZ3t$Q|C|PY#Tr1rBK#CQ zEYs;&jrzJK{ZfzW;MMB|D_h^?zNBx1VF8MOrt*Z|B*i4mn)a+0z+VPstT@6A-a9Ra z2kJk&W(P;7D2y2^`APcr>~}7&lE9O=Kk_YQ|CjzesxX_PM~pc20f12u*m7hRW>o7r z5lRwRj4VyVY!vCydbB>qj;0TWByC|Xu&#UKW;ER?qS!dmX4*y~Lrv?-i9Ul!X zEOOgu*(U9Q9~l~@OQbyO4$et1v%nt!mfdzzK?EQB!T+VL5KI^d*%}Oi`Tcpa#vVlT zxFR-zoHbS)W(cB2W3ZCz(y{q|=PmtnNnh1KAR6FvFcUGWvW9*Fu>_*U2>C;0E-_Dt z@_uGg?Z2iHAALtwvms1nUE;E+=}L7J?gRlkb_^Ce}ngSPkL(91L9@z?XIlvS>RRx97``g7l+mOQ-`&KiqzvC&vw|r}TDLG~G1UIN)5ms% zl+PwyEA;EfJ;M;}z1Vs_2*}x?z0FKTNa!isg}Wb%w4clBeg-9o7gHul=gQKOKkO&O z^bxU-87~3w6TPke($W;EAD*Nu%D@Ca4K?*$)}&&s29mIkM(RK(q0T(v!iea|EROn1 zlBmJ7wg_lR8q~Aar=n$&9^DZX6r^EelR?xQK=F>18@8gGe(49R?T+bodl)9mcHVPJ z^nD5zKV%hcMQg(3wlr$KGmQCtm!%UW8Zc<(|2_fn&KW6MtSUOn^?P7EV_{9=FgE{` zKpJbdb2Gn;l7PIVyd=e5QQs1UQ+;;5(sS}*Vc z$d9mIGT@!r`-u>ZV$>DuY^IVEM?rwP&O7Y)T;v~%;V3o}3`6zRy>3cB*sBR}?7aQ) z`Oz<{Sbha^p<(($_E=JL%6i4d0iFxEP;tT_EKRpP1+4m%^`L&z{R+f*<;1~3i1 z2rDNHZP1l@2OR09AB9;>rGt>^h7sD>9rLaz?vfOug3r> zvoJ*eF?%~zMcYy*u@@$5xwRW2h4c+MU@Gu+D; zS>00ktsryW41S1Xz3~0(?K^6W-`@+9XPuH(b6n|&iExaKjTK*6J0A8ss`ifvC6?l1 ziL(ZJ3ukwV%YY#wqNAfD+D~YCzz2arJ9rjn-_`BEYZKb15JKmYNbYqsrjjIe?A7hF8 z%Hk*0O2=WzpTpbi@dYtATC_vbniQB0a-B4pvnyeA9Psq1|^`D=sfu3-lSAeQv#~{g%R_e zy8$3K-ex1|{DTYpu@LLgp1?Jj?FQ#NS;U~H8{L(i$#wkr@$S9}@TqK#xsU5%&%fIB z+zcvi5r7zJSy);N=0Wzr+ioS2z2D;8pv8YX3y)w0-$B?q{7xl2d)rHHCd`$@hm$Ct z&_?O|Zgsi+7fJ7t9ySz!kp^jSrHh?P#+=xU<&gUmL*fLl!;Tjc`-ZKa`*PeVJ-%>m z*8CiLq9BjXSMw{d~iMn?o#lJ#pew6;t6868I?S8RLT(r8nuMa9VOkZ zK7uSfTOuq%**OTi%FHTYFuXj|-4-pLrut#^2}9N2E?9j)euCMZ_rYaToWTv4-1EB{Q;|gK=LTlb zYN*0-lCZIS!(J_mpB*V@(=fkZ08^HNn;9W?liY0O1^cMU(r`7 z20)#Vh)6qaLN6{Z?oZ1IUMMvmq^dtJtzz+^;5TYdLxZ*;N%=Aw0>r#!rW2b%d4h5&#J=QPjdv;tL9P*Isc-`t8{9m<# zFfe?%L6~l2J|b7ad$<4Gq2x*lV$p3K$z6PiN$qc&-%jnneOvPqrf|4H0d*vi5&Fr; z$fXY+cs%mJe|*kn{9m7NkSDrUoP)J$6O^`}ISJ?zz_*GLeusp&f4<)TPz0S1fz!tt z050$ax~`OO6bP`vbUF0dZo z&FugD0Z~YVwZ8IX|CT{!d<|frRrFb}h?)k28P$-*`D>@qqd% zC%D%c43sE<>bKZ@LieES!lIdZnB_1dV`T9soAUP8 z^nYC~zixmBQKT)raBKhPe$h%iRDOC7Bp3J}gP%ylY zxp?*opZ#ArkI<^V?zknL`9I##5SUa57R;CLmFdv+K>)2o#>8M12bbPHeYo-KkoN0h zfw|oOHBu1Lz>>!%yBNO@V4A!^8se9AjDDLo_zy8+|GplXK_VK|mlJEY{}(;QfVuOy zHk;|6OsKz}>u+BPBon})>vM@y3dBkfFLC&fwBBmXe(8EvfYKOwjpJVp{*O0wlo|1i zc;(R;w7+g@?!zYV#rZ#8%@M3Zgs}w@jD7^vLd~aw=?+;gZfBX!9R2_DDX?~YE?q+e z>jB9UEi@bZ|6wOw!e;R$JGWQBk;nqfqrhCt>AsW=Fw)flAw1Gc*Zo2^)-uwrzn?K! 
zk^`!jMkL3=8itcIuY7MQ5(YJ}WixaQn-hA$;CCNZnIb|xCcVf!O zPm3sXE?*bA_uBc@Y3HVf+2DAezH!?{TLONM?%G;E81ZT=#_mklpW)6oy%}u2hIb|y zCN}z*Vw-{@9jj0(F>IWq<4e!w{8jB?K37FGwkeH{Z8T5v-@zK!l3%;en}inX=(ux6 z12`eSfJGO9WaQ+S@(IBEjmZR=$gQJzxAh97UZQ^#TM?IAeDNY@9FE}HaRmA&+r6+r zT4%J*FoDK{3{^s>5pSl6s`5q8I10Y-X~n{HbBnGUwGSTDTSkb%>F~58g=r$fX3MZ} z@^YM><70r%#Z3B(m@u3hpHG^pj|kmhR1_XfJv;#3 zAW!GJkJpQTi?W}jkKzp^FezQRHt1~ZZZwuTc7R3o^|Gqw&3^WAAZ?p@x4kL z?rtI*yF2|JGIWwbxa8RM=6J(pKVN7>@{^WWk zBWb!+n@%oS(YU#-)78z#8j)+06&}LNjmg0;Uy^uZyYTh@?LG)AB6w`G5mwHmn>bFs ziGjlLU$^V(M-P!HF7nc0$@^E}a8Yu_6%YPaI}vIAA@;K{9`A{cbOu+Mlmfi8ix;!3 zU?Ah6WnsAU1FuPDYwE3vX4{uoca(^vAsh}XU^Kn?SJKlZn z=^~OMI&p1Pxd<2Dg@rHO{!6V~9U>D&soWV&@^-LQ^sC2znw&D|lXs{KG%vgdog~-> zqbnEd{~cx|LKw`TdV{3I0R0HjKS^>qJkM>C0VQYVwaMFG%zxdIfG&~9=CU;BEz_8+ zhkMzYv~iEAPn9nSd7xtB`(1Ye&0LeNzGuIM?p(miJks`pFi;zBGkv z9#8FmKZVYhLc$coK8}BI{;|SoLp%E==}=Z5jxkrlD*#oUP=LlyZ%Q%L8bCQ?R_BiG zi@7KdRht8lTg_Hy6l{lFPa*&S!a;4%*W78PeCKyLre6>US^ItK4?gHpiXjI{>q||g zzwYF#+7Kx6n*r2@b%6vd1i%L5xMFM)b0m;Zy-3*`XASxGw#|`dhi6d$PkPk*_f*hX zc?Ym~4d83EpjwU0fI|Eo6Mj1;%Hr%V8No#hi3z&U1}10C0PUMFzJprChZYyeVtX9{ z<<*Rf&6Gy*Vt(0AUBhjs8EZSA7FoIZy!BrksSwi-Wzgs}`~Kw= zxIS5C?U`?G&jlaSs{hHK-e2683H}6$NRDH zC!GD#5OyW!_Zz!`49*~~?8{9Pr-m1w9%eG2^ZECP#N_Xi?`Gn>yP@5p)(M>jG6aEH zO8?$`iXF*-_wP0wbT0z_M7G3u($(|-x|@%= zr_KujaQ{Njy4mkGVmFI0j1^YL@Q1-`o+QGM-JokIfaS^+AxKjqoK_JJFi=z%?;$v^ zSbG0LMF*Ce-4EIgz`?x~jf-b*hk$sio15Et#o6mmvX8^z><@z}!84;8r;J+!pNS1Q zUksW#_i>Yu_M?iaerG@cc)ge|>*OqdgC?%Cvhtgr(_i;@$6xuE3Fqi&|I)K`vBpJ;aschZFT?t*{KGL%r=?;&OSZA<0vzgh?%bip z&n|dnGCTda89XJwy=5-DiirF`)%@1#TYsKm7~(0M4QNee2~b1*0Iz^R$`uC zBtk9&MS9HYLey`S8|@D3_#qd7(&ZCBQp%h^2p0cd%6xq^1T4UJfQ7|o1M*TEa824% zJ#d3LnFHat7(xbTaN8WCVqJ^acB`wa3!Sp*}+8zf#HS`3M&ohTDQe^vlee@An%{{ypVAqgsJ|WQ!9W9*+ zmDxGF4jDO7NPoTYGi-^A8i60_wE~VV)DZGoKi)Z(49r4h5SsWtKR>@<_7ybQL3ers zP``#AQed=!$khetSp;v@*4ExE%2AwG7;HlUD+Q3BctA{J%$>Fugj*;it$^!@xKcxe z`37psdf{)2K>_#*NF1=utnm0!@H4ULwcprY;X%*!xeu841b;LYcszN2|85+j3dxS3I>4sgW4G~t|t|hl@X|DvX|V}b2C2`KwBTwvYKWck6~er zLW}w0PJi3u=6P3W1AhbhQbX?!s1MzawBIRH%zWy4iO)eE_;higyHd4bIn(tR=FoW= zeQh_+i)?t;oi~5b{QyL?u2gpdykfWNVtx7Osi>$VCS<@` zrRjDME-QA~a@1UW=UeDHihxi~$aE3F$ZrX3Ry596t$hLtlA&NFw8#SC*nZ1b-spb9 zlzVxZo%a2ZD!-&x^1%AMbmm0Xg zt><7VfVg*{>FUj|mUwP1zJ20gG~>J^Qmrm2C!PLRu>4ea{4|ILK$Os-T!M~BA-&$! zMX;6$LSV>&?qc7=r3Z!pjkg9Q+BW3br^Dy8w0+!tuwA*^GE-LR8`S z9VVB#xD;7+rj542!4m(D)@(t_XL(8Y-I`moeToSWh!fki>pxIY3whO8UQ?L>tuMBs zvL0VQ$RX0^j{>6(XqevGef^KP@f1%UKRz~9?^c3K<3mD=id z*Heb?ZJD5~)GROHJHn}_2{ugliqJ*$2TLW9XU}39v`MB^z+`ECYpeJI6BAV*^r0$o zh?U%}!pJyGo|Z-W0J0m~mad7+QaSWP)4atf5Blk6@fd(ts0jPZ`$+D}B#;e}Cd9ZB z$%{_y&%5=xOC}&Kg!ByY`%NkAXKvaKzak2s8%8!KSnY^`Tz_0bf~OT{9{=6CSBPdI z@Gahe??ZzkboB^kDGi9Bt)8uLSHJC3R!(%pGcbCc`S}<&u+Afe=`3;>NXS;`ZIdW? zwZeli$u`V!p zK1m(-=GJcdZMETyBC23zT?b=gu2L}dod1M^`ERYUtI1d!c%UmLU2Li7E2nCEpWJ5X z=m;a8q~4HhWnkxBZtKpZ$Ccxy5K=td4bz3;A&tWB3wCiiGBRtsG+qnV(uTJtpx=b3 z0M;=&!WM1}WfMPvN21ZWIUZ*0xl5&|?ggqgHLwZ2<5@G4GdiS;Tl79iR|A&;@#7nx zRm|0)Ld-)qRtii3=^;!6hHfVIwubaN?^`S zrq?l-Pf~*ZL%Uyk5R$o;M=6>*N1oixUyXP?dkoz(YXPlfBS%l@E<-aS##OAYSt1MC zY0}Z*>5z93wGYh#yM+dtZOUrJ6M7%o2tOhXLt7_eVq&RcD?@XXYKE(l_EF3sBguS@ z)VPFdt>0QLaxHpA!Q&l;-6T~DMe?}Od9c@0cRB;p(@1gcjzHppudXuP^cEfzAnQmn6%yYNa!VLAlNGN@BE)E! 
zk7&rBk@R^avU9irQIY^fiF(BoXUMOU*jV0%0fbS6$JDe7J@#O#yY8d<6rGmr#skV) zO_8%!5#KLXFxDf%5Ot!Zd~1$7k$kf>3o5*&9H35#KYH}2tkLCdA5J(Yb+-1IdvFSN zVS(Se+H5arst~XtG*9ARpW)6V{~hLv7lcD0f<@D_<&yySFkuDb1>w3Y`^%;#A5L0_ zHdCs<{my;5cTs1JfKZD}V`x3@RFRow=l#;WJCON>yLxq1<kN(N878C ztKd!M;^ro@UB@iG8Oi&df`~3~qUq#5bn&nA1Q68Dwv$7_pb)g)d8#xP%j!RS7Tk*B`&H00l!(ZSdxNVa=TL6BPsLxB65`>L0O>Y z3Ia6NMXKdaJEPSU&aC`lpg4|jzs5?D^#vN|i_iwfq)Bs+zyMnQBXWG8)qR6%or$S& z1YN#4mFi%fW7&~q3FX=%h;e$(_4-FsN#Xeyk^c1{4gx@g8R)K#zM&t2aZmulaSL#Q zsa;$cT;rfjtrw;srjXlQ6hD!`PE6?Dg+NMm z5|1U}-u_m>{(TtjDE175)J2hlGT8+H80E-Qe|sjL(E?6_1#}YO?|{1*R$K^gVa$WV z$)u<_E$wK#o)7ZSykD^~>4MKmK(`3j+r&?9~6kVJ?CPmh41;0F%+#R5Ff>+ahj1A z9n5cQD~TQ{aco6K0EOdDDsqeVAtiYc5pL6n&ye=gO2DEa$fa zgB>3r9+iNG|9JJLC1fW|8_^nCT4Ipd7|bCJwI6V}y6R`)58DxHq05J17Jt{bl>4Y} zO350>ZktwN@#S}4T2yP7H5ZR+j z{s%=9k@l=W1e_u)tLT^ABc^mCt?$SE+_y3vUr}-Pd;$#?&XZH;9d-+Sc1^M!6Pe2c z8tQ_L&mMN!4p$qc;_(#00w8UbOegEPnZ&L{d@9>N!Z^UZCfNG#-mVWHQNnOWmUmBL zM%m8_T(0S~oT-_g$%&DzP?9L_ES)-cWwMqo%7PR~k86by33iou?31YCxfE%A;5Th9 z_&O`^nBV%`00bQ+h}8Nk;Rs#@rJ`YM-y6C}n+n5up*=*O*OcDqhFm69Uv@T2ew&-O z6W`8qo69DPgvR;=|DHl=#8_Q$Dr88Od2*_%C|LTNmdq5kqXfM0V;$yR=WO)dJzm~i zW_QzHiGs!YxsP{x{9DE3y9OD_^@Q@;8O zW4LIU4(RV#ks)=2dM=aVyF%XN>pv)=T6c*y>;8@6o6QqNmfr-iL zl{Os6#Zyr#X0@~|FVa_3)SP$F!2Oix{n9n>ybEeViL36L_!ll-)B{c#7x4dAq=bf6 z;GD;NxcMAQm9Gn4(NAXQ4RPYca#@wC9S5Kif`BbU`DO~47E)2PwzW;S-;gjyHbyTm zy966*KE?6d+-m}AjqU5VZ+B*p^Wx#dH0Buc@a_y&e8jy3*&JEXJfBin|pQLdx9PQD7Iy{d1Bzuq=eN2FOH*n@} zDL(5R_vU1Jdp1M>k>B{0dhodQB$X^IdY>2vIt@Igu{_Rt7X1m+MT zOlvtmz#C@`0{N<~qqY9iY3kUX-YzIpE(1u#G(P-U1stn3aBfs_&f4rcLw90)Z0rYE z+H|lvExc7Lkhqgnr01^d9Yrhzri3eV`cPAH&E~dEdEJzty)6;eP*d{=2DfCMJc--h zcEIeg%`E{DY3_D44hM$k>pHZFtV+*#Wl1EqvCTJt`j)An*=!vSwKZr|L+qA{#o)cD-D32sQ`6e6x{%&mlf8?uls{~IXo~<+=zO6L<>r~3p8h;2<>bN?d;Y-R^ zDGp#U3U8e&=byAYf0)xAzE4U&IIe_R`!$&N-u{JuOTDs)R4m?C4+Dye0ZnVoyZ}_mqS(U-u8ww_=V_*bE*W(=o4_e9EOS4;VyB=G zVgysDa+#e`bvU%7RGMK83EXo9cM<(%2@+Ema2hwA9wgOqy zUwr4XaQO??I9%p>IPHH7lSp3Z1yZqBxiC9E&7ldY>m*bKq!DWoDt`+Urmjx~3xqx{ z1J%jb_qGM8&6%5`;B|^9ztOk z#lWG&jicvw3s!+vzlMt;dbiWgB9Jclp`8l2ucSu z+fGPfOHZgZ@hm-vFs&jc0WK!g=FyO?OC!^C{S{EM2LuIa*=>W-stVmDXi7}5%z5P3 zCF!=K962K-h+T(~0a?hFQA?Y)6A_8XNFRg`e^W_)wH0{KSu#7s(T0`Hy)xWS486$s z913(jdX7aoEQ)5#`##CQcXP4>CT(d@-QzSjosQtEAdnJs$u6Lzvf^U;!s00R=3E}+2cUlv#{RDn0%HC#s-89$|$}K=Y$3nGA%JeZznB5d7&{8XQGS2CK zfL2~1zq!dO5zp3_(z(zPm3(~rk_H?${J~l_@36R?adh$vehQ*XepXPX-UJ)%2e>dO zu(K6L-Y9Swkq)9pAIM(2;-jns*0~wpMob6###OXRvxWQ?Mhg7GuubT7sVk}Sv72&idBqLlxAF$2mJ%zl^wwF8l?&i=oB)@f{dFaOSbgU3^7%(w9o zx0S0yPv8FjsZVerT0jNcv2vbGqGAm10A#U%@j&mnt){uZ@PqxrStlDyhN@4Nf8$MP zl%=fytbf(n*Q~B3)f94|IW9)bE}^?NNTJ*Nod5Sfgnj+dg^C?a`EES8tQQCtzWmrT9U+)bFjWm#v}<{%}pW0w0Oi z*1Wc}F%hO{zx=ng_bXu%hy|*XA*{rCAy`glJoI{lk3IDZ*z7tg)s^R2I5~J z3bs?u31JjE*Gp4_GhrqQA(rTGA@)}%a0ogYO!J3xE=kCOUOOcExjp$9S0dQE5?tmkmcBm~ig@Ls(z_F~J-@lE@@8ubM{p+z z3z&x-&fAP17Yn-ncmQM&u7I!gyw*xhx9~S~QGwvfC3@0kzpCLJe-?kNP$jk>g~#H* zF<9^MaGD6Pv+0kQ&bc1*K%2`&Uc#SG99_-qWxd%D>SFSU)Zk04n+YjFRQzw-Av9M& z=GU}CGw2Va0snG@6ltpE<=$+P=paYvesx3X2Kd3<*W$YSuPWx@P;W!=N=KCn%Zq$j zRew4!{GvYUoH8lh^dZA})rj_(`<~Mptdeq0@Gj@O=n`{`L@F|3vlue)ZoEo?0ZN8a6$ang zOZ<=1(E9l&TT_h0@>6BPCz{?Iz3@k@=!WxNv8OERiC`3;-riKIdOK(O@BQ1e2oW)Q zC7x>^F&n-rfWv-io`iQlm1w?)?O8{^+)jtfc&1<0FvD6Xl!@E&>8E1G^rU9EIaDNs zT2`A!*0fiy*reHhzxSpS)jN07LP_mhdHGM_?CR>{eRSszGjbFxb$3U(^G~=|+II;% zcBdZq<}r5gQ9l$F9gO;;SX;7eb$ooQUzyoHjY)E8o4DQ}tHdlZlKYVqZwX|hDCQ>} zWiTC(z^JtLfpOZhuX&WiTmJ%uMTE)qM>n*6e9G1$Q&*Vkf?kbWoCL7oAK}3D9g;us z0mjmkt%*xF?0zpaW`Dc5`NC!WER?g@*ZvdmCy+0&^yO1Pqb2y0lWX<5S!Av?%y8LE zepp=c#cp4MSjL(wE+k-{dSz!P=PQ@I{OBfTeVqf3v`75rO-o4@TvidOVonB$X*|8k 
z2m3LKvI273+x2*i=3FZxDEsw1rTkSrOx|2=c2(Qai_G0a7@{C|x@z6D<99lpCxZ1P zyVnwzv^3(9#V3}z1W0eswYJ1oGrE$7UrJp_e}Rjq)G9)K;oAE8b27&XGT686y1Uy# zQ!g}sTgPX0?64vQ3y^6RaQfSy*2{zj4kVN=sAqsmM89=#&Mwj+475vJ~; zlHq?gDn(oW^93Kpz7CXL{cChz_uu<#9JoUQQ_}8(Koos}~Djkg095c198zwFfZS;`IY3e@PG-08RCfidn|kw`R*tQ*$x5gpJw5%+ z{`#L|j9y7jHbx;5ZxB-{R`yFZR?fqwg6Zq-ne1ly4 z$kw^mP}*L$X7TmBv^3>9IfL8%?u=K2^3_a2XHI?gF!sJrWYCvvJ8f0aNx>BtOn9!p zlH$0qul`7tO2SsE@lcm~DO-uYy|~_E?1TNqgL}RqB6Yjh*t)c9cdj_FIr-d^i#R+H zaxs+`2A%Wj>J3tTLJWCjEx96CuuxhV1FP~XK|xQg3X)CFOk7N@zt%l_iiD+W=eJcK0)$Im?j*75b|jMm@t*d&DtJhKix&^ZG-6vxT8c zcaLyz7e{%y7&qvD{nh}P>Y7Z|#I*MEd_vn+JFg06;@*jaGtXrZt6HnE{r!(Nm8GKX z5ela!?H9!^1jP~A`30LEoZUO;rKfL6cf74W{WjNWXHCs$(ei7xi1W-lnx-g1$A;D2 zO|O^nJu*`!j#=N*uvw$b<9eQDy4ab^i_(86lYU z-GgfpFoY#uLU-^;KKguOP|IjtrFem-Qmk>6w-hZ$>r8&s;d8U%(AfzxH`X`C4* z^U1H+si+(cWLzG0QG+CIry`X+O=OyBCc?pjP}R{2~|g+ z`%U#1v&$d+)LCTeBzCzf$8dk&mz?7H*%;ix>|Ot3{=&Cm6TJmObKk2H60b3{&n0nB z*~yfnC8rqa>074QdS58!sJzuG8m=d})A5vL|D?mgz6OYMZe?F`+8xyS_ibu?@+oBT zgC4TN8Y?meX>04r3*C+DUK7((n)yE#bML8DVma-&Qb^2LZe5?U&>Mbc9>rVA=%D1T z((sIPv$Ng%N1wvCnj35ZcYeREd;YLAb&7fq1$E8gMjAM8?w9zkuZgZ+zt-VzUS>Xf zi;Y%cXUlq}Xo6b!G+KAbNcxJkH${Bc+34!q2+DryEw>zD|_(6ITXQGEw&d$LpgH|75x;1NiX{j#n&D6V3-T6*Ot!C+A!WMJ> z#9#Qr{!2THhg1zfPLJY2FqkQ=hT7Y8D|B)! zPO)9m$e@Qf-=6iH&LKrJ&j_XFMLuq>{PEanPqF2EU!tM}3=gg0f*+n42bTRk(>je} zBQE1O4hqKR;tHPq6(C4yGWibF_;5}O8`YX6O1GJbE0aEuG>%89^w{^O@gK$1hKN!L zr*a*~q`V@D(N489l(Ga3>YhA9DaMvR4wk{cmfyoWfFrJfAk|~Ay$=<%U)~QTN3gsp zAs-`XR@iq?bWNc7*>!R-DJiY5LrTJm|LYeU-;X48al~}-8%0jYL8%}9U|QdSGJ_vk zpKS(te?m=~nwxw5ofL{QnkS2WDB;85TWTGyYz<-*`EY)H)81$B||9!k>d^)3)H<3JzPyLm~cddZq7N# zZCTmBaQ|cOhb4oi=9psHvz+Agft#zWw6tHsNJu9?-mQMSjPWh=XS`CiVx!4`sy!#9 z^w|9npm~b^^q#s^%RK6CnkrY{6mM4fy=sOCma>nq?xHZOZ{Lce_0St_uSn@@LU(qt z0_9|l%JRH(Q9ZxxD#}VA63FVXR_kQX`jd7^&i5ZRKwXh2okx_ zOm+lmfT`YmW}g2TCtN2lcF8?Ta&A-^7o6-(%u&o0SvZ}o)o?rF>gKL#RT)ZO7*8fK z_aXpEjFeId86^oJnUR$!JA02pvdP{fWhJsVk-hicD|Bf8~*J-q-cMuGf6No=@=+)wNOSD3F9DIwV6T7}SfUZ`}$>wwa`@OEw&P7)egC zR4bYys|Ln%xjB9@`EId75tk1Xj^P|j!u7fQBGTWtOe#7D?G7!vTY2BT zytD3pF)H%=;$RTuf=QihM6%pDodV@JND;prk^OWzzXr!QZ5=kbvL=Gyks(17n#|0x zcsFm}g)~c49H&Fy2n_wO;hkAAd>zKP5+R8V$%$&zI{&?TiK$^`{e~}QtL1fv6PoJ- z+n1|P>j%8R`6DG#)iX}%PZRQmTpo~ZAHFjYef_XsCuiTlZ)lOi5@9QRie?ZqgA{vr zV)XNeC2Y%n8VeNMX*AXQXD$@AZ#inacz0@xw zKi!C0@%suqS8-axj7*kliF!x~jo@9kwZm}%9@c!l7XIYOSc`sN)8!05RrOgv!oIIT zeVL^|NBqZY{PVw*N%+WiSDDOdSGh@7=h1fcQ6~M}dm+hd450;Rds~UYoG~N!d3ETA zu1xQ?{py+OQh%(x+I3aLXM1bzve<2}p8M3))NK{>w;}7G>b@lLUpQqnP-MaP^1Gno zdszs4m{)a`(FG-mepg$kN4mpLrv8R_`HDkeO?I8N;WO zO6?%-U$T5??HxK`|9-;Ssq8c$L%~S=tIC7M48865<(Uqbt&Bo-T1h05ur5~bmwlD^}O29**MGMZ`jqNtY_6)e_Vuplffy=UjUNg^}VJEEEKvohgcm}QpL z>Ol%!6M5vT+t;eLzpo&NjhG{!+;0g)j?B*P9O1d>h%hAn`cBvcRw`f6*u$wb^yu^X zM}9~^_e_yeobHIoPx_hKzq2e+*#5=1eV-)I9Nk1)BZx(ikUfjIo^97gzw32-F|kp% zsY&->@U5s7Sj!T}u+`(ST+Gj+KE5F~t6ABi(H@3qkEkye^E(2*FAV&Y4XDhoOim(c zgRd97y?{{th0cpA9zA$>Vfd$MH)^>x*$rk!aua=ULEf}I=izZVv{?!Ch>TRjNBZl> z*O=JaB{`ggw2i>TclZvH-8FXGN6ZTPYI11I#Zw{6(jW(#&3yeNf8c_)To8cvUH~l@#IQELd0MR^NBtOn3dr@5gd3NALC! z4DdjZ&3AJwSGqG{L|b?M&k&YwW#*%enm##Rma9c-my&XU{biGjNb>8 z5`jThyR`x)NFh1wy+)k*3Fit!7nr7#JdV?$k~Pz#-^HolK_5RSs`7ev95Hz&2tD z_)tDtAtp7`Y#b&1V^@Mh6?h)rWK1S~PkH{j-rj!$IIwutmLZxH57j-hJMk8?dwxMB zsIW(hxm}N<_oStInfA0u%?+5A!6t^XKt(N>J7Ha?AG#~U?P2Ke4;g;jz@XkB{(5NH(qN9qKO-}tZ*>&;lsnz! z{reb5=Rk?2;LVHj+7)n^%O?xd16DlN?)szke5r0yxGZVtY37Gii_}KS-dbM^_{n+g z@(lT7+WrxX>dm%0goKGWRQbBc{bRaCB7&2$C5UpHFn}5#xUKZ3P^nuoh=@vPp|>uy zy5r5k$FXPgWt^`{tE`sdeRWPZ#3a!Cg^_G&=GEvS&}wnGBE+9qUG1{DCeARH*FtX^ zH6Z7=4w+nCRfuB!d?bF+ZdO9c<*4sOOWmeDeffRWlgeRNu4RZuYNTWoD$bK}DFy!) 
zFag%eD(?1U+UjGOmRwe&nV580mbMeSWuLI<*=W-=ZYPeaM9O^c>)kPUji7<9)L-T67}8r4RXQz+uPWgfl`3>H4zCBeQ zWGg!4DbW?Oe-2;V@-Tl&)K+WvQ zgvqL%VhFX$1M1~9flp4wT1DR7n8X(e_Z{3hkTLo!bO!Ozi!DRciN0{>7SCt3_*yNe z;AQwGCnbLnuR$APoO#8TAy{gYHJ*H}1om;2vOwN2rY|FmLOp^aQpdOVL|ql3Y*Gyo zIqfgW8D%qlo4$mA-3TE!Wnp7~jAeyNM2K!@^#Rz)-*NldSNOCjQvzs@5CAL=$gy5K z4xLB3@!qJ)e*(vs;`2?zmeblk`E=N-sgW8Bt#EPxfyA?(j{@BVTxN$|GlS))uxBPt zNuw|>n~i{Y2ZUVwD|>o%=M_r7=;P8hdU~Ghx1iwZY#aL3<*>ZG*f=di@|r|W^$?)v zJNx_fBJp561?82CN~f_#$e>cR_$fS?{f3|W)2_pvmlf`VVVy9YMGRT?_%h|r8B2lo zeb)v1s5vt(w#uQCuX#UgX9POQ>Bz2DRp{iYs)B8Z1KY3;Imx_AeN<%o2+L;qeo~5O z!c(?CYkk;;pqHUG`(Pa4?=5baC{0R7o=*c(+JYNeM=EnZx9Rqf=E+Xit8sf^@A`$5 zmGPl^nGC`p&(u9AFWS?4Ot9kG45jkinOI=3+I{-=WpUbu@nECoSK{mrg|yJ9H|Ko{ zH?P}Dg^v%y!^SJR z{s?>0%Fkw`D;BZ6BkB=9@S-G@3j|F~pwfD$_(BIFx8TDI{+2{GDv5qFNr{+So7J;Z2hSkY2cDq^{}KfGh86XDz2fF9Fzk=iCGQoax@mEF7DK0TdH z@^;)?-`gZ7I@N6HRlx$}h&((XhDudj{56#38;oA=V6gpgJ1!PS1eu-?>)w3sv63Ak z#Zx7*uF@9f%tS;4Zubf6SGq^NsRQD=(~=1Jyk-p_X(=&8fc>{lk}UCmj?_29L(9AtS5QBhT)s z_TS7569`M9PA}o9m#kyk9sThMchQC7P%?&-`Of`?KXMXE0%=s;0tMgqf4&7mN3Ze< zqw~pnw_1>tU@bIXU`I*Br`&9+Gc_{gGvpAUQie*3tE{8$hD^rAurJT_rnd>`NI9#` z-@3(ngeO8!lKFBzXHvatH*Qj6rof;7^6|DWd}koVO6n779NP=?JSfHN-6i|FE-_Ir z2pK3-6<^stKQdmDZrHff&Ps2xlW3|>Gucrk@*}$qEw^Hm_Q4Z(bf+r)M9n?q%=HnU zB4vUrR$THYZDiz->b*S|StD#-i-{t~0^adF$ZX`jgX5L3%%iE&JIf&47m)i5|G1Ee)&(ds2djQNV^U{KCe0QhlB77KZsN)>dd_M(!9}F?kU_O zDrJXe8;e=3E*!Y*Yt+0=0U8`Xgj7pD?EMgQ7EmVENmb*0^^GlVP?=9oU_5Z^W6Z~H zLh?Ps`RZfzEWM36oWv|TA+q!9oghW|;zeoyUP$WjwBi3Lf8+&!xe4Dwl^SBh zW-k0nk1gs(1KyB}8%>u>Rm0EZQq+gAFUf`tW~*n|7tztv7wIeuhsY~@80t$(*7fVD zrG7>w;lIb8d+c}gGw?-@)w+veUUz^upTycTwRk)Vr>@oDpB@0S#F{UkS~N8_E(QK| zDI>&pGDur+g68UgM#(Gt^z4I!hWDeJ&)(0W0*Ta}XW40vT^08k1bAQ)@MSz+G>NN^ zTDujYbFJCE9%d2=%zqpJN;+`xg`14u2c7T0K2WlNFfm?~{0QvbIEb?Q566#JK6hZT zrb~*lJghFz`F@u#I)5Q!Hr~Ay@jRh)p`4f@$Z8g^`qSNNdp3_}O9`l2c{%C}Z% zef<2k4Pzod`MGW#Ryh=zz{zFVWY(->I7&Aq!&*hW`|TKJBoHLopF{x`5X!SxI!fa~ znr-v_0+RZ7;pN4>H!L39PKTC2dBHu%Ob|`tA~&gj4II;*iN`8uid0^XZu)ShH+D&h zUBaDM=0r@&nq<{T+d)w>rp>$isa5m@;d#V!o#}UWMmyU~ zYrHm`I}RUrx@@&-W0>whF_BX<`ZKv4mgFQH$z>y%W8;EK`f5YCZ4AVq&aw|<`bQsC zyt+22E#X|2{{BZgLH1^Ww<0uWfB)s=>cE}pVAiA|oVtpupm%4kTQb9|)HN zF4|B4c!UbF3prQM-R6{#)^LpfFYNo4;$pJad*s)7cOyG%S>J+6Zgvs=M(IT$q@Hr!y3HuLG1wIf0GpMVm6&6v|4BaZzXgm zG8&G*)Z!TD19{9WeaiiF`gEqD)G3JZp{wDcL_G@NLn)4lLP-IPZ~K0`4Wl6npS+v- zI7!ncrl^TDb(A`eVW_yC&`N>ZW3NNH50lhw0fIalZheZ>0~KH7@e z@P#%lwp+97I3|66DJR)L9$yrs231Y1Ja_nNj(u813prTiQ|f7Xf>)7^6Mki7Dg}Ba z-72#M!jBl)hZZ^MvA&A>L6#QW;`R-NuHnlkK46lv+k>vEdnFl@Y*Pf)s5+W3?ajF^WI((c1LRSfwL2v+cVo#BOKwN$=6( z#@^o9RZgAh*P9!MAI*)7vT-LRJ-U~@PI#3O*-8--*q09FQ2O4G*0LMn@UB7C_x7P< z!9$t{OUFMN9KYecuZ54%w36>%B6C+RmtF{6o6G@Y;}%&G*d@{?Xuf~M0<6*~1P}m} zR#eBIf|+20;oP2+iI;@)@eCJHXuwPCu#yX|9SMG|x@P0@gC-6h>%MelpRT)4YGFm! 
zm7Q*puEwJOny`6OVMDQ@)ZpX#<@$P-?fli6qDbmy`oyFoT7BW~8F2?{!b*b>#FWFf z{Ic1#+mKjo52>#_hvi>vV96%1(F2m(1FN*&IOPa;*-z&VQdNhCoZo1(?Cpm-6`qYWx)1dy zPht*Re6Jl|~Q0n$V^VWO#Ns31p_%CS<7=9`)Rd}dw7WLmS-wWL;v9-!s8QNMKT-ltCvmKtFzTbzE-~+1Nj{l?Mq8H#1 z2C5tz<(igfLS6)Q=z?&^pU?^Q=l50f*!QN4A=$8bORho-y;v#Y0yICAQ+%`fAkU3# zEUlrM`f#eLW?wC3*L8ilbDqJ#p-CG~dv63;SdxCw>}@IwMfT+&cr}V-TEraV(ygIW zb)spT*dJ)}r$>-+iO%|(Vu3uRTuU@qm?c09V zT$RJusIkczf=y@}m>9?e3Iox22S@j`O(>Q+gi)G!Q`+*_7~d-p^j?t%-Pv)4E(m$* zon^vc>YzVUJ+GT!v%i0&8M%<>9Rju5qOL&EbJvYQZPvT9ShU*n-QI9+XDLlmUmq>Y zBB4-@B1`SZ4|T4UCQXUV?9JVoLHm{211?8^?e9)UCuzRsYy+h=h&@Iw8&M#)uRCo| zyC79275QiTXjwu6L#GuAD)VLfp{H#*R_RzwD0#i|k_roc^0EEmRxB8wD&{rB#94l3xrL1+#!<4Ff1g9W#oO_uUd#Lk0 z?GTC;!GkCWR5sQD93kxiVHSg54USDsFUtsdi<;aIw@1Qe%k zKURtdGg~UYL`>PuOF$Kq-plY9Ahs1PsSe7Hz}AzDIh${r@RTjgc^4w2)goE?q-e`t z6ag&Vajc?4y=H91E6=MGJIXDPuuOy7cUb4HI0f*w=}E9$0#!b7YTbDg!EShDn&%fp z-`wCBo5Lz&jbOPj^Q`5RMTPZ1gP=pWJ{4Cpt0^M_MoQ+O8+H+4|L(40@81KF=C#LP z&NrQa8r9~X6Fx5kyKNBI+79X2BcgP9qx^jR2KO_s^S0QLJh7`^a$OjV-suTRlv9(N zE0EmVSs<5E*=F559y?9{B(iL8*vl1X<%BL9dpdsl^O&12Yu%tE z+T}|&SyO9C_4=h8RF2zAmkY$(CoKCVtR!2qaA!YqWtZ6qKL9B%*)zOVKvFsin8?yU?JCv_iIV~Ll|ScZgz4sdtG$@YDH;fW>D55S&znl{#%Os|6%%HfLuf5`Wa9lt1LEXG^(0ZD;UAqVIau+;eK`edo3O?u9#N zFO+}{-36dx5Tk}UgCnLuEyS|x*13*PQfXm!0-W z8;e$0)e`G2$CXM@Yw#`-OFerMYdr;*AJ6vR^3+&|vzBbK#aV9*EDKtdW5(%!GI$h7 zlY+u(zuLMwLi*1r^e=QY99s~3f}w@Bwl>^U@}P@aDdjN;lfInt=v+SAo$HTKMKEz# z8>hF?3+r!bd2E-J%}KJQ;jv0JG`%2%3~MyVhtoyiF;my&Q`)@ z6TEct3zH10iJ%c<4@Mopu;6`vY9J*bPYD4q*_fN0fD(&~*)UXP!rb%5H*!Dh$n$2f zz5x?6Gjn-kcefot1^vW0-OeLFD$^VFBX?Pcqiz_jSZB|Ptm%`Y)6q``&eaZ!(cf1> zXHFM8TyK9H5@~Jit99!wci8=tvddKwy#C5(7t4(=Mom9m@MVI*r-l|^%Y}gyajV;e zRO*2|eFMYk0yVres>U*b$jDja+!9KYCun~4T~t;-;ip&wlWj5FMbxG>h9>RrTO&D^ zlU-MyoV8IXxiA3SFWp&QL807P^C&LXS%Iy2%`ndSFpc+Mju19^iahn(vP7&;r~=xn z0Z@z%iHuws5`$SiWnr-b;}02NR=U?bvC<>F!F>BwTbot$Wz8sqiAfp?wb0ObfDV%_ z>b#Op)(9J7ly45JbuXg}><@M*cZuufRj(3l@$pq}cea~eHn>hfB#zg?MimhJ08l5q zf1rJ_@V^5RFFH2>XvxT_PHzE zImo9hJ1Y{3M{IF**h=?u!QcXM%~!%JRmi79X>de3B=V3mv3rz9YYJMpCVZTxM?JapW1PtnB%St|@RvX*PfrDxg38|*tet7o5#|YX_g~4)u z%afA>!>Ui$ME&p?AE-L~NTeHt zF}vox-2^ULg*wUeVIR?fzf;@aw;} z6ag1*3q)I6TNlIL&*8&y!2;ayw4)Rb_)i9a+bRN(kA6}hOPAz)vK?2^45EndL1q%| zY@KdV{;@q$)k;R`WztSZ>Bif)d0QmA#J8R`PomA=(zVw*WXNCHRv$bJ*9jC;5F$(2Vu=4!GzB#FCOfg#aa z6IAZ)k$N9aX*u?d=3z~~YHlg*Qtf?l%`x*9&o2?6@S4|nmvABS>R`rIJvB4|@Ruq* z*1||PW{(O{+3dlBqQP2|G#kX%5 z>m}>6Zt!Vn{OH$n(Otm;{yiURu2?K0SJK!MeM41RI{UQp`G{QH$(4`I&4zo*Hg+*G z2uynP(vwsbmg1QuX|jVQD(d&TX$OtoLpPIf|FpN)DM69s2v9$vrf=;5(?m#23>jL5 z_HQa50xysW@DGHaPjT>H!dv6Dd^wxUXKEm?iRR}~s)RHjUrshS;7;A>L960f{^*i& za95LfQrw}tv|+VWxji&JGvP%^G@mf!Tid3xKN>b`a4kS-rGZi2gdd@JBV6)t1!sSD zr@Ov;BvX!a2r7F{-rolzYy?TqGo9-hMub0K?qa zeAH=o!b&9-cPcylNwc`a*F2@E;qfxAt;5yqboEe%f>N&eKTQ#~CNRy@4)CS(CZyXN z6tp7bvDLX1%2kTbOptjNM6s>R_KaD2@!IC`((p|OcN3mdxi~g=Vl%B(lFo>eF)dNs z=v6PNzF3x%H;YA>q(iH^mt_&S1>@E$QPd+#D>zC-}|& zW!S8@(4^jKW?ZVJaNKYM(mP3D&QZ}62(Um&I834?c@T2)pQAmm5GMe}^W+!(W==27 zuuE{wJ1%Qx#AjA&^!vU7Fp>r5QP|Ip2cOxU9rBZokJS&&c3DCvZS=%5t>akI%GWQq zho$|jRKqZ1NMTpc1w{XrmP3yrwl7MPNACPayIn+n1emA*rup7oKvy;MVEj=j+~@)^ zcnI#)*2%>M>%b5m=TX0P^;MO+W-!}}&AdDlZAtiqvfh=YAhr7{VS0%}Jmm6dVM!;~73D zU-Z=90=gb4K6`7B1@IkCR;R&QNF-@HrQ+@ z6^?VA_5Nbj0p0diQihm?FkOanG~);2$w%D0d$XTO9L>D3LSO(VB2{>-DOro}o#`Tk zGkv(&^x>io0!k6T2%p5d%5I0{HH`9Q9NK7Dlx)qrhq-8c(W89t)`4T0^&tW!Vu;GN z%eT9@b)RGU6PU~)OXA4Ri&F%tC|fWe?slrB8J;qpVS@I5DTt-Z)XYdlkUc8A5JU>H9^k!4FXimV|4Axk~d5N`HC0oOv zyhW#(9J)X0F#S3GSEsx$smYu6w=^|sI-9(u@eoJgyyb0w^3&9>A zE``C=oH%|BJRQkGjIzut7BhDv`L!*NiN2}dcXj1M7yNQ1jqr>^tk7)w1rjKn4F38) 
zc?$pdd-#vQ36TiBRxhpA#yGpplYCF9d_yAGN|b`9#XrTkg+QXX@DuL(FF zWG4H5u>ceGeoH7;vnoX_)Mx#;nM-$PB*5y5{};7xpQ0BN7LO*W22j&23CdHUswTaF zzi;I}FpHk6@P;CI>_uXJxBBTH2El;e4O`c{0Ns0<}Gscmv_}o(rca_M%;+L6)M&OS|BTWyd{;4 zzpuHR77$R01e`MqT`}~tdqp`w$3)K46p+Z?Sb8eW3{GKxX{UpasMT}}^pal<_hFt9 z-*b}wm2Wu*2do)lGTlGT z-GXTZ=lmD^7|lkVq|#FhJG3ck==YV_p9%KOvbmhze_%y>K3n8U>;_WtQL7o4qNdY4 zv#sF(Cm@1TPk{ny38=V8^XvN_WBaC`!b;{>@ zEkd8ZTc5>+V*>^)Q7na^0r#Ha>L3Ajp6jM=|C{5dqrd~cfn0%9;}bDr)CG~ zT-~)w)W~Hcad75byLZ=q$BLri?X~G1s;h^;JM4cGP#qSx}oiKYN|ZIK%LkZ zHRja84+ilR3vJxX*5^(>KYV7%$ zTsUGC0J9xT7`VV6wP9@e$MihA#$#*``Ek}|c;@3L#X5m^>B6@tpm2B_Q+3&drhb}0 zc?jRyJ2gfMyJXV@SvdYK#KF$BuA1uAPUrjXl-Vy^7SLOK6;e1jyUl{$`62DraD@{v zH2agKTW*zm-79~hK>cq!d*MhraPDZHuvW*LdYA#my$>cg5`RbSGpN{eoIzLc+#0e* z*Mcb)d*qnBL9E=KPQLC~iN1rgbIkj9^F5|`yg151d`Info2UsI@rj>&K70Cf%&)Jy zQA{RWfCp+-4=}v+0*;CmE@UYEIa&luX4r{GSoef8-fcU9hWh$Gy5#xNgj0So4P95An z{G)76mE{4=y|8eNo!VY)m0WYDn9Uz2^@GFa9BC4ylA+8_iSERJ+c-G-t~lnxpM|}@ zi%^sVo4_Pfo!l-EIDZY9MB!WOj?ZE=Vx`p#%d#%-8a924v5Ne#{%DBF-$~!ef={k; zCT}k!phHCe-lgOxC==i+hT?}d=m(Sq_Xvi$tuyI%VY0rf*|P1KnFN{qt#odfpKnFl zWR9B#R3^RxqoVj$3q@&<2jo9%9;tV)d9ZYqi^gzYd%_)U?uNSqXG+?z<9fsUzg3W4 zdIER4RNB3EKAC_fhH+OiCApe=60Q=p3qutk^5}JLO-w zlFy9Y4|j-(^AB%^4&=lIN8)1sX)7<@GiqZ80D(tX$wi48JNyRc#{?xQr~F?7N>ZQL z*CsNG9QFq6^k0uK#wZsuPL)?bl-%ELH-NJDL!M|{@R++L)4h?t$AG6EUXW%|aqrMx z%QTO={VtcY+V)UZmaL&|>s3F-GzXb%YF|;3r#nXYWY)!?^e?L#Z>pJQXQB50IhXO| zDCfg-@e{QvwR(od%V2`n>DxA&7N}p07BZD|<@!oJmM__I)a<|HjI6JSl~qZJMeEkP zf3a#-pg^V8zUfOQDKV+|PJe>z;Bukm(JmJo(<7BMUfrX|>KX6zc#)EWv?*V&DI-%E z$16$(@^$41bM5}T?Em+qjvzZgP&clg@8rv}+MR3F(&ox#y0{tEuz>~S(ds}&!iK7@ z9C?vZ%;;y~(@7nxPA)1P1nt)g;P1T~kC?(jnXXB1F40$%r^>*|Y4KW{Cshi2vP?iJ zFlqLc{?L)j;IyDsjhC)Gp?dk%fpC`W!t}dV=@|BiK$W8U|Ef9u$K$R5^`7>z-#_sj zmy=q;Zm~UJY2fOIJHC$lo|?;1%N$FM)0F|4`ZqgIHgg6~Gvhpfojy04{FE5rrZEUQ z@z%%zt7NsJ`@UGdrK31IZgG+5!`N;udO<-bgM5Vf$IWXH9~|v;m(xC9o3>?E-nU;C z3?|B}n*C1IKlr?QJD5LFVWqQc#1{2LfR;%8=4W)HKZkz*BO?Fj&{gV`THc0QWYGk;|%BrpgQk>SMf0R~Dq4O~O%Tl@cu>sjI=l?bg)xdil-k7;S zOeVb6YIeUhYPFF%_6-o_%c-c~mBaxzF5g6*gx21nT=qgJDan4loIbd}DyW5v#o6h7 zEuvF=t%pNw@pxUDMUeP6KZGBu38RI~N(zaz%k40BKKjaPwI02`TOjHwC}!Y&WJVREC74Np_r?wIZdeR@XIjJAc0HK#3@w?lzgj?e5v%v@-u; z)c%H@YJgR24gGd>VYL7pm0H<{l*|AA47s}WShZSYe*Ts~8Znw5O?GrV3)aq-N{h5| zT9_#pO{$fngyd@9E25KSI5-CvqxaaX?)<&uWps`R`J(-UZe4#rV~?x#4y_TF#fpjs zkOdQ7nY=z(y84dUEWDGL4$q<`C;FL{Y2u=$suKE_2gI15#A|_wE`MLKp6={Ji`NjZ zxf$Tn4%YKI{^R!kpPfN{MGuPNDrq7Yt?qg`6xz*lwNja-0_*CMh8~A+SY}wm^cDNC zs;7~wTe6-9nX`Zjs+}Wl)k5vbrL{B$%MmI##{W$~6QH|Q0)N-qmn2HgCTdJ0d8H2b zyXscm_&fQb{a?394g0+^tF7484z9< zti`Jd{x+yhGJQ>K5F81&A5_CyRt`78B(D!C7$^YSeN_tKO5 zg!Kr;ubcmNV$S}r7fc~=sKB2eoyDSF_HHgNWon?2YGH9FqJJ{cR~~J=7g=r~H_7Nc zMgc7lDoECV?>%>U_~1d(l26Ozo*u6DZ##vdTP__-fmD>lCf@AE8(!>M%jj$meHS9gMadMNz#Cs0RXgk%2CJRxG$=;Qn5X67V9W%E-D&`wF6hs^KYgt1e3%iI`HP@l4jacH;u8|F4>q& z;`o;3X?B~tIuefWiqrD0{Kl2`05~=q{HO$(M3-t=DzULUpN?3ySyFoY{a^<1wX)!o zx;eAJ12dP@m15Wv+HMPF0x_ccdO0u21I=c16H>w~ScbMy4<(YDVqeq#aBGBfLK-V#!oh?mea zFz)|lGAJ|Yj4TFZx;pRXr@q{wnIVbWD_tS_1sFf&3qK~UDJzSD+^Y1S7xEvkqShA` zI$t0vUV7Ia<8veC56Mg_5Y{hL%tg`^vPKQO#WUVpPgt(M7Be6gs@^|(0MkJD=^~!r z^6~cW9qe`_?_+!P8@`Odlk+t5yefZ~6y0``M|-At$!xL~Ipy!ujf$0IwG*x00A3)j!&VxD-B}9y z`2^pD5I3ib9lMA^{`2ijp|l63?q3Yi6P%#?cb%wE&(lcph>400ObPHjH(A^G{sOQW zj0H-h3=>*{0W1}njQ8L7PLACZQUu56O4hB?}>OVx&0qnAL=m5A_UrDv4lCM<3;tl|P5(4C{P~<{RwPvY$Zc4hFA}`K{!iW7_sE_}iBc1U{9lG&YDCu!MpWH9;++`wTLWT^z;-0TA zr>$!}@y+ViO%Fz3v;u=(zgl6VBI*^wb3?;t#lL?!(cmR0pinoWfm|212JLTK6BK&J z9HN&q!Jt;y)z?KMb;nqrho$NL@u4_NgFE?l0S(_9wRbW-Rw#hHqDvvOy zQfhP4ub|sC>MEkK*f_~#;zboM;=)b*-4Oj9s$dXc8v>K|*6HZOIU|=_5PDoTG#y^N 
zW==)S86OqNwtbhxiu0~YrOu}$4*=B_Ee^jUW*AAkV9B-9=cgA%OtKSBf6DbLZxi3; zhFXuGEcgr(i7B9Tzg=z(SS59jXI`Mj{}Wg}Dt`QwF}+J?@yXlYw|`^0s_Gp$?Ha;n zgX#45gb%enn~~^&n-Pe4VjJb6B>Po0%groD^Zn4ebE%u!!+*#E_B3|%81#xMTP&2&i7)HLeOK)B(U((oh9 zx&Mj4I6b+86i#az+Fl!AD)V~o2Kv<>Cd#c)1!XAguxca&(DknX`E9H#ccyUXNox4! zgJnA;_3om7XLzIZC`*%K#38e&f@Uu)HMfsPd@Nl)-F|Onu6O#izz={dHBS4!nYO;( z(TqQ(V4PZ6DaWE&2I)iE(OVCVKvK0+obmG&f;4|hkfu58J)C^_Uo!5h;pB{)|&ICyL!T}$vH(-JsF%Krk4OYh9>tl*H zl)Z$G9&E?GTJ(3h8-hSEuT^e~&dGffaQzDn9Rr(ylK-Dyd}N~Zh;op1N{QoW+=C3B>SH>6qs5br`I>8E{JS_I&ph&N=Kkn!yig_$W^)0H45r6dfH?Sq?sdf zMtDWCbUv;FUfp&gqB4CdH{kA>a77Y+#BtbTQj^=O2viQGBhje{VzA z6;b?`2~iu6J(-DhITjZ93f+b-Guv&IVUE!IMHwkqa$1`X69{%aXK8+{9N2st@$#RG z{d@T`_rR)s40|xn_GBrd*3Skhc9(O1MN@iQ{%x%*n)${fzSR-NiJa=W4xG((j;g$) z&zKsU6o$?OhBaey0qT6#Fq?OCw6l2mKy031iNfsxWfX>~X%%AVgQ+Rsz{{u8>=80i z$IBv9>|?SIjvmTd2YFYBES0cCU>%Op;LHLj4GZBRg5Z&z8W?eS(H?*d;{F2|_# znphKY4XZT(5Cr%rd>d};cG+#P-C5*&{Q46ijig_J0Tb;n4gIl~*c`?j2^9bKPO%AI zX8_!^im9;X*&Cn2Mt9hw@XDD)&dLpor<0S#anc{r`hPY#)I8l?-E5RnAkrDz&MCjw zTxB6Du$zyAhW?rG^^07wFvd;~(~4xMf})~ca_>VI0|NtGi1^EgUQrkygZRxLLEy5I zka(Em*%PwUm)7pWt^;Wf$FUd2w(Tx8H&u7Z9}QTyj4%36ay}3y51YYQH?26`ta!2*KE873?MX7!{e${`S&$Cy?ABpK<*6+w|Pjz;BJ#n5x%2=uq z4X0taK^(D9?k_~!yr;8&ra-uNauAY-NUQg-hAQrPGA|!X{e-^!gn_L?)6>_tmA+wo z!#XP`r(3)V2B(Q=9vdBvT+T7%GU?L<7z~YvTf2L9FwJUo2XdF^8kLK&cMhR@%>i#4 zL~qP6vBSSjT>f@EhL7ts)h>+xt^bSMP5UvBuY7{L(8nU-G!95-DUG{MTTwH6jE66z zG-wZtUX%*Ce7^d}h5LO5F>e6*9D+jQ^95>Rup2>1+DDyx!)`otu-T|}+e{KmUdYs_ z#zRh2PWn1A!MgU?BM#xwn-Tjl{sTlhOi=`s0Tgb<#l_)#F3t}GAaf$yKSy-9Om8&u zJpan=u)7w{VX7e~FF$|zwLCiJ&Ee7EJKXDKG+!!L)tML4GjK~S-WDJ7MOinAE&rNq z_PE#5l$3~dYhmiy_`HKl$rHe#7SiDJiiyqk*T(*wF65?84y)09kEk!IJ(vE~l3?Hb z1P&eZcpy2-mc(abj+R(8+MJXgY_`NEJXPBl=d%K43 zPEV>Zmvx47(^I1mDVQS}H5}LLVQ|u1zW=ctU$C6K@nGJwDBR#g-`4Z&0fCZg_&~2+wGFZ z1k9z%l5*X4J&BiMdLQk&CNXzmx7A@qt?}f6Xr0O(uEM8`$T>y4Voew02OeXZTf5Z% zdgR>R5rPv}Oxn+cT5oVjZ5*^gC+1S`KVGV9 zQ42E;-}@kxoyK_bmkDG9m$(rKGQDv;EP&WJcs5mnsqS!7Dfqv5FK~I@yhgh>{=#6) z_sqZT@K5oT82Q4p)WZfzHIIfbYuCC$zP>(!MFCZ%ucYgTVVNs;jeChwVs6n~WjYCH z2C@?~yG)L$Qnz{R?DoAAwHGAX?YDf2FtBgFp~RVd2nagTa&r8eo14+sV<)o$m#aZL z)9U%|u(q_k{4GFfPJd`-?%X6NmB^p=#jF6o>pfwB(`Id*Ql*_uPBz?dP;mCwDPlO| zW14MRfWSD~CAt}l8W&l*t{DG^KwbGq`on)eeZdLf^wC1oS+1k}vIw>gH|Cxp1vkyN zz#5*p0P&`(>Fl+X74oaL>x~>O@Mr}Acv{7fQPi5?ie|vM{}KclNR#M_O@8kXcR|`6%q9sak8SxYN9w??kumoB8qP zr&-xJWu;ssZgDRT&C6(K`?}(S>;HcC2p$Q8JN2%TjpytQwc;7sy?r&lRV!7&vlv|B z=Zlw*Kb?)-vwhdTJu$SVV%FKx-D+=~$9N1sRU@J&LUy|s@qJLzRV!Yiz$=dP!zQpT zss%bQPYKOH2pk^aswyupukbP}D=SZ{&b=c7)4Y9U#dkM1=?+k|gKHgkhE7aCTX9QQ?; zQ@tP@sG%0Kvo}xS5YBj93+=KzF%m4;mLk5 zV+#cWVE3Le$46DJi5=}WotEiIC?KjgruUjsuQ%kYsM7g5??YT}kh&ZMx!=|67=se` z=QtlI);oZ``O2@jQ8<37RDBf44ETfiUFRWM4p7Hgdx%c7AgdP^4Jyw!d!e!Yfk@@Y zV+1CBv!`6Tudh54CD?<)9A@>H*4tMeL4tcR#v$&{HyXxfmQyI&+tQePzg*+H_g>>L zq&oQZWW|fpoLPo zIkz0IZtUmim9&tZ9)1|&B+Pi#(ovvsfLIZEt-Z|0cO$CU>b?AIK5fu!Av29`QL!A+ z#E6;n+CcgFa+eF#6_xT|?jwuh`~Z`f-ix+YZPDDC zXKA->50||wT~iSRZ5@WGc3FPuT{u^RfIqi zXwUBchEMJPYwya#pFmx!%RP%b|KKsZzAcFp+equ+;7|y6esj;dmI>ht z@9s7!?PEb0Gu|E?9E`=`9_+$S(8S zaAqnRa4TUj?KVgHj2nw{aC-E(1#F7864kW>HQ%q3&qXYp1qH{&i%A_NDzeFn`v>I> zPeK16j+CV-ZGtg6c~jKvXaH3g&dPxy6^7Rdbnly=9=<=iR6U`N^9=|Nm%&06L+gw;nCcS<7v{cf@@FsWmS^M1Y2(?t%fTeV=y`YhvJn$0 z5ldv@lag)2lk?p(9qPivzRPaClky64RWaNhHQF~`m5lCcf!(=9PUds##Q#f>aSjW+ z%rks{0TbGVp*+cw9L1I@A1PCp?s1X_I=g=>_q<+gxJg?*PAC}TVv*|a-rs0w-dB1u z$F)(NhIRsyw6DBJi2xU>96NSw)caApAq18pmp(n}8yYIVV;gih#S@94AA+9hO`_;) z@qW6wbM-9Z*NiauN7IWzoYxpihd+|LamUQ6Hgwv!nKh(mq}f|7EPCga&ecyxINE8r zmTZ1wVp>jr*VbotYC>ymkA9aDZ#~b*`H?(tquItP$0F4>DJ-9&C+%Qc9(G8t-aON* 
zsbNW(Xx|ymp5r=;XYZ?a^`YC*_YT>}M1&C70Qz4-c08G5x#zUywQK&t!OXR`WqqG( zZ@@XeO!JgnnR^f zpoH@dz$p)cvAzKP9b5Ho-iX_4>+QG@AWt?ICV!aENScR-M@&;nMbIlLPhfU#A{6h!ain zU4(29-K_Z)dlU0jnVRr6+6%30jqGEn0@ZL9vB6v;r|U|Ym(bnYG}b0(4v=$re|X~$ zayHXsj1t*eOo?o_12|at^tT3d3M`!KFd|j=n94IGEnoj|vdrY0Cx%@Dm95y=lBjwU z$&45t9u{#Ns4M>jUBtlG-2L((*g>-(LfYQq;JLRipPbG<&M5`gePr3`#i0-_c$@n< zZYJJb$5fRsb*ss{W!*VL-yU3bNoLDBx_26MFE!hn5|091~{TD8&mK8(WE8Y4qi%;s1%4UzWT&~^!x--VJqbO=gTtIPomc^;f zrT>bS)atCiMH8GVK)C~izg|(HV^#Lp(@AHFh*|cr1cFQhv_~K#q;Hr*??t=i#c9+n z{C2r-??sYGPA%4UaGt&=^LFmz!SQhM%cqi!vfkJfWot%nhfzJ=lLfFww7zV6PwEEw zoVY?Y12u`T4|jpWeMxdW{N3Apf7H$O(*)NwHmU-Emd82=z6J-UE;0vG-aSiCml$NP zVaeBgs@Ul(_AD(;%s&3_jmgQQwfV#?tbIeiFDj+^bSDZ4gqMV`w}{uG|K5mD-PJ^% zsKpI7=J~frEjj$9k)H`X{f!vEB>1ZrtY3kJ>M2wDduTis8ym|lmM#(6*36;JV4!&` zN#3L8@BHB{lBRCM=^}uOMY5N`73fdBf1O;(7%XCliib{&4Gm zo5221bN-!Jh-Bb82(f1??67$ukiO%ZXa)MitxY1|1D=a&d0EXN?;DDXo5-Wf?6JL{ z{jjl+X(6ML2yGLWOBOF4=o{_iGqO{`I;C%2TM>5fD?n=2@lK3wMHMKo|IH8!=lvv7 z@|Na8Ruqn8M4)evy#DC`4or39@#NI|G;3a-1lYeae17uVZ&Z_HRYpv-<`=gbBGv9E7>GR<|)8( z04*k)q;2@!(RE( z6$WV^zxoAnYXcTRH{>$c-{juu7utH<7;Wr3Q>wbt_=40z7Zu=gxlu-e+`|#geg`MYsB4 zx*kl3$1|>@e}PIMh)gEl!Bp$%GdW+Mcv=K{;PT%Kv=jl{CDoKe%%a>2q{1N0LUVez z*(R(U?U2jDm~%!9*Pl6Kej;<91j(;&LqELiGv~`Xig0cP_gF+-uKQ*UOCr~K*PHJ} z;AK7JsAlFrfn6czN@$W;sRJaYaeyZ9dm8zo+(qkip9&gdhowlY!bErbo|~P_!v5ZX z?N7hfI?lNt5oVLgHT4!>;sx1Lou*XWb3yAh=?tNt10{APn>eUYQJUguW|zI7tv9J@ zTM2X1;gv|j*T*$~;34Xn2K1adXd9g-7OBu1uHnfooO?tobEbm{og{(S z&l9K-q0s5eAQKq7-rFqD!lFw~pQc*K_`$<~sZoN}OJ%_-*4EaM=t09-v#ssSL3X5T zr7f44o1Cjva5m%~mZ=nUQD0KZ3Hu~hZXFp!tg32uw@{otJKuU9)&+vl3dZC&RIKgN zj}AdR1Ntow(UB>MixuLj@w;?5LgwGq=2LM8J=V#f@+4y)C=Nh$>9<`d85QpO4 zVI`opNsD}fG0Bi!KeA@f@2&tUw$bFfXSEXA%&Y-lwtd2 zf+2FTw6^x!M9*9Zrz6d)pQ$7$phUFthx_`xJ_hlrUb=LNXVH!JxzhQ;1~0zIe^dmY za_R^mT7rcGK5u3(W$k@5>LL7R=p91={XteLp`}tMoel7E2+o#URDwaIf193OlzcYr zSw@Cru#jQ!{MPFRIF@6XMO|S9Nv9H7Xc|%jnsBi!GWFcpI{IJkPQf#KzE#Aczou?F z`RF==7|09riMQS}p`t?Rx#*l6`FO2}zX4mtU27#IlLiCs7O~_D#j6!8+P#<>QpKX8 z{AMa_=5QMg8Tr|;LO#1oMz-!1-JA%Qm$4jiy4uhBS&eGM-&AWO*jxIWSLvx%G|p{X zya%O>LZNu?gx#^LontbmMYreQr4;KZl}B-L@#l}vn05<%8#Gmwn>NadY|EnSQa(SQ zGhIwS-4>Q0G43tcaYrzvRLxjry?Ld$%L=aYlYucr!s-l#xQnJCYjWv*JcW($U!CI@ohd(borJ6_ zAoB!Os2PaCnuRo-yUInpOTW9gCQe;f@xqbis6(>Pjn7+|n-697Mjo^FCoGJ!*iC)8 zY-5OlgzWs+_f0t-ZgOs|j&>ao40mvI^yirjt&@uatsoMmIaIxKl^l=KerCMYb#+XR zj*g2;W6+&fHh0X=hShNtnzD>}DehHppE*gIt@2cGD1SOD*9XuRoNk7zW{!l44siuq zVA;?lHpA3+BRW3dkma?Klf-sORXmBuFYRQJ7VXfq@rF_=W(vP5Yrhd=2mevv2t&4> z>Z%db(L$_LF<=YNxr1llJU8`k10WFaKIRZ5^Cm5=TaxZhBZX93rxlFN&CI-4wnKkAABY{`d?( zH`d?ZKSa*udQTyoIP^(OOuQA9`$#F1e)l2YIQ1vTJv=O7Us}6y1J?=aM4Hl+}RUcM4qZ}nlSwoqdg$QpB@B?(rn>-ZBrZXoSSgKNnz%^ zUPFD6TGZmzLUN_*Q1j_b-XY~^4w`L4#67Yc9^y`~40p?NxbPfbE;22AO@FA*Q|qeQ zY7q7tg+8f*6uFNytF12kQS#4l81}K4fp^=a*gK3Tu{RpoYidouN`jlaeq}{Qp|39pD{v%)Bbo zE#^%n@7P*JJNb#_pS~Z4FA5i91h>Fy_KU-nlmY7LQUWaFew0kAS}PNf{hY zBUvJ@V;xEv7lqfe2TtHFe}{0Q#W%%kgfc)cqH-GjpbbecdJ$@Y(sbftLs=10-a3?! 
za4Co6^M`QmOkxj+mCsQLiBQZFcg3!WT61S>>z?_7s0hVn7YBuJNc?6+T*^pDdz*FH z6Vz!G*skK&=)C!t^YZhrY%aw|J5BqpD78y!+GIwHS;?{Oj*VIl=#;AiJm{c0wUGhK z$|<+eiX(g<>bd3BHptS^q0y#n&`I?j!A|SeXNXH@La@DEWU`rIOU#XJ%S5&|(zmqA z8^#gxVUk5@qy>v1XkN`78AsXB0eA?-RKU$)UxwKXe%avv{9(cbm^|&&#vGeZ*Q~zSwQMpt>Qf3ez9+4hT$M$Z0a*L>HW z9A~?~SY@Y9Z*HB9%D3b0C$WL-YF2RN?Oq+KGQ^dje_S| zqIaj#qe2Tz2QtZ@n`l~c;2q_w3>2&MJ<7-%;@r}@nN+!| z;Z#8x1cQ+fw2*0%4pN!%ySoo>#iM&xN`%KnH=&`3lOxz6w2s!T$kn4zpk1hOX6JY4 z{QG!NMozg*=>Y6=@7}#rir1nl8v!Du?@dMmEgXnbxG`uEue@lh~+qwW7b8tkD1TAlvcb+DMY~TLoQ;g3us<;WRdTW~Y$T!QpVwJyuvJSFy&EiY4w6g_Sr>61 z_L!S+9>SGS;(oE6cw!V~gB}E|_hNvJV2VCP6wa>lW33sgD7lge&gK59#9+;K_CdY^ zU8ldt+qO?>w96*X=H2k%y%`p=g1%;*IwJM&9Gp7{d&p5rn|=uh>=S*5WKhVNs8KJI z@p5zCfbJ;|#aUKAdutlDMD;0ZcfYczz$>sw3id$tvC_v#>CJZAw|~{5l;mIouHYF* zs>f@L0+0qcHMhxatE`D$paG1%WBbzA?Vqmf8)@H2gkLq_L_}iZ!XeWAeV0VV$Kn&E`8ZwvW1}X5c#pm!na3Rl$>F&PuV&4_csPKS-DnE&%hDWY* z+dA*S$H()Eu{qT|SIasO`A|S@W(rEf6%O1t!yc+vQ_Jvw3+Mj*Fy^Vo{=-y{E(!)} zcv2YAi7R0*%dTl^Vu3W{OKxPdBL%mGS|`9UWfM(5!rz2peRHZWuOUVJXMhrBIno}~ zu{$GnC1@8*zhY%Y$%P+iln52`Nvi@7z_YQFQWeW}cloF}BUJ2G{e3Uf8bkl!V;p5q z&C}@tJ*ss~ASv))Xg@9@B7(9zE)7-b4{+RMw9@h+V~|#!3mqx__}X~QvOD*K(wzTY zHjtc(6oA0)^kXVL*vI%!dP z{#Y+#{b_Pufy2NnGgFaI#~<<0;R}G2O__G)Ju_-7_>`1GVtb@!a}^^mo*0DVNi6xA8YbdEj$P7@toic(&$_NNN1ktB*%?xXAC4YJ&h;njS~go^TT0+l*G z?qSMT!#mIOp`~l^*mij)s0MpxB&DSIgC{`oj`zqev`7irP)Xsc0sW<<(G!qDs4x3R z1g^d?1^9_+qLjEGix9*3Pw-%g54cq=l(LMq$366ICF#C6-L(oz&0FXe<_^8?HuJnC zW%}V)Yf%>|x;AzbVNcal-m9anMLrr=DE76`i>FdC2^p;(ZDnRXEKaN>B1hJ7AN6w9 nS~d08|9`4q0`q?+-~uK?)Yx+C;%(Ut@J~%eOF2d9{H^~1Un;$^ literal 0 HcmV?d00001 diff --git a/docs/core_docs/static/img/tool_calling_concept.png b/docs/core_docs/static/img/tool_calling_concept.png new file mode 100644 index 0000000000000000000000000000000000000000..7abdee69226e2bd72c3f40dc6a1928720546bbd4 GIT binary patch literal 123308 zcmeEtbx_;g_a;ziff`z%l+r@*0!4~j@dOJHT#FYd?heICkqEBEy-+N;yA&(Mp;&`U zaofB*v$M1N^4;D4_c!yIVJ2km=iYOVJkN7ZBEZUWj|ra>VqsxD2FgpTVPWBBVqsxl z;@`)7a>=~GiTQ)=tR^RkRW|$rjfM3J3n={_;$gVmWLc$e=^5%fVmGcLZv8WEjm~{w zp2*hW-lI_CaS{{ItKb*}5{Jz<_YKGo@PXlVjwDHIET=?k2HaSlxkmA^@c^Ffbi77s z*?9iQ_VlIR$Wim2mCKvY=9hnZ4@LWy5!1&r^tW|CujlL;mBz_@;;;x}?|fS6h?Arb<8kE!?s_B$z!C?aC2#`T`mF>e1b&#v*vbicMG`(>3rJ{vS4nIW*6m zC&qmE|J|WSxNs(|$urIeEy2uW4o5@$w3Cx6N=nPwwAajxjEwt5T1|u<9UXGj;{1$( z`BDKmn`rQg*d6!sEf{yqxleNce2qQoMN9$P1l& z)=ts9k2qZm1?;Bc{&#K)YH#(FO7?o-yzPFBIlA_le+ z{Ll&emo2xKa;^+UH-`zX;-?qAa$-SKjqC5kPbk00-kYtppW=)&v`lrpuUVq)2-#Tc z95oiCWQ zrlh6S!afP2XvDJlS>jnAw&vFpi!nY|@R0f)xu})r_5P{g`4OZF~9kOt~of@}IR10-yR|6>FAy z^-r}wjuFM&rs+tkCE$Ox&p+l0%$HfJ_1mi5r@cL(t>23>x-#*(7|#&fTxmQ?+Y2u7 z*)W6^J*TEtk2o5+y&6$D_i&$c?1>?l+m#S@+XjDR{8-B5eZ8}I!|r#H?|(sH?|;2z z)Wdnz&MA^>bFm*L?xQVusXh}yO~Zq^sGdjfRHX07rX$eSe^xi`2z~4oTidwCel{+C zJ)SL;2`x`^9u^GS`Tl8^&vvekxE!;5j7Q1$5iz~B*qofu&`>Z(0@vHOnWhfn=;Qs4 zr#8!|)KjLWXVHI$hK4>|UmS~ntnFB!V1(@w1}znwtaL}IU{dP=)zPCn0%gC1D7JF{ za<9cG=D4Bdvf&JAbdJmxD$9vqIEZ%tdlE-nr)P)4`zKla^>}DRd;Lew@%ng*c2aPz zo4fsVQ>*3T?K!~oGu#ha;^=F?&BA?j;l2tbVtCD?JuczZCQkLOJiT@s^~hC0O43_bC|Su+X}5Qi(QR9S&qTw7|+> zqwjU*RcaE_-=LAdx6bG!uw8vbpBg^Cqr{1@1o~kgQ}`cHoF!$PC*>i8ue`gKtib z3EeQ?yu3VzgD#5G6N*vB_Alid92l?O*!{kO{CAxb8Nu&8ONFNOZHexAKJ`1|_djAG zJu7axV7odtx^=bp2J-h2I$v|fLHsfQ%VzmX-vkH*Y&8CkWJ~|s_~6NlR`Ub{x6V@r zym#E(L18bw3KD#rsBcDTuZjH*DEw9Zk%)v|zSE0Z|4V*-&o!jVxgl!#Kn_zJ&b%2L z>Hhabgb6eOy1>(x+v~Fg|8ojo`=R7_XN$MjMmy>k!16sz3Q&<(@ZC10?HIr$KKk8@ zU#2n3@QkaR6Db!=JxVK{6;jS((r3xDdj|2fu_c1VFl)97L>p=uUrh%sWVBc^bj zJ~@~k@q5PUv-~h-J$CLyfF5%{siH)NPwu)Os=z&;4dF9X#wBWDR!!0`=Wze#n$}|p zyAiEqFJ$|7H<#155JWd}N3OW4_q9|Xy@agAW(}sazT+JbLAk7-m 
z_b$7r4U@~0o#aTT@>#ov$FO+VX!Aes?wogD{Gyb?_rcco5zNbLdcZJm#p|I=;`S+Z z%K>%MN;nq$)4A6!2B&%LiU}JE{>w_ON3pG?wF^bpD%a%{O@CxO*4T_9Frd~zs~GO& zNR_3RD5Q&W%I*Ie5fMQTySzDVi5cNW5>esg5sdseQ%+|&3~$4A(sSQHLg5vgAijuLt#fi zEd=#7JZ;tEALwy_aTO!D7G1R&ZF719Cc~#Uj6`eQMKhvgC4}xp^xC4QJ;�+;7jG z{xbV=fY(lY*a#psX};}qxgOL%wS(SOKR zgc2rVI{lg(h3|+MO!n1w!hjEj8+;G!JA50%dK^qhGXsPwGC235@7h-YZhR^xIAG#9 zhW~+5rT)TLMtOTXcOolog^ys=H@V{B%rbqk0e7SoiZf^Sw z>9Sme`wL{e-Fl7!Su&$H@;hwq25fhWQz7#bK_N4!(DI1X!RJL32B&&FX(YTOS$b#^ zyipYB+oMaF+2IpPd$M^fl$W1h;lC)hJ>5jz;J(-1Eb8s(;6&o=>}C4yLNPfUsuM!qnwaAZCm6p^WB@`rLPOmOW##u+v$4p z%adK1YPC_^iS`N7N4mSK!Z(JQ{B6mvSveTQ6Us$!bM_06p1ge{T z*Lh>q{31WYPw1zS2lmhM77SA^mleygFJ*K(q&QxSJp|T2jK_9PNXr~oJ5lS4-X%wo!fx^`? z@Ma$tJk=+Qk`b+igB7Jk6=-1$B9HJUNZL0Ff&bPum!@BR1B;=g$2wB@mW^)LjJ`Rp z$SScEIuJ@BmFYa@6bRd8l+ z?qpQbu~N%W(!A`}*+~6jQ`Yo^!pMstvP;eWZw$%i^jXfEc`oM6_-N6QJK1Ttg#QSf z5otD8HZNJNHaWH*b2YTMl;S?0+y2qmN?xITUOO!Zq3;D$Q=v z!r~2!zbtJE!*mjkHSpU}&K&{2KZyT#fGJMkSog8}qA!2*#bmk3*3VL1K(lkt6LRI1 zf%q&4_eVOd6ZRE**3z*me!C5riV9XG-6ly{vgRR~thS3>&B)<^+3LWMr5&@#9TY zFb*HxDHXl?=HMzUIPsHRZ{I%Sf(!$-1A0!k-<$O6Azare!3 zqw`vwR1gOi0ZG7Pf2&^g<+S(ovl7c4HbhSYz51D%$-#%PQ?D{6dlys9@ik- z6y4yz?|ao_DHi6vHy{3ae7pd{YE`A1Oa~inSX)`qb{zvUlSScQ;v%A}P@S-iwgeaW zR)|w2uz6OiMc=EPAc`8-C3kS}nXg|0C~&2)1&BBnjNN(A8Yahbf52vu>FfuViAa#<2)RfK00zf`|W>2adTNS`^~pUSvurJ zB}(LEclt9%4$7Rjvq=VO*0Mc|V$IC?C7d#3O<2V5#H3{mS5SV|=T5A4&9HLqtDr2~ zSN2e=$C~Zvx=3&_A{8)*ThS}iq>Q=Q(m{jY3gt+NXUZmCZFgZ%)F3F2=!?e@i{Ftvs)O=oKoJ)m)ePh~$MxbyN}-mTo!@=I6=d)p)}O2P+%P7R&Gw^_o|MHKx00599O(}e z*@^_mkHj3Js@U)*=mOV1+?Ae_NdBl00uRyA<$mP1H)s{p)8;1Tr%wvOuqs&ZbF?oE zcLGyXM&rr%FEnchHjcELeNZQi7()f=exozItvT8y343XPOeW%)OD z1%6zs-NzSr2+|L*04Ek^rYx^Cuv@iAX9cL{A!j2If6Gp3-6~9hcm@Dz!Sc=<3Ahv& z{)c+fM2isK<)+2B7i;R-qpeddhvYtoE3`ek1`S}!7t)@_66%U{)@G2>d0Uf>m6%hZ zOaM9g8Fr`iN2>E){BBt}RR>d#n%KGA%nO+~>hF5YNf%wpwd#CX&p+8~P?5__FE|^MZ0cOgi)DXS zl@O)(nY&XL@Wr{7%%8VjZlk@%zukBD5BqXD_c?G>Rf#*o2bO?epjjNEnVoeA6v|w6 zr&2!>rKK9lNd%k&aEc(Tuc>9ib7J*n56mFt_Al%|Tzt#|j|Djio8$-O+jlkIV4wwY zV+dmIQsoZ<2O^0k^yJ&gxmFpww^pAu^Qp-m)x#T&>{=5)R`)m>vhTil__NsINTe^I zkHVsiDJrK zeg0|MqdawvupoqOw9t5SD^5u50xeIFd&9v36o`P1i7_{jq(}dbh^?eir7JX}MkNez zG(58R?-6$BPO>aA&h``XyU|gVpb;Ml(vtj_?(6`?uM1YKrOwt#!mDQw{t&>6mi#Sx z(fsS-d3+pibi-TEqu5|a|H)TilgQe669>Cv{sVEZx89G?A|TSEsJsYXX|w39G0^%c z**MSlv$;~E_6_@{$5e|w`+t}#r%QI3m`L8!`>G6Ckbd!^8CO?N4l3eLVagmtn^#yYH3B|3X2&lWd2^55j)LpgrSu;$-v^t%+GdvSB2T|qJbYgR_3G!N zlO1yf%Lv7>?xV%#yaSkNlR9LZIA6N|9<`qfJsGr1t@2_3h2YQInZS4o<*C`e{&g1~ zWw4O!ZkXG%`=O8)EdSV&V-72^@x!X3z5b9w9P!cW6HiJk)8?Wf%0*u*T07MB?ORc! 
zu0Ll2gt;)Pr0nm-ZQjm(kuM|mT zc41pVIXRtk^yM3>qONt$*1r;I8T#-UUVc?~9J3;Jy7ZEW66nzSAF}|sJIt6DbD zIn-Z4YL;9{rHpLyn97)4w(LLx5zqMDmCO`sv{ji zHAQ_N8mD{NJs0#Xs~gz?x`%SD5C6CR-aoSXPu$din{wNsr|Wp0t>#l6++cl$zp(;?vR==H&CTo2p4(H1|>w^V10ezL+YTHO~Sd zsd>MgGNbkUzdaD2(~$AB!v8EKm26Ccrg!QACMh?RvMm(`B&T&XUtblPZ0IxEm3peudu{C*g?$eNbwaoA`mhY&8O+S=L*bYj*Y>vvQ?a%K*ky#3zK?7gV0 zcr1-X3M>C4;F;FUg+u3o&-)3PY2K5ao5}sG7aq=R~8uw1f7k;PSwktZ$ zydjc$1Jn92gtV9RGn=fq>E%2>#{l*#;(eTdC#2|i*i4z$m$V(j4R&oE;%6KuiZu{1 zc3K(p5GLrUL!ooLudLt?c-M@TPums_(t2>ov0 ze9k2|ub%hNEEwi}!dmp#)X|C-4&}~xutNLA2L_lQ^daYQc7;BA=CIDPqV3o>@V4;= z+5413_$Tc-gE;Mf5&>H)7?*G-w#$?}u2M3(SL10vEJCQgpA6IQGE&m8X2!e#_SKE8pwewT#U1il99zHpm(RK0Bt7)pz+#CO)IyQ(H5bndCuf zQ1b}ovp@)-xA;4d=dX_g0`=NVvYzaS2k16gz(r&wftGQMnYJV#nAq4ggaZjis5UZh zG8CrKaamgFl1h<2BJMhb;zLvK1>MIP()Es|u&M|G+n1Bnx{=hCP z*p|>usEpEJH6SvPs~>)!uJO+VNj!xwE+4i9kLAw?$yIRJ-j|!{jre4Q)*i7=g&B$R zI_VaPJWZ#AyN9Q*V-%BM3n(T)Za8iHclK2Ck1g#Pu%)uSvlbp8A-AuG%p^xkc~FZ z)$D4+#<(MLq(}zHiyLy_FFJA>NqD|8v8_$-F}mWV@;zTJ`S4UqheDm(73?;101neA zf3eX2-d)!;|B0ykO5&5;Y+FGT^sH~FGmRsUP06K!$p5Frw{Re}UAOh@#at!9WL}Ii%((1lYqs!{-%GYM=gbn6i5yH751Y4`Dzo)0 zT89ykd|l7gRk)G}`U{P&!O#h!;Ts(J+-*+54AkAx78;XQ{piZp&^yCSdbrdpKTX-K ztJVQB`p~d zFb057CI>kKc~Fx(+l20R??jqhr;VC9DYKj_t+H%&>n_q}%lPxArl{90vM$yT={591D^_MK!94<2X`qDFa{5Eoj(3PLn4^#<}!5l~+$Y*(T$ zn#D|+GikU98QjrRhTonqE_e|w`q+vW?PxO~;G(LOUzt}tLV}{#3%4tBWkpBL#z7b9 zm~46LZ6rPkFZHN>%d*xv^I^jSX2^^&8)(6xt)5iP@kO!27CGYKmVjo~vsbdUf?;*z z6Vi!}R@|^Zw5--sDcrZF7ny)jv-Efo$v*3(Ag3cD15ead3Gh2WW^Ol=$it&gknNzI zhcT9OEiTW%OzP>HTDbX!krcM;_ATlNdWYS<@#0>Pg1Lj-9kx0WDX=G%V-Et7P|urv zPBndeFUerc(nM$V&r`LT`RR zuD;#$4Lm10$^-`&c|^yiIu-vurkyHz=3#}DI89IQDBEYE`vDZ(>B2BkpiI&b3G&l% zT?pEOe5mf8QMze2z5uaWnakYc8I*DX0BI`!e6T>qAWr3?%$9J-)sew~3S03DM?BF6(d3xMYu3_HL#&`7bB!35ddVqY~>>d$7HHK z(4_%30TPo-e@9=^_VAw!zV5ReD)I=7duA=o4GWK(T$Yx_GBhU4w5`k2`or!d1>;f{ z>SK<7L0{d+90xIJY=nmCZ)PW+^~wm64VcTb0@vbca^W`dW%O|>FgIQ8R(AjL32OlX z!3fQKQ>%V47Kb4nrsegAS|2o6+HZ6p82NdKH=pzxDo#OzNh-2|uZ5$TNI-sW1S}|Q zEnJRvIm4o*ZL8Fk=?sSpM@(y+1Qg>MydnUcgo96fX1<%RC8F&T(%IDe%UHOCqo*zn za5Lk}E(=^z3K(ewvkdxFP88x)E3BmBZQuAS$S6*GBQw^flXscm(N6S!PGXN`8Ca~3 z-YEvRQgdj3{yqhzn>YZz>i42#m7x?pwJz~$`@*a)LDS*CBSL-$ZGA%=C`Ip~De=i0 zcargk^+1s|L7t^e-BDZ5!OlI6OnnJ4ioz^5Q2IM2;X8HR*aqoTrV2wB-2PhpW}ss8OXd}N>sZpm8ya(&n)e#_~pJR0?h5XAKAvIlEN zAk|S)Q=)Igjl8j}krT)OWX~-;l>U>o>pt4=@(#~jmZ=n4S-b?)Elt6iw<{UZTTRSK zfpv=Lk?Wn@LiSyyC4iBvpZgW@*v6k2HrxPG-*H91rEVL@>9z1xlK7JD{!Y`#kDe5) z@OLjgpWzPC+Z5DFP*C{k^uuhZ$_J+g{DMc%OL3EOqLPw?c7IhpTudv%UL!d~fg9qk zz}CIFIePbZq#@x@E?3~;>8nT3v(XA+VtLtcd24yOGil>#*G0&yz8e;zNRJ}-cNz@2 zD}mHaaMNjx1&EJ!*6;3vITS>Kfxu#8{Vfmw1F9i7d|qE-r ztUqv}K)GTEe@)fZEllB6V|0u>d{h39PTL$6=iVi0U@_FJ%F^<9837lhgdYMf1Vrfb zyd;0%Gwr}Y^%gm5m+8RJ`DCBef7v$V(yEw`Tup;hFM zqhfwdC@OvaB8ZuHMe})2oFgYOyM$~oF?5N@@ilRN>#5>Jcf9t1V6FnFAqxWN)4Fe% zN>}1S#7pX-iof`DP(JyDG^xf$tBRe43CYPZy)q`AT(xOp;r8rkvPcOnA$R-}`tG`yp2Bw&H-MNqJFYV%-XRTCay zUDGhL#>1o6F-5!ZWp9KRlbz!R?&E*OH(Wy3rS(SIC1=_+BsRX17VEd>E}H+?kq}qU z%$ufEDVf}#2O0R*rTuZbT8ZEs=9ByhCk;1!$9HTZhFqn)IMI>&dKWh)+g$FzY}?M- z?b)Q)JU$tclf}^Kr*-2_Wwo{@K140)w=KQeSQ*RCf`~!St*uzVrTb9kbc6;US|ELo zK4s`D6k^=WJ5op2ml_c9G;Iv*t^q97Y~Jd-UNrvvUL+TpWn z966{M{oWzQFSNki$CP>M!y94|67Od7cs({=N`mAQ$LmQp?b^eUllLD>8^a~V1$Z}^ z3YmOw=>87=vY6Y<h*0go&V@czEARYDKagWoLbQ{jd8e>hKgrhs zaifBZ6(PoJ>6zjA%9;r$Op zuUO3jvtEGvlfRw-YGk%8Cf|Q=0nCc5y=r{-8foFusOr@Ovy-DOFY}w)v?@}R-S18z zpL>O4ZGz!)!!8mL0j%q9oo*D~I-c9;NH+K0HhX&9VOs$X{1-p5U8mR+=T4Z59;@IX z3P^~-?jlvxizznk)Xyb2s5j4E=8=f<1}8g);*wRk`d05u*ly5jFLmifVP5_{nl7Ir&%jmf>{1=R!I$_Mi6H1r3`YWyO zvvP4^Hq`E2v)lKOx^bCLHIb^s;CvvC^&{2clAweEFG^-hF0Wk;?dzZv%&=0_E#W@h zzr_xD=Zc 
zPA{oifBmWZ&&z^Lo<&IDDj=Ga(}FC0#2{oQ$^4tIC0 z{WC?S7#35jLu6DAMNxoHR_0WmfmTgM|9lu+EV}w~;r48vPqb~FMBR;^T-hyW7|Qh$ zxXS1m_QPSG{Zobq1t(-?;I%6O5vNF? z@wdR4mV+&4APu-Do_|F4-_raGqCYAUpQari=GBUaf=Gpen3}|df=>7Cu8eC!TneE2 zWUWu{ogEsC@nq=~DoY1L>w!u&&_78Fd5^9n#(2lD8R$J3N8r6y3B8Xfv0=kCM6?B8 zC#^nap%ktY9}QSJ&k**$1^i>^ZNfT9G=gIn??hC9X#Tj@yuP=NLL<#DWeij>L| z0+Q2JG;mO=mJ-SAogq_0B@m zljshh@~>EgGm5f#bfk*Mf8xe9dNIs7z0hhc?coK!0@4abm~?llDX|X?4rMJ^7Y@e3 zw&R?z_dKUP>#R)30y*h}(hXa}$&`$y>Ff<2b5^LybYnGCxJh<7DZzrRt4FPPxePuo zRV3YS{$3j?w^nZ&$1Nralv;DrRp#IZ?N-xIt!d6W(-oA14S;_cl2)59a^WBm@4biO z_nQvhH^eaJq{dbnKJGP(U{=d=P( zRsQ9+5;#5g0-5j&K0U-mu)ki^j#_?<Ks8sM-yW+f?1pOqQ~4P$c;LY@ zdHX{DXPB`%BXEq(mEY9kxo>6dYi`bL+Iu_mo1KX%$z8_g_c@J(zcQ>&>*tJcQ()iA zeIq;4qAQ%)pIsas=6}EE?{k45cSXk|vww>=m?k7sCS&bFRH?fk;kfW1zM)=Z+c=pz z54r8HDr)LtuB)u~26&tZUWKJT#k3339UqVkSYCycNLI%zM~mPGF(*rT{3jtADm^%v zV$O<;6aYBirwGgNEZI|e71Su#YexFm_$M%N21uJ|E$Gv zUeIH-aDX`C5uwPL8scT|ugr3K525~ix^7`(A`d!RS@|gyroY8{@l2lMd_H=$k7sLd9Cp2S)A358EK$y`>s8}&Q1o(Pwwmh3 z$LA%awMrMj)eqKX13Ae~@+;Dp836OiqIX<8JYiY=ZwzbPz<3a4%{EeOo+$|h zP)n|5FNi`(>?07RDs|Y9mo9CEn%tKz3U`Pwt@{giAx|Rtv{dw6Yh{B|9B-wAfm{XV zH?|t@LfuE1PEGnZH@lxPYFi5JD%nM2KwLiSMp6`_O`=*F5Blzv8;VA;x`l@)+kF^^ zkUD>fe#sdr*`yT*KKH?no1DusQ5 z`dt;Z5~f%Ap-w0)Ax#AOol!P!z|wV971TSB`vbpVzzjDAXVc?s534-zj)A0SNC|(O zethPXfH5v!me=LKhr>>n7OWrK?7wvq1QG!qt9xGwXgWxjbB6~QvmkSn1PRtGDD-C- zM?u~Em^9*wV=Pz0K2!ITam`F;G>DY?Dc}B5n^4iv6Gl8PE=>yT)ne=aQE8#ig#9~v}YeE?SVhaYOn=*aUG z0poOF4MMKU1Z!EIz>g8UoWrmRv>j79IZLPb)9-%hM8HbbRt#YiDiRPDjatTdwqO6- zVKQvwK+%tW-~)(Y`yWrO55Mus%?ZxG-fXS6-6d2Cyk5fXX)8Uo#<8AZ%+!SeHuDnG z%9f+$PK`|&WZitorfmyY7}1Vne^ryM#^j`x8zw6yL6N0WXNJa+P4F_t1ZhdM0!+B_ zs7+|*yQ{&wp<_O@M-p`fd~b|C*Qu9>g_%uZPCI#<->CGfS}__0xBH5>Qr>Vz)`*5x zGe0}r&+c^-$4l-$G$R+EHDdV4Y(TCOImy6>A>SfwV^y{dY%3vL^>pKa!bv^?X4GQc z^ceDR)%_f`U}frG9mvid@17Ytr2cL~w<6wa<4A|>p$r)zo`I*bAuO1rEjxlOA#&4b zeweWx!iTbBW`gSjQ8YcyBV%7qXc()uH->2Bf)Rzz{3B95=qh>9D2I|@<3{LPn8yAi zcj7SE6-o19i8(Ft7rScF50tHPbJ42h8eIIMHgfx+4(pwimFI7KIJ4QxK8Gaz8Z%Q-nnyE0`ksMC^;1>yC&kIx z^o`NY|1k^ji*K=YzJcv*dBGEsOk>u8mD|X|!ifN}ehtp<;{0P06fq;fDaP%^-HI)&evqxr97Dt zPp9imZ1Df@g}DULv6l$Om`9uCsN9257G@{ou`JWaYR)_m9=dS*h_mR zGOR|O@1^wIHQ>7v6hI1oP-EL!(;ya-l$N6U(c*`-yGFa>2(Lp=omCw^z81_neH!uk&RDo`=1e)Kq>PT%s(-SrNk$*?taKJ^TdToyeutPbd`~Y6f z_cE#E_kyUi$C(7)q-M$Q;>3Y0W;)gkel|Soq=q=vdXfz4A!oLA)TpqqP=0h@^eokg zS!9Q~_)5kgX{>$%^$?t0-?PDcdnc5t_Jp&q8WdmGbY1clIWPKi?K%NANGC0%wzyrbj~r;1zZeGDi~od`aGAJ!pUQU ze9K{;YT(^DnbjeSBvGGM0ZguW)gBfMQ*DHn{K((UB@O9{S6!urs)sz%@tNaH94QXd<%=~NJ|}rmr!W_7gCl^2bT}qM4X-yfRo43YLIH-&Ak&?`(Z#;b~LuGzNL zPWw`VgQ76@n+8y~;8SmVAwOo?+?@=G3ep0Q?e$ z3$ew;41xey7=qX*C-+Qvb-7N4ClB@mJ?CmIpQJp6yj|S3q65LehscpNBAGZ4zcXn3 z=^BZuorZ9eG+VmM)i=x(bkNu;TtRJmROb8#%J+*Zx9OLe00vRL=CE&xVdAhb{#SII zQP60x8w3i>AO}oZ^z32;OAxaT`t3(1uf=K*c?WBztV4GvB^^4R1RHq@<}l-OT9{0X zeZovwz;J7ib;s0K0PeH2)+~*EK9gbtyz-y@nUirS9-xHAw-4inbJ@j9kBb6=pg(|I za7GKj9%6L(HSrs##<$SF3R*epA6~b2j3XDNH>Hob#z53V^=SXdm0$8iZ0Qh*^X6yK z4U$d?elSREPWrmESoJKcd1YV>+Z21x2*&K1F^K9F7Ng$RgX<387TQc7tXJ%`K6)}Z z`D43No`+kEF=UoF{>!7^RS|32Ep|9e<&;unc;V)qie1n)&M}!>6>El=IXV9K&wDNm z$IwO6qy)&$in6POKdhc=+6$T=7}XdfW@ma3!1{t()9s|PK6aq_&>0yS?b&xA6V$kbSo0X~vT^9wCP0~BuQ?HTuP2}F#$TV^ ztQ&aP?(seu)=;&n@;3csG0FclME}`J$2|e(9oELf$cLDQYH3eo1$LDwy+>7bf5dYo zN>`1+2oKmq3?LiQ7!JkY8ATNY^}v>!v`EsS(>N_di@VEW$%F0m;+2P|w&UrgpO0Lr ztRZ=0D#*w5P0f-EIaN7E3b@F(+*6D%6@!1iJ~ylXww-{Kl}%3)WAFy0o-NRauEc6!_ogPrm=u96D+tgNMM(96dll zmyk}c;4WU$fJn@n;voRR z+-|rk69hhS8!L@LcOkwI%S28RAOU}Cl^0Rl- zUhN4-3uvatMKZ!ejf!E9!5}3_XyP^}o$rqnz&B}ysWN| zrZya}XFimqP>wswv9OP9Bx}|w+ixJucqE8b_sJgZ|6I(y7m`kJZ 
zj{%?c`0aWOo{C2Mh(&!@F${GrPFTq~_yCQ0WSohRV`f{z3p^1Wnwm{aFaB_O!y>ZZ z`(*C6_eqH^SETs)8vpNHP7#V~3q80Nw}l03nPR)#V+0_>dC5ty>(SQ1wbjpUJm#GNMF~X#Q2uiERAetN~ z{!^Q3LH@lQ%ex(hYb{tfhzDPiYKR9jec=FnW4_9*jQ&V)?(pf^R5?vsQgIK_x>Rs; zn9ol+RASdYigWZyL0|=3`OM$+p?xet!NxrO#^ZlW=P%*|X8ya1Yk`Yk^deEz!#=)7 zJ>!-#U;R#e)hZ<`9S!Y5eFf+cD-E9rj9)&cCIK$ zHduZHY>y|Buni2Dk{*?sYM10bHv74bVRgP81u5%m5 z>1mq^uj#k_xj0U0K{PV;o2j%B!LXpcN563sOZoSEhp*2T?!}<3U8h>kO!S?03zLVr z@wSA-ts;M>nQbO7FaRYB3%~nnT{I(Ag_844x}oS}GYEfZ2Jm3$0c{V>seRj_(-Vrc z$`?#A2yi6MpNk-&W=b27^~S2m%LSg#VW&eTJ!W{9tdA%3DkABDDKc3x%=pBS!Ij1M z{byf$0iHBJAN)7AzA~uo@Yy;L+_kv76nBCUi0>!0BvEc4d+}#TlDeeR>?rz13 z6{qw~@4f%`%e|j6lVm0{dG@!@?%A{Fl;-MkAX<-HX|kB4b7e=$;zs6;vEv$jPr~a@ z11j<)dnm^$rVOUjFIq83=`(?!D?o#u>)=_t<`q`?6PQLrsz#15cQp7Nlvh_$j<|(k zAIMH~6d3w{jEc4gnOrJDU&I|YT+kU?C8{W3b!xJpc&&5$6n(XA4za_6I;*gxqBBZIF zC@Q4-MPVfWjwgsW^}J>_@i|1x>8h!@W=W(6(CCiUM_d7}=Kwaf+=qX&Yc??N5El0|HF^stw_^PsYXv*zhG~d!oXlpt>rI*AR z%Emzjaltq&3~Yxh+NtYaa_^QcF=WMQv2DFXSY17=#a7fs(ycqvw3CaP>w2ru^E{ea$hS$;`|r0A`GJZ)Y)tR{9|;Dc*+vY4+4KJtMcfl^~=vfMAQc_4H(Ms7*fhK^pRO`MO;7R6w1im_uQLs=u^J%k33C+YaaPnIp$jcpj zXA@-DODp3E0HQS@=(^Z@3pVw=Bzp|>S^cOxN3{Fb!uv6D1kpb90T5Yvc)8xtLNq2* z-;{luIdw$U_bYW(y$Zu+5mtsQaOp6NVYRtKxqiGZxZGdo(8L-;-6#dha3h$_j^|Sj z`@;#3wZ&JgQXQ)4?33e@rEdlmnUoam+F1oO)l#&1u9>P&m3({*=d?9FH$OHvYx|GC zqEW6$MAW9}IOJEFm(9$~D18@#ZF5;Q@{IkdyRYNCIr}Xh`*>O_^i!Tj&dfT-N0u!D z2jk%O2FFX3wP62AdZ(myi@(^9gxbmT$Hx|pdcLon#sjzIkq}ZT7?c{ML31= zJBX25BwctCbeaP-jf~fd^a&n#!lOw_g&UHY2C#YaJM^mTIHXlwj_*^na|%|UV|JbN zdiEJEj^L^Ea(JSba=P%w-|2s|nqX7XxcKnn3f^Of<_otZuBo{#LP|-FY`>y@%Nx(^ zw2MX!2s2;U{-a;08+U?cWn`b5hTNC3eE5~6CEV87l5#5_iDpNUA7u=?tQ$3#GxI5E zB?KgxikZo7Z=V)>$gbwk728Ksla}l;wLhv(EM-+F)7HGbCokTJnsLZkz8QqJZ^=X8W)9jSe@o;qTJ#5G-CYL-ALB@n?zNr(e?se7B z$armqo8N3oUoEH;^1EE%_rloA47d*sS<2RoRTe~TJv0Yt?PdvrO5DC7h!t1YjdJff zRndj<3^Q>aCyv?3G>Mq7B=l)w(E;URvC=eH3BVHy#H7q-k1iy|1J?WKKC0m3pLrgp zqmg+r*h=h*Z!>=<*`K#Y6Jv(j;f)b|St3;>&&e9t9EGjv>1fpQbD36}yS0Q@K8jS` zzBzGJ^82LW{E17?zKGDvw@U>!DCz#~5QTs*#Z-NfiB;u*Q-EUy#^9-Fu>^@54+SaX zQ^eV1igOs(e&xKfvPxBgMEPBdrDV(|$!2HaWu{MtL*u{T){uiqn5_4*KAe8jiB>up zeHg*e6h&i(6{6)w4K-RrY-$v9xFD8-bQ?OJ)fwx=4R8DvSXt*4iDdU9oir<^lhRfxy&oK3Ug z4e3sG%6x87Z26ld)t$;z;f%xa`+Uyozcg~yb3>gDTMkhjnlvTXD(&^j9JCT4K@Ooi z(vd+%k$yM@bf{m2q?%Wx=+tKJ{P@~QWHS-YJr%|mV6?Qhb`6lv=GX?BgrE~s6hFy# zQTqIeizNC@By^N7>DxqZpFvBG3vr^1D~t?>Cc`MLvU#S_Ck}|?20ER)uxpx|JDi$` z=9&k3;78yNYd1V<8Daefd7rPzOGLP!(*J7oE?*d3yy3?_N&48fYL#RnXw4C>U;S`e z*GfW|r;ZM*DYKFlqR*U7o5ma`?n+b{)z#hht+$ z_(fLX__+=GPa5#PU=wkRfFKr9!*^)yjrp-vN=1rzx^eR8nCG@tW?NQ>Yxk;_Bi$tD z&a4WQ%1H@*ISJOKuJ9!Zv)wVejUNuSh#91eAzVig8B;v_TIfl% zX@y@K9h^J`?RNP3Y|P=iQQf__m7kL;cQ0nO8!^X$j!;_o7&G=4 z4RTURGRoJB2C;{iRsr+pdC+36Cu^P0OsSCLtx(pD*7D869!!YCpxv1LO%eib%USNw zFfx@)hsj7YQBHVk_itKuDrU3sUm*{_=sGU9>qyQr9nH2J2yYDtr5J~n&2abVWSEK2 zq4K!x8w9-WH_M{JVVRF2t4hIcJ^;1|2e`ny+1Uwd+O&YT&4zW+{t8EV&M5}^WeaKao#trZ^RFam<@*AuAo#H#5 z76Vm2ewNf-t>1o5rcJSNPPV^}EyY*vDcP$cEY`3$)2|NKQ%0W8;tpMr#CKb%DW`Qg z3$AWByk2SyeR+JMqTCexUu^8&o0wj6)Cy6jzKQtB3VyL~9Xxtp%-pJiMN5EHv+^G* zLj44)!!|3Ob=&3^wj4$hODz1hpe#|FibSbMImrY8yl`bF29hTB^D{}Bj%;H4q_Pm9 zD=U%0iK>sZN+n$De~hBxJI(m|V{}usMdH}0`Y#`dkrcwY=ajDgI<;Z{eLSbK@Ec<7 zyenJUCgkiQeQps8z3{soU+!|pzWbFmyx*r^l37)V-C_WG8^hW8|m5%M;p)cTx@syqZJ&-Wp%^SQ+ox_x=)y5#k1_}^~q?FvihSttL)@ojm*KJdn! 
zaAHE&^{9o#&)3vAMeC1r{elA}URZZM3lmA-ocTMZR|lKa8Vh1;I+)Umu|5|$2Q5j` z?`)62Ok4%kT83Gz1H1m}1&6|o6KbbcjruHD{q1wo7QZcb{e83qdgzCd+DN>la|zoG zH~rhZv+;iv&BSH2Mz~Puxcu{2_9c>0*|sYC#{zCk~>3FAAcwLJD0CNt9Bjbxr?VO07Fp>z*F)pc{}?1kd#Gb zc#j`mku}W0@8aY6&?r#pcTwrz!*Qm(RX&IIayR8IGd9q3+5!kjbPP%bkXij$)f~~x zJgjljKBZMS)yU}hzg^|+-I$*0)2i=`cnU}Id#+-;TDh4TIt%K!^!l`=sMGKrzptV7kO2% z*<#r6<|-nvC*;eW8vU+og%e>R*MT=&i6+ap*48MD>7yH3a*Um7h4}swbAFVxFkIGp z$pR33AU(T@_FVpF#K8PV0W8sLX1_gq)4pUc)1d*czi>%oA}V6%Dm8}R;W=MVNnqQV z%GD+5a34>`T!x9JMudq>hbN>`?|$1eknA@mY5f zLbQe_&${|1Js)+rkH%Ih?2*`SHnzXxphP>DBuiVS(_0LR{je8~o%E=P0$_OJz=RaB zPgJbx9_hLELEYwc!oh3WF;9J}E=V$M&yRvv+fDVwb%j_`8@auek5Np+4yCX-*xIF>cvvT)RMElt z-G{mo&uE7s>FMi^#PLFA*YQTnMSl@u#h!1{Hr9hUDqx(Ry%J_#>+4m0$Sc0_gS*;Pi481X>OXk8RdnTts&GgV0e2ca?FZH6T4 zv401R={*BlwK5#}T)lc}Vg<93cu@q(KE2WIWDBo?$s+{o!3kDxM8$`#{fQd?=PZLLa^Oy0AG!GKpS&(P&L`LY=ahRn65$3S=!r3Y!Phoq|5qTuNj9_l zV!yL#K-;t!)Z&{jL-m1sTbrA~?}d4Y13^RHxveM>wHw~$qb$Eq`I^goKx-YvxRkm% zgB|_%Fll_hu!TPHScUqHI(4RU)CCS(C&}UR`j=2FkgJwKsfZ%KKvvH4I%{}D*TY!_ z+Prppq4|5dk4XHxkKgSt$0PF7m0e&$&}k1(OZBn>*~7W5e#i-%fBAv~nCkuv)w^7l zC&=79-98*+*A2bnHIeR43^PR>TZibum8poq9|fM0G6zYu-T&&?o^0Mdc{e3b|5L7} z82_SE5PNCsmO$6hRzYMXZbyekh14V>Jeqpdf~B+F?CqNx=LiWLS>wTkYPAf=3|%&O zV?MO8UF2b$?tPn|Sl#oG$2>M!2wVLVRuvnl?a#>4PE?P0Tv?xt%khGq#fg6YKtJ~8 zQ2s?5g*xb!oVb3Evnh)~p6Ex)Ew%#cOS$N(1R}!)2VP9FC zW83+G-23JAI4f4n{YFhYPSq3_o-_}Da>#>iL@CO6Yyo}(n-p{7Qi)q(Rlf|02{b`E z4I5D)bcA{O_S+t(8;DZ9vDQu4N>ksX<nON9@XT1;8Cmt(O1E z4;^46A?lr)c3d(+r$Iy_Bgjw;(H@)WBU-@20M#$)+U9Hv8?%cE$O2FqSU|35Gh|L$ z>zJFt?e%u^r?M0adfz+LH}&>#1(8xS$jZ1|Obx+qZToCVcEZd(Y=BI1^6Xtjd3;62 zWLiWzUM3kjH?3w-IDtVukL;7YNi>=*7ID` z`|di5@|@40fXe|g4Xw3Mo$;gHr+TAEAF8*w3B^E0_5M`4gyH0&7_OT~ecMw91t~af z047brEc!fF)sP)gAU>0+H>OFzBLnF7QHRUJAdS%9v%REWXZ>W((?iXzV$p~3PG?%j zL#=AWoNls>z8NV~zvu(5_Q`9UVu~j&h8LKqAPtt`MW3NKsSAsJxG39Swj^S`-qef; za`d1v<$2?6dLJvjT(_TIZLZyHV}Ew(EkxdntQ;}lM5fva3+#b>X~7cGMgdldqs%Li zSJ)EA#ss+VVu4=x=u=qMANHtr^L)!KS6}H|hESjBHc5tZIj2WD>_rmQ7-oLza5YXO zAm-ZDoG8=2d!*oDhLHqIh z_ndlhLxYCrCzw+E?oI^Nm+619)|cz<)8`e8%WKX5$vW!PK=KJAn6$q4>*JB@mjC?@ zOg}kI7xcKF(gss>j(+?3@1J`OUEMp>Z$CCm|NW3WQcLQ#s>1UaLnJPQ)+H2VVA#QB z|H~i_P7XECFl^VYUrL6GskMkCDFv~ZMm_aY65>HVp1D|ORo(DvSkGFaowG9iwGfQTwFD1 zml2>0x78!pUk9zUX&l=@$M@!s>%tGO)<@KMm)oUFn*&|}k0_~rZ_=qm-#la0M@VQQ z1=7=+VXC%mnYQ8e1ia{YF!8qi`W?dM`l2|GMG+l6{F&XP8CBx(&z8%?sLf6d@3fST z4u78aCDzNGLaW)v>Mi7*l)P)-P}S#^8o(#MU>mMxOmj1pTzK^gG%blh61Dd2kNexb zyK?TXkAmGnI`7hV+MBRPE+dh>rg9<#^_nd#mV8=1ohWQCL8X}|C4h>S%5hZq03&NwKG$W3p`9x zn)30bT;dO^;HG!6--pK=pPR$;k~b?I4X~gqD96Yk0l%{m2d8eM0@9|7<1i|GPA@+z zz$?=O64o%&6$n?}lzLkU&x9JZ$e2a-?kvRW`FuywDIQ4J?SD`z zbP>!cgVa=nPRRWZ zqw@=EEO0(d=SA621`>~ua}h+Sl14D)`=GY2-*e6*5+q^#jejnsFJ$>~| z78w5J&~twS5QErgY=vjX$8ZjHT`6-jn^i>CRu{$+zopH&8#+Z~rq8(F-^1PxCkqI# z#7Ej&2S)6*5J~ilvSnR;$Hm1+uuVJ5GgyiK%i$;m8t8N@&L#nee++U{82e?UHhCHq z%WUug2V%j{sS<{!AeCa)NSmiUWErrB3In$H-F!-RSq9tpD%6==b}LAew(ZvF0SA&o z+foPKYoY7iY%BjMOaLFYS8Vc!V8Z=6E3`q(mOrFMDQvn(Qcc0(<`MrXR?mb2)3Xh3zM7Z|Sy8>ZD6My&9@#_c~nW{6nB|N8t>e2NxEYQ0Kz#)U##}d+vOHh@*<(=ScZ|ha{b-oxB}VZ8 zhmp%q=Jl5g>;^X(^lq+p_<`yW6eK@y4|DVnoU0I7M#EGkH^JQ#os;NQX5 zpFx*TonSg~f-3*uDAXSn_2aA&FDiFh0x5C=xpJM=WVe|JYNQ%5NitP@0OekG#GOPk z!e=~)2*nPoDoul6*d$~%JHBN@?_c@iX@E2VAXrhilHwkYr(kOY4Fj=35V{1ATY2;7 zX43x;jR%YjxQf0IQSQ0N3A?D@=1aQt^`GsZzRd#Tpsj> z=#nK(nPz;)KWe;|-^s7dG2&(^O8Zo?zrj6~`k37lk zfynAIam7S~Qv(?{jR$|lIMtmRP&Mr#es{zvqc&R1i*CgiQ)%Ji<%I0AFNFART9;uYQvgW3M(S_K2Ydq@qk#dc+M${-vcwXm^j~vBx@3S2A>#lAppra;~1o!)$35>@aE1s{cAbI^YMuSmJ7POUo_{r2Mn+dBMLwBn1TbQu!0D$M!s$sOd%aAd z1rF#H&D$T>yHkqb+D_%~!u?hMR(?qGxqNc?i7T~Qjcoc}rJC6~7vfym*M3SIz5Bi( 
zCgZSCtsbLP+&Yu*7*w5i7)4~dJ70YB`(Y62UUfNnsEa>v?DTZ!>XBc9U2M7ji|X6m z+uwID3vG92Aq^MHNf3!hL`Pv2krNC)Oi~l0fYBF0muJ|1aU4m#;QhXuk^IUNCVEC9 zRB>XJG4=sqEyVe58O1N9JL0Y6kwADzEs=^4Jg&WmPNU@at)#vW1PGl5yLj8lnk14R z4EcfB&dU8b5~Ougb{LA3=23xFNX6mWk~(58(a5PX#Ncp*>AK)n-%Y_Y-HT9+7nqWL z|L@~D?vL}#FXzsIGS=1yYRlDK=%T<5XQT;AaH<99ZrL(JB2c_cHJdTF@sEV(mZg2` z`c!XSYLF~#5VrUO;P&>GdTls9Xk3zybSNB?toP(oZf@@O1v$uVx1h2TH3SJ$T8R90 z8LOkcoh#twVRws&{XKQlg-kdG%d6wzL?$t}#b7W0;%4%{oU@h+2ZXG)?K`sP-vU;b z*k#xe%^Qd@DoChm2Xg8VQn|zmw66m2e7=lF!I!>gXF7tSFUtbmhAK&-I&wLL(6LGu zJ!oe(!@M=CA?c$Z5NfC?Mm3miA&I}-2i1OPMfbjW)Nt5>o}hGttiO5F{&5(lRoN(i zlfP1r$}f{O7`d)x68Edoz-pt}<(bZ<19VMj7QVX?dg9%ZGv0&J0tgeD_d@|qh5cnmPH zrN!?!2euKaulr-q=1wx#Bkby%8H@hNMWU73hH$H?h)!YVizXzz`C`70YdYeZ{~hV- zJ^ZmYX_PGTuH*QQNe|A3z3$$p1Bf_FR9&Y*!7wJC7=fr#ragKajA*-E3(!0 z3rvL6zP^jGKp=DMWhwrA9KbLKt4AcH7C)MDJFN>PE#`X6Dj8yY_}6s@QR5E)3ZM}F z7CXJ(Q?{nNd)Q^ChBSfKY2EaT2><&1EG#*~`Fv|u;NGL-DZmR3_qLU+Yxn43dSVpj zKh~ka4x3;qM+bWV?aR?0t&l1g&Lh1%=rWaQa;JW3ijg|nZ{w>%AN2vaa=_fdoSZEp z$nZ6QBL{>RLKm=pH%qIn0RQl`UkHO;6*hj*py+-9_Muqx8SIN;g1czEK>^->Sn~q+ z1whY79Xl$mMOAnx2yh8R6*XWk+eq}jufap(=!&1q%*HCWhL(Zx3s2PH8i@4mtCugl z3A^F>uWw{nLmpmj5+qe+fMeODJD75VvDq}N(nUOIp|}K&_Ctcs-Pkwj^&|Jw!sID; zWN|+tqbf*0p@5{|!Hr+`9nY&Cy>~aab_3P_00Z_1bC0M&B#lRo* zVFff>nW_zPm?-6Pw89L>=7?B2U{Su5hSTj6B@MxDhO5YRPh0a{dCS}D*8dk~^-^*U z)3x?W8fq}~9CrGa8g=zTrPbZ|3U`YvYaaPil;$tDaL4uYjfT!_J|_?OA7_(j5)Bt`gRcDM?7 zgJZbc8H38;h)62&dP87A4i;RVEgy$u5`5cF^irWhpQAM%YZmG+k>=i}=;lk5f5lgSPIPHt|(m!&gbcAtO^%A{UBTL1Y$=nba^Zf|r z&wYutTlJF11+27O*>mr)es$b)bn+Y-8~OeACg(BdW6dVTsj3Se>7#lZkx!HccPG56uVnrd79 zuMgP>9|8=62vvkeL{5hw)^0pZo;|)_!o|N#oWTbwcLNj3(cyZiG)WS=!2z5`3pC8Q zoY%r15l}UfQPUJS(&cFCxr^TGbCJ9&5~mnIr9+X_`3WHClOU1~Ao>^v_vuYA;VsaM z=Yr9&Q5EWy0@SI>w9;}pY^tlnS~B*^{mSmC{Z(`6W?F(X`!CE2(GFYFQ@iN={MQvp z0pA78{7c{!W)QcXD1)_9)B}st4kObXjh_2W-{f(#Bk8_=Twq<< zf_aVwpVs#Tl%M;muj4Whq^jN@eQvnRU&5*{%(FNcOKW@BjX9t=X&mQR!=G!_yN#L< zG|>EKf%nDhQqygsi0(I*IusK5BN$0?GNVJ@Msfx?Mu!K|_2;!R zN*xn6vbwQ_q@G+!eiBKV+)q4?q-&yy4mE+}lmW2j6Ah=-P>XA3?WD|`@rt8AS3-=s zAq9FvQG3XH?@|=ZbPO?Cdpc2Q@~QL!KJ$@=3lvo~P!?YEv~`LTyuew7e1d+i-o9Sy zJwh)^jItOi63)24i+RucSB$>stxG$h_B{4bG~1?osqz~y^yMNx}-cj1y0?!oMI z%f*}N?ZpmCDH+8fFEt5*Pa)ahO~sq5$OkhL8ngJB2x+PPF*4oy0YT^Q*c3DL-LAWE z8GHfsV@TK$ftLsm_Ti{n?~Z6_vQ>+_eE8o4KJXm&Jd$VWaY~C&Mlj(>pMRYp=Cvc0 zk9+3pC&V z@v1?p=v+o*>gO84|AkX!`0l%@?rr11e`lWFXhODqhQ-K#ef*IAjixWRZ&b#(AGT4< z+1vAYlCPv2&W=>j#3z_>Q%`gC`R#a$vF9*}4Z+RjAila83|`z~@uA61-X_|+$^Q#- z>V7=TZQK&`VS=@RJRc@9XhM1#8+k4%McwullR&&si_&tczOn@IC)*h9#JA$GZ-@zt zd2V?it#OZ#YfT^qF*j~MSaJwhFxf9n_rAgF%)Bplyww~`MzrEVfLz*VYV;$J0ml}ZFWXry|IJPaV{@Pwqu3WBSP z5@fMCw7}>*X=ce!?Jm$uQX~H}Ap&x=5A5$Fdb&E$=7zEQ^^q|ZjAbbLoB2^fWn46P z4)199&}mPrA#~cnz=Ahw3sJJ)C>rWKlOl0{UO&LY@P=M0_K*_~m`ng&wi3<4Lh#S0 z(ua$2&b{dbi3AV!Xilmo&fyDQj5O5{oXXmulN-CkJ?Z_~WkdLaV?2Q;x}o9?@4tV( zw>@?T4(J2K1Alv;NjM3Oks|f!iDKkc6}lj^?Pzwn2<1qNDl@dtEHvOo*)qXa=*iUx zHziSau1S*ytu1O56$Gi5zN0D^0mH1|Gqv$3U~_@jvs3TgG;G&vC|Yba7VZ8`?~yy* z{->Se@Ag+COYe8M=NMS18Vw&{+Q965W4;(n=^h1V&Pc+i_*ZvZ3@j6}7FeT5QyihyBhA>=^ z;&i^nE+=_7HKhk(O0F;!!fv@~$S1(~^*a09hE@g~v#u^I)x7du#x=IKtyx1DN3DK1 zI=bM69O0Hq!`v{}Ru}G{4p>yn2^LYXR!aT$46d8jfrr)Ca{0;?aSBpSKJ>zVyy# z+T^jArBZYRz>tplSy+VmFW?R0Nrn?nq9MW+e3&u}5iCJ70hqK#2d5yAmYWf&Eih<; zu3Tt&m0f$Sxk6A>JQ#3Hm!hm<7glFU4Tmp%d91t{f!DN&bW|t~`(X^<)MbS0GDRpc z4+3iJ6864h!#Y2H3L&?Aq#V~?@Vww6&3h9#XD&ec2ryy|Q%)!8)AJ5WY9C=u=p0e_ zz);mE6Xxg`EEOoxEYxO>xg;6N)SejEY;k^F>iuEy)cfS%K2deh^;aRC>tFyKc=HEj zjP^OoMDMpU9)ZnGkidH|Gj+-o^}5Nv| zPEF~7>(be9{Jp3fnb~Bbo;4HwBn@k{^hYRkH zKc5EtS?|8@x^QfuF_}i71Et!Z6>OhfhN9xP2(=(&PmHW~+6Vmeuf$GLv_fvSy;xyK 
zX8ill)|Pv819rV~5Tc$h`|?Pt%Wqode?DP>3ydo`q6=bv)z;BD7eif{{A`fA{nnRg z{DX?Z+6hA>10fvl&+~Z|Bp3wdUCo9X8sgG1YV6i~>>2$sCPk6QafG+>WezTV9J6&T z%7`&xu#7pGOe8UyDQV0WkQ&<8I|G+Z8)}){h+3>DNLL^C;5x*;=kfq0m7VjcaZcC? z^Ref$(GiuOPc@$?U{xX3P0$Fb!o>*NzN>I)*AX&$SwalkjvvPq+5qP`u>hH3bB##9 ztB^X@p1<$3pb$4`M7#M?JG2`2HnJ^ucgy2PMqhB1#+bBr-%`KzbchnIVb7t3MM$l;WV`dBj$Ee6I|hu#XBxC-X_P*RlMgWG|^Dv`GG zg2Zv!e6^xPRhoPy+H~c@#7XM(QEEzc^Q+YlovjYQ!?#-yM7AY!O@HDDfiH#q>NhTe zesF?0d~(wVt|M$QT<)(Ta4@*o`fH2wqa=x@G~M4mNdOJJ@9*H~hn)&kYQ4m)K$oQN zLjkjqIS@M4LsUsQOIv7}%XI}QCdADSkUCwi*SB^-j3NA6WexaIu*1|r%iU*2OYoz& zg~Ob@XHc5*D7zxE;Ljc4hU`@IO=-02QLfkSeu_HfPvhk^SkJj+)9b*ff}LMo*Is3% zR>RO8c-NG_(|I$sc6wvORB(g=&bl3AtB1lP$mjVSl871uGvKhyP+n#{Rxw_`J{?Ft z@&>^2*8|M2cPKj&Y3XG``J*8jLc!;o^tILV^SE($z)ed2%f97=mLdFi!92|D=T4^$ z5p(fmTv$ipOxRPvM6RzfLg`!c=d}|(kJgZ@;DZ>Xg)jz3oduc!sK|6*+MS#V1%OjE z33F;Zk8n^)Gr_4t!l7s-I(1sclnC$v+(*COZv;PtdzRqamGIiZNn!xqUhNSj`2aTV zk@IY(kgdIbMC62FxfBIj9dP1Xz}sAIc9X66BchtV=#~<1!nCj#l&6gfG(<4vbrEP= z&gTbqy?R(mvap;KZJL}RdsN+fw715t-#mtJ&f|;evLtyjrDZa)wd5BpcP~Zw7WImX z%8-&yJ|duqKmO@|VqNfb?WFrLa)Q zQ9)bKUhrAlTMRaZ!V*dvIcX7c%I^YD)h@7Qv#ws@^}?*nVHp4;iqhKKdgrSmo3S}I zdFpl$-nfxQU-@ks;T0MbkNPxLi4cSdVNG|J1{L_Jg}$&>-HW2hUyQ`!VlJpacEGBe{fLn`=eDS5@0o(Qd#*vkwKZ_RFU1o? zg7xx+T|ZbZ=1pN~shrBwwD_|_C1_=fI#O15V0S3ST9ogxuClTZHaOm_7Br+y>UEzM zW9pW)V)DPOHa$bGR`R z+m>-(Nv=9r0ZH4Qpy{x_FPpXxz5mEn%L_4#au;6qdnlY-&pKQAx=(!RqqMmy)H9nw2k-POHj{PU zw^m}>XNfknf5)$#LEu4f>AhoxzpfOiW@j^bhfw4Ojr79>Q=pCcvy#%9%!m0^VEZ|f83jOLxnt9&3Xj1rF6erRfqhxd{2&+m_rs;VM( zg7BrIAl{$hO#-2v1b_G7X5(#WID_Sxg_W_JB0mlNpr-^c?83<*5%{-snjC$=t{AnU zb;wSZZ(xvAc~47jHB@r1w^OTViaQ}z{A9-0SudjwM#Vd$n#2+&p8 z%Wi7icSJq#X%5brm85`OZaN*SQS`lhZvOcX9<#dC;l7*no|gkLMt&;<*V)KxBHq*9 z)jmY@H@{hhRTxuXQ)r@43i&rO>J43#5^J+2?lg(rSmZ&By36!8(bu3S@w8Op-_3;q z0n?}13x6MG)%9%x>qMtW$5mdO2*_iuUI%PYrBEz|LdGD6>qaom=VNmSRj3T} zDgNcaWyEY(t##W;n=7%tUSVe3oc&lKwYt2vK*W?R+GH;Gw-HUnAD5sMg)gW0 z>pPyM57|Hto>q^)jIHCSJ?eyCv_JXaB!K~x`Ph`7k;7#!!(rszKQ7p_COe-#NK#@O1}`-X8F-+^mgE_F(nh5mzAB`F zY}ReJDj0Fjixv;&=+Ze1fx#cG2UQz!i#zX%b!IiACkGmB{a4gb@20&E5+4yR#=$HmBmV_a9Jl-q0EwumWnjg$Zq@;pMhUcJfN z+1&Pa5jb0DsL)O*{$Ng9`SMru5QzNxosGovisNYiIu&v?kT3ys9SJpq zYI7sQeg9{JZ0HD`OwZG1<=^IUJPaC1^*?H0^M>UH`BP z9lsun-#R!!l2>tpn<$MkD2WZbMJr#kWBDnkSDI`>RMbskI&H2t6~kvzS+t%&rnc>B z&P_oGZOieebscJc0X0(C;!lBT?a0oP!WNtzURK)+dZvhC58inse>`%{vA}C`VGw+ z{w&dFkKjY>u698Q8ocBubU+&3hnlm5KLh}11?5N&Y9s8dcqPg9INPxa`o7#?lSJ_{ zV&PITwsHcPNe2bH6Nl^rT>Wv!5k)(U*|HFn={-7&t#37_3nByd< zU5{K9QMOi7aTG37I-e+suWv#MV@*n>G@v34{UxrUc~#9>Q5{z-ZJX@H>8oIOPgbT; z*mHWawDt%m?xWxs-*VR%6GW603@?99PU1A@MKxpUnpf#fVS($ouB2aV@!D%`db16q z>}END15JO8K1Wfo!)t#I^wx#}C1bSaZSW>-$Z0e&IbSSkr0fbvq{QxMvS|Qml42xa zx+*RQm$XClLyz9QPt8)Mo z4=o8-L-BkMn_n<;V8+eY@HSYwgKHXuUK346oZSya;K%>Ir?+{2Y;ML!;WNDvTqY|{ zN@EtrpXRDE$u#VbpoXL3F-8%&ee>IkuA?8##IX807X~!%IyW^->God0^78J-)C)Yo z$|kkpK?E#NMh*|G1 z<}Vu{Zw8zZODlnt3;XA_Cow6)H?>^BvxT^H@(!nmE%st#Bgb^mn{gq*Omy)M=;{u& z1Uc6I_XIMhwnEb*+yrxd{Y>rRUDt0&QWU|W{)j6-_phx6iOkKUe`!)!h7UqI>H^ z8Mp(0af;O7dD`>w;BbO9WDQZolfo>uq3;y56v4qulyU|WmHdD)4Dn2BQ>1GeKq|D| z11HK`Est;oqBLOdve2*x)_N0sf-QaPyM6VveVv3W2Z-9v&;r^LAP&Dl@W#D_$>Q@O zbkbDpd!X2=3m5JdxqhEP<_Sg7JZ7)TSqx}QDcN$;otwL}gs5h{#A{R6(vp)HtTQTU z!q;oG%geu!%J!rD?tt~ko@uVX|Cm|B^9rU^9JLUj>v^|{B8j?7_awMNoo>V5>A)DK zLL21)iua%m(gtDKFouxCEbjeECn{pdGSlLlW=i=zW#&%OTj1&!dm~+i!x~L~AKmEI zHW1*QHJ0SQpEUZRV1HC{-2RU>A?S`SMW#qJ=*f??JQATE-f027TqbBl2B0jr@KDU< zajoBi;5=fong6ornmJkP_$d$$lb>-@Y7!;3=D7ipRj_VA;I~%k5Nx%vTQ?7?*!{3tAY<;%FK^HE^4D30D-Hxq4PFJUdY`WVp(_O zZIOW$AfXr}yfU@(;-kN~-mx$nmPJq_FPHXqD5Ws&G>lN5I*YXW2#b?FOmGF~$@^oD 
zwffkI)hOb`H)@WJ_@Xx|K`+na2z9DZA{tcTa}b144!^)N*F1CFvLEmuWHD=%Sb6*? zkTPU2U;Ad4;;l&PPh4?VCa0G4NNj)gue)lYvb5nXSgbQ$de9!&db~v zg9kyQ-M*K0oq>p8mya_@YD(Iw}iTDj|Y!RHOv(VrTWiI#Qf9Fg_%nVc zItbfMFQvj>IFOd3ho=haDdX8UuA#0NMJNTn1a?!yJnX@v@9{7n`P3*2eI%1gHo&;QQllzfz+f0$0hb86?NHHNd)C7#crLAU_gRn&QZI0xvaDkJE# ze&F?nH+8^M@S!Z8ix&9_?^ynWu8l)e(0Pqhn`WY-?ot?7ugn88KvnjY*#C} z0t6UbD^=p-5U76asw1wXe?&6BgwU+j5}nGDOI~_5jgo(T`*B$U|FhYr(Q)jG%ludhaB3{($*XWU|4u1n zo?X4fI&cC~@GZVYFibR~C!Hd}En9*);PkA>2#I%&*+i-x1Y>-lxJu)&!#Rk}&j^o%hepAqXPu zeEjA87Vw{FP^1v}TB62k9_gjFr8$Q%lJJRFrO*1a5ZXY!I1l^*i}fKTyn|&a#sDWY zyQQYuo+Nuj)HBxBHwlgmcX(`E;Rl5bl@S;HoN{=`hzJU#

e_XjwvIA)FIqjD7nYjC8V2)lXc&pl4`E%yP($ zL}()>2;p%gZr9y#3xU*xvr56w7gLU|N`{S&YL%_8R$a#d?e%9OA(zabSP(1WV$l=j zWdxuQvm8kP1O@hPm7ly2PFjAL45mcRBz5pU_b2?1?-f0?>pp9Bf3iyjiaC9GWX+uG zA#=J9RUp*9b|(U~nhI~lT7x5@uU|J@tDO_2Bqb6rOUwMk7+m=bH6|0UsBaGGe`h&~ z(H7!Vui^Ee4^OOvYDy~8s-Zp&(gCGjsQ#)Ujs48pzuh0-)$}@Wjl&0_lZF5;L&==` zO`yMybjM~~af>NghI5l;A^CUZ@Ue81|11QG*=cl90<3Per=&{dMoZ*8Jt=YH<+X(_ zlS4mQMZ?v~-8E8%F+}*vX0b zLZ$=Z?mBMPWUz1LqzQk1MKGjj09fZJAx&y2tcPY7If&j|c34Y@G~*3VhEigDvn5sA z4MlFnP998=oD8*Jh3vNn+MgH?hn~i+S^ja`9g1?-+&TARU>N)M*lZR&X+_5OT=FCr z68^DYCnRjc$9@0+w>SC=sqM9NtLML^;|CB5LKkVlAy8)AR-UK^Odx?bqQygF29o*d z*(fG{V&_FZK@lZ?-r)R{b7cC<0bY=x>lXA_4}z@9bBn79+7MqcR3 zZ$c|t*+7GzED!%dAKs+1)AY01GDdyu%judbdA6e%6y*nh$n3l|SGN=Sy2Ayuz8Hv? zsEG?m548@nLeLxbQwNVAnJtL>sNbXe|h(P(@K zww)u{cJgJp6|lM1!!~UrwP`DOf`f>oE2pkzUS4Khf~i)>;TOly;Z1u#=C10dL9gp# zEG?hcFE4sC=06vr!@W&BZxn4$s#So}Mf1;jMDqTX+n808vG%u$zkJsw?QC6d%-ZIJ zHUK~{P`zP-o#cXO`xV=b8~_f_2;D7 z-rVPTmijkXU`h6!IgJoef*>&hEL2t1x5yS&7}*7ubIw=MMPH$eve+)2gA@&Me~8RT zzzqgi1zA%AJ*5a>tSU((Rf{gPg0Ld#E^_@J7cPU*cBYD^ zY+IN^D*X|{RT*-5y8;9L5!g2OeLQkF&A?-PxyiyHlZAGeL?AOl4Zu%&Wo z`Egok_4}(_s-1*)=-|+?-MS7B1|cGMJP*1(YGzo5)o73!o-NAnI&y-3Iz6k5R!1k% z&K_+di4YVh+44{zC)4qjEVf%_32!v+IlG?8GEauTGXeCG#zzxTdtSS&9|56h;O?K6 zn00GM)D}kr_c5;-ZOykdOy@HZg?by<>*u=NPRJG}dal;+> zI_5Q{uR?$QR(P5^C%C>@(}JX}pbi=;uNSCp?I%>!DkMA0p?2}vWoZg&BL@Il3yC9V z8{El&Two6UnMh5ol2TKp z9}TN_Mm|~jSn{hB3hB=&3!bB(Xu&-P|K0xgxBXdCGtlGb&z}egQ+I0u>YQp$t9k7$ zG|=_}zjax~nPDy6?}?|(EIjb7*)~Ga_x$*nrgpqFe;h7^;Wr&GE02Db0U2oTsv&F$ zEVh2bt&d-!$eIeh<%j;s@yBJX@AJtgHv$f6EE8%;JlbrOK>{d}E^g4L8YbkubxiR9u_~om`RH6h8_3bNWhdbC$(QH`JMqC*WhhE20ZFwT-WNzIR6;-U3AYI-*oGRGgsT?I=MSmz`#lqG}^GW|LhM@5bL>%4RM=bKdOP^GTlb`yL_?i~#s| zae?c5UT-C5t>>ou)Y05nh{EMQY@vu20Td#Xx%O9i{)>kKqoxd~p@4eM&$VwHa2Aw` z%aSi2C5xY__w|8F>_kx^s+Parfccw`_FRTk_z8DkB(XSsUaD*gqH-SJ3aR_6N{r)n zxQ(FEimJJ-A%9$dek-Q!!tpq*?+MY%`I%2TPgy4k-OBxEuG1i?()vZ1`|)TEH@PO> zC-i_9=yqs2$H=xDu~0SA)bc^|xQhMbGP?k%30Idh}gS{YHzp48`KhC1-gkMoIkVLi-taDIX z6j|CWC$UnSa}OF151uc@#Lg19<}gmcpPzk(Ypdu8cg5PW-rAwmrnA`oWuJ(=NjK1t z!mE6Lh@SmbbT{B=u#n!L-AVxTm~eM?oB8NyO;#%HwJoZaH?F#qA++|fB77tBHWbSUWxkH^*;}a@&uw- zEIba4$BPgTkI!EDXdXM;eq?DK3Tw>@u_ZQx+Gf{3Y(H*BE893N0S^Jga3#-r2W*Jq zY|cJo`OwJdz55D9pByy$Os2B$6#+ z{481!Cv;qujJ_Gn%|<^ePK%Qx6L+XCO-7#^4l!9clN#T$K1 z1Ek4>2VX~-9U|+)o#D`56$snTh%hQV7UP&dx2>Js6<4$oUCKbIG5*N;zKpynB|L2( z=%`D}RB?<71D5$|t9@wvw+lwRt#Qpq65>X7=(f*CqBYbN-vL*(_cL*kfwUK%I z1WIp|i2MA!Ku~qcvcUtdB`kmP{9_DAU_yN2V+vD!Y+aqunizP}bJ4c``b+z9l-|s- z&NMT_6)(Fb0ivL7+Aw0Y8z`AY@uRV0A7QLMcvt`SX zwj>}l`*!Z|o~WTc+))H*c_^?oNEU3jdh~8lSVBN42zp#I0CXOj!{`9wo=C$ldSM$> zp<=&U{s8pVB?vbEn)$0T#>3AQ3?aRO6F$q1(Zyrk9hIo%xwI ztn{O}LPdhHL-_cylgCBdN(SBc`kAH7$CKv&69d--h}D0I^u^NnoVe}ecrv6I&ubq1 zJJxUkz5O(Dx;E}0d3`qIZphm`KnThegW5gSstiz2o443wi;6_(98Vu*e4)}M{jy?x z=dw$3BD2-S)+r-{m5KT^RO66~52Bc?WkTM$wHUX!J@&5>5!EWf-!fIpR;&9;2f!)! zg`^F@$EAH4hOeggbS5x`Qgag9uj)NA-hLCyR@n?Zxz2<)q^1$GGE{0Xj3$SYRjIz+#F}7R572 zf;LAiZcFvXxvbQ!EFF=0U;h2!;=uR4;6SdTCHH&kM@1(elIyX*XuW;W+ykd$V_dVK za;3EHgm;Fd3B$*4hn&E^u{pB6yQ zg70oBfEIqGd?-8Tb4%1j;U2jQdBhA8n)Pf`YXa#R`7fljMY6bHF=B*IgFx1t;=?#! 
z%W}8PnwpwDcWb+XoAJUP^BJ(3iAM3oWU&|2Se3>Yqat_75xS@cm}`OS+`5{oh(t)| ztGdwBXZRx3Wu!z)xdNXpKLbemS^N=uP)Qv#kI2!pk(ZZO(K_|9*v{8xc9pU0 zh!;x1-zZWGJ`;h$>O{+AZHtfJvyK>nm{bbdC0s==>rfkyAo{b{-U)a@O8%UbZ8V#X{edzs4w^OI9r?w~I>I#OT=B zMfRyeImSmORe?J4#`2lIGoKYW@NQ)WGd0cqs7VjgE?SkB;(+_y7P;l=Wxi-D6cPCK zaNkmdkoKI2D$x|Z7BcgZFT+N(H0Pl1Dy8hDqJMa-(b?d4AcYWP_(82GA&kI}Ebu{7?z7mT7jDns#Ma4BVC) z*)oWvgla0hLA7>O?jlUwp6-HmxO;pTJKLsAX^>O__hlkdw2n2(Kf-So7H$F@k1tDxS zh&$wqLQASAi8*d&neo@VtLc!E=pK4eaCu9(6S`#>B@ zW%*4-x2A*;NYo;ggdz_Ggi%jeV?ZmLoZ1Z_dmaNlE{CA)L}GpoZ^fMy za2zIc`eu3qIEP^=J=NbN=;R`RXdi3!&)uA9pR1pUEw`;Jpn<5Z%A;MxrE#RKKC*C~Xwt#*_qR%t(ey2By-f zN$B9vRiO!84px?E8PXOd^$#L|w30P;?GB4!^i!9I+Env@GWJZ*_8;jz^bteCw?zu1?YBTaAhhqj~hBOpTNhMe2 z3sZuFP<4|NA-fwsS&E4UTm1SN4<~U_o3pr#k`%~ALQqQ48E^fEl%Ca}_vDFeHuVUU|H%LMLv%s+0=B|!3)G>0@E^72nGH8$lW#Hst zxVlTaiyo_|moGg{Q;?}Py#^6qoAjQ^cuB~E@Xy_wG;HngY z*Gpw%+tRhHSR@e6kfVY;ZbJ!ZFp(EPfd+aiEOa~S^&@#SMRH!Es^rPsw0m6pyI1|j ze(jF_tQY?(Id~`!+Jyck&7$y^^(%tP4b2z|dq!HJ{xryA)2V*T{M+itg=@-a)+dX` zf&QDJ{qqlbx`OYc>W4}_59M*w8vYiKt$Q=%Ha0KKA=$W02k;RNG_VH$=5AD$eXm&8?_}>9xldLo6Es>{N4Z8Jxtx*)_7Qk z*MiG=gcvgXX%9TRKiAL0W*Byb7pf*@V(Q1}hbETsR^Caxb(J+Jt9X#iqDvJ>6+H|E ziaaPPgF(`TWUF@e!`j%T^8l~9LYnsLnr$bqT|uY&nwUb~3$y8V9dilfwg=ANmGX)X z{&kL?)+hAqtylDB>s?47k4VB@o)HJc??eH)HSBFNA25k2$<lgXJB z6Rdx}b4=4FdSk5agVW|UsAfWIGZrm)V92T-zud{0I_!Ujk9{ac`$5(=~3Vg>s4^=Ib<-2eIGXZcXg^ouk+#c*nt%>V6B%N+Hxh8!6t}}u4U4cAJg-+mT$s1rN*ct-oRH3A`C15 z(zx@&&ej`cce)C1C0eT?MDN^n?c8I`EGD#k%;$BWOYbpz4?~3gl6#GG_kw$LL2~#i@pT6=Y8MTroRKVyfe5_hqcGlvGR%Z}j_GG)yso3O zUierV=A3)4Vj*gU&5*iYKKdqK8z`%GT6EPgMCE~3%2c>aSI47xiBes(+d@NLv^!p2 zln5u7SjVQX@??>vL?3$UP9b5fYs8^b1f126w09zrz)O|uW$O^Cxd`uk zgg(i0x-&8|#>L+fG(U%CFbo5k2eDsgvK5Oxq}!s|wxC%Kn%j!TeiGkjK@HSRWBFFD zv9FJ1g(#fp>4iOQY%N!gqX3a=b=4SDKV7Cj8B8T1gt`tGDzrH{=l%MVuVLc=jB(<> ze&pwFPvi^=&*@?9F>YXY>tH67wnoGUl1X~oWG-P< zeP-iTyJ$U3?%eY;j{9Y8^gtt%AM6``E9BA)f>^EJYqkGvhnQS8V{WF@(4P?UYlbF>)Fph|L;*$GjT9n|KMje%0#eHLl)} z8Hh4P(-#13#_sasRFM*?)E?_e!~L|U=O?k?_~R2UCO<1k^2{+4bH9POSDa%e$+#B_ z-wxp5Z;nie^X($T=@)gCBI)CDq9m%FE`Alt#bLt;J$(|_K7o}6DB$)pj!r`0vIPHU z^4XJ2BtI+xu3EXMAM|=e1k}HdI43I7CaCx2ynW(Nyje5wt|yB5imMn6NpYPy8>Qgn zDsZhJoowxOCyXN&9*%HRnLnvVHuiPwmMUd0_7#%?+MU|%xer<=NXJfvaht|sC|JRV zZWP*n@hjT<;b^Dc+K+0*cciY$Oyh#&H|%~!1*Gac?iXW1G0-J!`Fg2~8>MEMT&xuz zmPBJ8)?5vkI66EnI=ETmyuDh!H=gP6yEX{PnXAk}qV;tr8jGtd#`OAJY{@ z$+bF3^aX5kyxKZ3xK@LB!~Mq6*u?XtX(VH~ysX*BYM#g-tvskE`+(7sC4=#1Gmm=g zG<9%LTU9B#5YpKrgfX?eCr#%Tv2ZYUju%LfKzv_^$zy(Q=Mcr! 
zbQ*{a-xP$4%Z~0EP@^yj&-0<{%RFJq{F$95MQV+#!H!cxF)E`;JT&Tw5p9eRLLCDn zBqbvnoBW9|E@LcaMQ)6LYPL3Oym3Px8x{pMLHYKeV<}OlSY!s|w2#{$4fLYlF3OL6rt*^OFj0DC3cFAs&is(=3p-{o`tOrgxFLCFTOh|XY_87rU6wD zZb;Tn=@jkg=UcYkXC6~<&M`OXq0|(&fHb}rwY<86RL4zC{BgQ?P6Ka~P#9=KS9b#k zZf85tFuqu-R8T~5P|?#APxM)m(t$znU+^1XrAo>x72!eLGHBxTB=n1)P4b%Pf?z+; z!=dG2dxYG-V)7?;B3r@(McU&u(Fl^_(f;8~O>rZVuE~=2zIewy@lUD4%}hsH9pw7*WMU%AjU{Z0X#`gJXzY*f!~K{8y{1Y zJ)Mpb08bfZ@kiD4?Gp`I$y@qp3CD?pbZ7o=@>dXy)fUs20MLw^AQ|4pm^!Ofuet{B zcp@+<3rfNM;ku8Qq9~A~AicoDzQxvqZ3si5$E}%z1SM+!RN{k^q%DeSG=gMAyj-*uTIDA~R zmauByJe{R7rB081f(Y7E6gogGr-rxoOZj#0JI`-%Ug)R_r}rz~6n>43d-Jnw2&1^u zeBV=>7r!2l!rBvzn7+qQ03p0-AU&38#fy8$8V^h#|23sv6H|DMCD-DouOeUGPZGn_ z<2J<@IIrR@+tWTTndrk5aS(gD{<|lnW^ZN@PcHdO8voa^NC`)Q&rcakZ?AI*$P+?` zoRH33etGw?_V~y=e3w~gFg1$6hxz@yVFf1$giwV=O|@VrA_L<^Km7ZPi-dEuySrq` zgl%Xq2#pl0XknNPCAt{27;8NGn+b+@ zGE-E#04QTz-2z`gg)%V`-9f0a=ua_#vnbOZW~e(pkRsfYQ=K*u%$y9y`KUu6I2qfp ze2$s)RsnoBu1F_JAa{V&95P{Dq1 z0c0k28N{l6d3z4l@lyk^C|pUoXG@AGVngTv1`bmEYx%6rQH#yz_51_h>kwosBRWB; zOl+J5O5^Enicw8&SaKTgq;PRouItZ3rp<-40UONqR3)Q~{+42}@4=uIUEnVueT^i+ zmq6zmU)Oe+d8{TFtfr3alp^*XILWi=U3bA^fda~16nLYm99M`Si60yDQ|Rv?XT084 z(7nSH!tp=LBqs)8M{6wo4;@JRjjZ}b$KH(KujZk*2Ty8C%VCx|rvgHDPGY>iRRr6E z&T=?~?#^rkS3FFMUR-`N6q*`ep6zoVXsR~H?{kVVO~BY%o6l2tA0tfRv&EwThfNgM zj!yboub{*FcED@*0&)!<;@0Dp+0zEpm0KPAhy+s4IA+2>h;a$_d8apHAi-$a|4FDV zs5Lhf7D&@v>Te;X{&n(*me9Ezzz!9D|84)Smluzt7yqP1u7M{4b5-}#>^8bq!0bBp zf)X2xg1^38XefuiKp@*QbvT&3d=&*s4hHkh95xv1;`+2#{&LqC2;I?6eA>pryFj_gO z@D-r(jsA-TD+5msojvPUVWa`{kJ@)6jJe5YVi`io2t5RFtF$g9lmzAKV3-~76?PbX zYUqjf-uM0kj*2EE-6CIr3~CJyzyv^FbO|esF*R%N$clpi!=RE{f*24VO7I4nFZGb* z65w^l0l;_WKeJ*QDq!oKoZ~FHgp0!@8gi4M!|kAALHwDRiHNqN7ja}-^z^td$MOe= z(YuL}yNGbRCEO_&n0|$0j!D|B8Bf zfi1PHOF`S8FZ`~EIJLeag^6WLWJ_6Uz+lR)qt&}!Kf0=#2)79vcHh{G&R<;Kt;(wU ztXC#~H@s-Wg&pyRYgH$sv&I!q&7qEjLCA?#C?6B9KM}cl`qyA2_)9`U14TXvxm~;U z$=Kmz1(N45!FKf){5;g_(ByGNEi{JQy`i!vM-{VI(4(?`LVZb>H^7C)hRs*@H8P0X z^rn!Ra`LDHYR(XZSoF+H z;j>`CWO^Bg>;&U-!}O2BH%Hf1Dl=tiR!TAO$-6N`EzdYn3Qpj~nYY<)^>hsr~WKSE5~gtB4QghTQMd%tMM@nX8{iEG)X|dQGP8&YHA=+G4nT7_g5l6#bJbgOGptX8wz$q(w)-CYlf@;c(Fse37>NH)uA~ zRNi>-F%f5IL?}g;jk0*0H!)IZ?nVcEO02);H=^4_lnF%!-!`>%71kid#}`84$sHUD zL_~R|6%1irENw0h_7g?-Fpo^OAuYQ@e=Io9pD1!Hr{Mh6(dfgF>su)vLP zSh4G5A?k(^8Hx4_nKdL@kwn)uuA(bJ7h{4Yj6pz3i<^^ z3OB#I-P}}L7C`l9cLapr-Q?QbZJQ+)BMh^~gS})xK*qnYr~5Z7-L1FIH~kxHlH>f} z1yvtxMVan#Vvn*LPhsN(G#I_}*h!whsCSSM1mhWs`%c*yD9_9Vv+?!I&2B+RLQ%Wlh>KfOJRY`mma^!o@al;noveC5-}KRDSngR@gORH8QxbfC>QdxDk!dR10p19 z2|G@~L=WDKm_moYy)OyJjn3NkiklFeLs0oM6S=<0Jden`!+atwHS(Mw;7y(XMzbR6 zv4TL$djb5N;Jrra5&#?6=ZT~LqMiaiNgu|E2g=SSqELqDU5Zf%I6u@F915-< zTaw4$Sii*f6*5Ap3|SC=%gz#^m!OXw!4yLbH2*Uv#t|&q>BF}Cel8;9b~U^zC=kYz zsP-*>6w{$NdWdbEpC59jar8%cOD(wV6tG^*w1HpBs zDT41~54rLB`9KCb05z>28($+l5~Fdpk6}L$=`0Z~#@~7zLnL73%lm82#kF2AD~Tbk zZN_)c=M)&{r`aa{SZ%X%DA z3a$Lm5VujUAHZMvw&rd`=D%)V^tu}H`&@upgOaU#7K4N?LyJf6Xf+=V35tGWqH=6| zp|WMraYhIUb5^ARb{RG($6{}^T@U-)N}Ls$P7ruznDjI|a*szEv5E3_R`A>+`o@)0 z_tVLY?&fft6HCZ#A4YVj`f^Y5FDiR6CbBrXwS_Da9ni|)Y2qY;zwpxInPI?~@X-fD zh=ViN2_~JH!tM! 
zK+^IS98uoaFmLXI(5YsV0y__*^rH&ttH`f?4uP+;Hq8C{}My%?ub-&PIM zexJXSzT<1-rInQ^q9zy^G@3W=er}XG;jYj_#A7#(pLxT+1bM=!e!OS-9`63CQsrS^ zOyc;7)Kt1SW-BN`rLbWBb8hT6OobOYp+hz?2nru}UYh?}ee$L1h4oPTR?q)jo$uwn zrD>pt8bEC@Q~i#nx>9k;2eto=B+Cta1cR(3w-_()OUa zK-n<;g+GsTqG(o-ZcTGiRw-n@2E zD#E#bk~Xs}6;M@*_c&cNKMK^oU(mFM|2c{^SUal?cu)v>w0ZAYliPu0PGY~K7hlHG zu{~PCh;F7E{%2WziSTaw@7Obse!xEDk>gA|@v}a^BA_9f9uZds?|N8$zuvDxPoB8_;+7yII3`CmQt1Ebf?B*6H+Of@ncp&gDlVvn*CI4$7EONqKT+`qsLViIEe!FDk1<#N*v|CGT;~L0!XW59V<8NiH z+xY%GBC7>Rt!s8|iJC;svqb-p`bO~6Xv>1Ux!>l&<>On>_jZwL6)R9siY%4fR2K5} zk@oHr&gchTo1|aPp9gdc?mVy99aLHh9TL&dQbe%fwx<{_iJKUUzy%g@(3YV3FRWYL zJ&S**qDS{=W%#G9{Gbz-^V;V*pFXMr?DOg!Es`(Gej?WHCbs+3w3$~wyoQ`!?{gft z`=8_cQ?>}8s?W2}@BDVc;KkyF3N$t`n3;Ggk>f^>45@aZkqx~BIdOLa_&fwL|9HC) z`kGUO`0xwwF4>6O5uPwpRh2QtEu5MfM$1;z_-p^KF3h08@?{oAXg0HaisTghX563w zxg%)2u#W}dPc$el=7{_Y*2~QDjf#MuJkVf!T+p;%N1LZ%Tzc8R2^CN}F#Tshf*wU_8_Rala1ITu}DGrqHeK$M;C)C;CuKmgxJ^x~a*mo`u}DbhY|| zGtJNc!0OnFp?^Jsb^BlteLefOyjw`u4#pkh%urdD$FN#q3Z49OIRla)Eqq!FW(l69 z#q#cIKlczoxE9q1&v5uU_AT?PJm;B2>CR5pQVDCy$9c<*>lpag5?Q3{5wyC#xZRBa zGw!$m2PHaajXU3Y5kqSjLIpO0t$G5_J}N_A>E6%!|JLSTySf3C|0NHc*kejy()X&} zCzI$AD}P&0Z2%a#cnV^Q)SLAG^&7+{JN~LleD@dPMNkP|!&Y;9gZ#Ba&%ci=(Oc;y zCM{U4|Ipj2EU>GSYlXS;+KoN#|5y2*FSyx^6d^F5E}u(SDCj~d4>-`Ox|f~CBlcm` ztZ?r5J30a4B5a|O0A$QK>i$*OpLlxWKI+_y5*N#tO;Yih8v(vS6**-VO&hf^@nWT}+u2^1ebWZu|h6~yuu zbfFw5V4?vrU4XlqTN3n{iP#d0@0MT{^XVSH+^V&q<2EM(|5v+&D*_3uw%rj#Lm6}@ z?RKDmA=UiV(EO3w`IYq5j`=-{J~Q@N+o_eHif*Y)xJVOZ5q&-0#rB6ymmSS zMT3lQfom&Xm^ioB@b(Dyeo_#l2=bo?r`D67B_2#t5hQAE*jX(5I6c8`Q!j@L&OF9 zSS>#;CUinW1V70B-H`p4xW~cG{e6DaTd=ShzH1kIm%5Uh)uhi5z#@Lp<9ByGO!il; z@3aB}(jc!UdjQT(zhv{VNW<&bPtM2hqa)|);tg8=y`6Nh6@p-b!8}JGi`}|PQ-CkA zj{>|FvYjgIfrKeUyY6D9_|t7SyV-$U!GcS$9+&F(XW&mNzO>XlcQnGRq{MAZJbi-n z(Tf?)i=Uk+LN_Y0D>)WcNpxe^zD0qpu8eO}Nz|k^iyK7W0yhz#FItku^ZSU1-9%u2 z=pJw+Wg+jpg?WClBIMCGk{N_?m$v22wM|MaZL%SF*zL7dU53TM3@5xkq%dJEUR{Zo z2oa4F7Um2}Ceg2Oh`W*Q6fl5tURGLBBbJFVaFV)c15}l5W~!8b&hyz-e5xC3;Bl0d zI;K$32th<~819Ok09=Gjz?D+otP##m4)MZFTscyZ6%)EB-r3 z+;~>saRluilYjKxxaccxwLdSK2jV5HE}99m4YVUuhH<{~GK;#bKQ8$E5vlt?SokH?#|y z(&aM@?uDVbGgg*jF>a8BjciFM#P%|B-YcmXVjPq<7KR)1O-Z!CPn)R?b!8jgcwXe7 zd&Qh1g-z`7880t7d7$r1wQuzXXBH?-liP#Fl;Z{Ws4!-`-<9U@;fcIBVB(1;+@{3$ zJnDA$_3$-;WJ)PxdTf}(eBv*YzOE&ujrwXPD;+{N(#}z!8|-9XT9~4;lCnTWM}wQa zI<>wBh#buzhM>2Fmkw#sxy^O{^vHF%AldBWv9~!%<$O-&k}pg}pzhgJr^;C{U*O=6 z{&#ijzxpehS=jbP?G=fqH?M!He+tLz{SFdR`S^4krioS-Q@EnSmqi;G6spt9idH3! 
zCq~i&#t+_+7i!YGRcCr3;JYv^wNRS?`{4p!Gu!=i+cx6Z9S*p024A1L^{=vH z65#f5bCKhF+C?wRopiT7%OqNr(tSMDHq5I+9Kf#=%4P|~m`T7mHv#4Ga5zSA-smOg zj^#j>8xNvyvk9WaDr!b&Vb`YG*yjUNmGT(gqCOPe!Vs(^(&Kbe`e+P=fpoU?8%qh& z{wJmO3^yeHf@T*e`n9yxZ;1|;mJ3~pwld92M|@s$jl}yYQ+-(_o+kk%chxMKZP)@a zLl_8D=Np1?-9@W01|hAas#gT5XOw|c6_4^8NhGRhl^DMZ>DK>&M&G48kc$Lfef+=8 zIM9I^;&-i#4A=7b&vu*NERz^{uZHss!ZasyBs?CCtph{3%gRf58|?r zVYQjM|L-tjNo@10vIprzo}kH|Lp#pDb)7QC?wu|l1JY}yTfc0v%+m7UvMN>ir+~P0~bNQtm66pfgU`M74~n=Om$4Uqx6ag?2cqx?u@9pWhWrqD=NI%xGmn>}A(fm&ZjcXJ_4 z%a;}_R*Dy6%E(8N90eQN6GSk@z8S9;PjRxz2@#Scw!uD2(i)iNL$#LoVraHsFT$j5 zGdwl6;+R?b$|zJ`pvciC6Hw_GS9D>=Y0Z7Ml~%qwVR6WQb{cldN|l8edAL{cBYJcE zPcT3JTo2ZU#2gey*L)o`O9CdgSfZ+_+s07M0kZ2)vE+^!Jt;A>?Mv7>I?Pq@Q+K&qNQeP%AQ1tYJt(neP7z)`TYbZ5y_yOZ4BJ=y087?hYzoX*Mbd?X5seeKo=()4T|mX>MHp|Fs42_%v!J#F+C@Q z)*rO&#%CWas&Nu>OC3ILce3}DGBhpECaw?m4*^o9QN)Q?bCC9$w0@do5x!Y=e(vH3 zUz)s6r26e=6lt5G7AMKEhx^iKo}H76VcYLi&^f-&$YE8TIlfu-Q6`f>h?-nwwvnuoZHa&uL@?3IPw^^kKY825K zH^nmh)r8z=35FBBqCA49$A#9K7J*XIW=5ZQM~FeWB{A`G%U1!nhd- zNE+ZcXl9Pyl53fMdAyJ|N4M`=f*dETVb&7%dH!Ok9=&dSe%X0!L9bVnleMXv8EVB% z*bh!g{wf__^;`!I#%lBrj53T@r1qZ1G-dK*;_OFM>J7)I^79c=r@GWPgt{|#wVM~` z!Du?bl+xCIn0c&G7o>%%jT-ZXj{kv?MFv@TfSQe9EOHGarp+jVS7;5;@=B7qww}Hs z>Q(s^ZqwGs~I@g+JOK9++axd zcZB+t=yQbVaY9#Nf&H;IQ(Y7dza>hzAVk2lHQ)Df z?}ydsNk${)WX}w=S&%0O1tplY7*{ddkNJ|Wc+R{tvg&ihtQ7g;Bld#nbCo~B$PYBdOFU$0rCZDZdA(Dlo#*}Je~I{aUwM1PX&Hm}u!{yD8&mvz zjT^sx`Tx)T{~o^%Vy5V>F84E3rEPPpx217J;gY}c|*%NofhyL#}JHNSOzvU zw_>c0`jLh2@aDVb3(~z;R_42=SCDle6F6tco!5jk6!q;}uRYf%(4k98~jX{B_{eIW?}%m?d&JnPIwJ_KVyj_ilL`%DM!77>`a_T~W-@G+5S$d7d5TPHuS@ zG*~?|Ir3Z}b>VozXNFMf3MaORDQ$rT%wT}U_oX==!O2CRtKaRg{@A2PNKll-<)-DghE-h8kJqiWU?#p9A#;YsBtL2M+^99udm zz0@=!cw8bE5L;nBR`pT#_x2;sh_QMOviAODQzCy9IZ5 zrv+Nv-CbK;gF}%*kz!vs_rC9Y&$-_i4>0nFF|wb%*IsL`x#omLE1gH5zzns?da5C; z>9)zT2US@d*2A?5Fp>II{@-K%En^NBe)c+c^nbL?;&Nh7O0C^H3Q*Z$>jiy@vL^!{ zY*ht#QskxpwPxO285z<32e}5A6Ixi@c3b37Y`}~Gs(Q%qF{WWLB65#>LqvC$>~Z)F zVyf}13!L=_VHqMq<+h36a_n{7?elQ_;1#T!AM#gW_V3O9_{Zoq>IFc(KAqr~+%yt1 zAF3USOZnn^3U#lDwXI%&Sxzhfeam1{#3{T{MWr)s9 zzg~;PhF6~ds6@5Z?MYr!ZUU-Y-+BTK-9S~0f^UL08NNEtGpj86_`L?3v_d583*ArO zOT(cnZ>jS}co>4|#8*vt_Ra)iMY<@nr;)B51PsU0B9(Wv7!*XabcMS{k=k%v;|}7U zl@n$mAHEN~>N9=wsb8xPz{oz{cf8AKFGd_zO}Y#bPYtwe&aC2fbT)yp4P6#f9TBxH}?Zv}^5SRIzHj>%{dw@jro8wtV^8_L?(f z+_2K5_sj1oN5lWw&yShRZb&Oy!*5Don8vJ@yb{t+TgpQyI4 zwshpr`CO{ur3Zak#e#A&c_!_Mw$r@vP!7YhWT9MaPw1!frO!wu)g-eW=d`(LkhCbR z;XLkld_}s7-y#PaFgyq8J^>3ew3k$#44JF4-Sf-lEAw;5(w)L|Lf>S~1yh&Jv)F>Q zPR-owct?dbM8{L+(}nzq~M4uQNJ= z7UM$}wc=)%&OeYj_I@zG<;0^N@mu_NaC(6*Jz^s@r+gHCjSAOSmGmv4S5J5LrwAwYL9WVnnQ; zV~IhyQIJy+BR#)ea_3Lu#&6;88_TW0DY?fC3j%sdK=zB6p2_9J%I=^Wk$O4IV=$zO zI)oF*(5uMgKW%si9Pg{e_N>Aia?j-5eODQ^%d!hb)+UxQALnDDgU|68s+ao4gx-WA zkb4e~k)+tO#yW}&D`qEyjBhz4x|vPr#av~%l+*L(ir-aMHFe#ZoP^A=;y>&P)?jz6 zd-2wCc_{x#GK}>T;D04lg^*@lAlHaEu2}Xvu2_lU&U+Kl?__g3CRkBY3*ne`lu+nw z2-4P9u5d6WSvm;z{N+Ah`*T{x%xUybO-Uw8n40-QX8h$^g8ipqz?w(VoY*#s{|5&c#5hCDM(UFh! 
z%XP+A=lRrk=#Q#sTV)8tL3+-%s|{&AzoXCpLh1h6#*a(g!i(vMG&7A0XOTM*5D&6o zlNI5etj_e1gI@mm=xReS1$gxq@S?a=fiNPcSLOgkhrwc>!Vi16+I-koBZHjb~ZuN2xh2dA^<8-;qxi zMWu3YhIdi`?iu%c;F(rxkIUj8!>bGUBKHq;RChN=VGPGKpgM#2Og~a-*|{P1dy7#+ zo_^Im58Vf?3(gz0SP`g0>Y%$lFlnTjlMXDG=CDozEMU)exfV{Z;6(mr#+d^FH&zHr z&f!d^r0^r{&jHMEN?PnF)KS#WY65Q)uR9L(9~U}NYwhhCNcSe|vIWg$35vFIUi4xe zfp)rXaYDu)tJMFD7Zmyg88ey5;DC&!+R$_j$#?m~=e4rCi zWW!zVAtXXdEh#SHJDDQ*OV)%y)tJtLm-#OZG(0=Zg@kfdx(1AN^2}(G6aor;o!H1A zvY$+sRNmXTXKX%W_Z$ivP{GyqR~2gX<&E87iH_20EIrIdAle35_w5^258s`*~&=ctmIe>rxEbTu@lu zTTsRET&L1y(GzJT*=*CP*!SW$qMjV7d>KzI3?0o=ld0p`Q9A!r*9{)H5gLZ;_q4GXoQpB=C?)*!;*i*iiz?LZHz`J zncfsV#zG0^fH53LE{_XHR2ruG4 z_w{V;CZv6C6*v_bKSgZ{9Uc%4LN72gF{B88Gb9*n0tI1wBVab6P4uFo&L;=gX&qlx zqnx432bz=N^_-#0tUET+goH=E33bV;LJD^e3;lTgf;L@d>9D40Y~}^al0YzRqZ~j> z4o#@aN!lEVoz4dI$$};-A;{%Yr}wZ=@$}#$=xV(P?@j+R(v?Os4MN39w~m2)vE-fR zJ~GCIvX1}ibJy99GkAlFowh`x$5WGe!BANbRE zYE=%g*xXozf_hgn?^kYCE;k-+lpj%LgoyH4HH4#8=6`CLztRjp`d5B9zV6%j0Cj2d zYsYSb)KcWO=nTTmEc8P*+y-=lo*vFzQ`XIpDEs>#0K{Ck?Ql+k&|U7b*c8| ze<=O3yAWZ_C#K}6c`d2x8U?|@`hsGQF|g4l8kH%9w%XHQ*Q3Xk1-r9+V(h&yH{rFa+?b=T$_xLvEnVK^RQA~T) ziUc)jD%WXF-t)vaA zt36T}H{YC2SfLjuBeCN4pQ-G{+>XmsDi=@8$nZnqb6L!8GCAr3&qt98ZQ|afDJQxp z5ssJ-I@t(kycA{^T)({iAqX`Skst5m^!?1Tw z%a|YqwI3X1ZFm9md+>W}?X9x|evi?K?q~T~*zzcPQ`&0E6F#4?I_%usv~5&TK9X$} z_m}g+em5f!E0;3mqQz}%V1y!Hl*bY@{^6UKVW5gA*YkbSdUPDh>0_B3c|6(Nd~TWM zQHQyKOwv|Y90xIi)J>`iwpj$ZWGTBpn=}=u9=@&H>z9a&SoU*mWNbt^*>9ACwEyj{ zHx~;54Eo)%aZ`#<4%T>H|GGQ2%7#8VLL(I787dyw0BVo01}ez|nq>jEz$J^1WUo*zzgY*iO@@uug96`-sCP8i((YI|+|7UufN20!=! z@XH%GiyJe(gO69YWzg^*D<1TQTeJn=Bc$HM(~O)69Lysw?k~8$BmoyTRi3okUMQiU zG!1lj{&?6&M`%hh(YG7nX;kbHR>TgKl*RA*_R-+OeBuIuJxJwzOW0KuYJ_2ivY-DH zwr1GmDNa0o;78J+`F0$gN@)O1hu7Z))qNYM?U@8I*IrtNWA=3b*IwiK8e-v3Fj+p8 zoa_<3uFKN-$8m5Sd<0aY-!6YNJ`_Z#&-7e91I$o-^CIm_4c26|VDuU6+a$QOhPkkA2CaaxBByt6}avkOG98 zI%#*|eI+~Woszv{-b5x8rB1Z=UC9!CqSJyuY~+LLzEg_4Z+^`vHIT5ZgOKxH1^sH7 z``L6*(QTe=c+t{aZqWG4H>$ZEUiP|n-?w8V)F3?Y z{v~_3)S#22JQr2z(gNdXmMe(3nPWWTu5ZWeFRmYKFh-}IbNEP}f?a^09RGnru#RxC zMvROm{-B%O8Jn)l}Y05aH>Dy6$3qnM~AhS9Q zZ_!@v*_+IaUPw;gO}BctlZAnT z^UY))DN0`yO(^?L%JCOBvZVs5NSd%k4rc@dMVH?Pb`@eS1%Wc@G-dN^?SQE^Lq)BP zB3oa6UD$?e6m!60|wAfeWRIoxOP&6H_nRL0PiWVty1l#2f$3U z$n6!jDT55*217J7pM03R?(oZdWO!wn-Duk;n$i{G?!32{2AKd#+*-|sRWZ*V&~#^hUemGi7JRH zvC}GIHw@fMFqXU&{`2ma_tB|UfpbB~j6rlgG|Ers(7 zpz(-=qGt*UajC{ktuq*^NfmZjGI|=UrKZM_1jRy1neEq`KW2E>geMN~yj!_-8@L7t z*tufHkBdjh-lO~pbEGWPoZv_vw`HT%)CVU(@66-b^?02#H;!tQ^ix=uYcjNO+Z6#<_!e?sW3b0>(RZ z{v>w&s#TogWJ_81qyPESW4qfMmYU z7OqgTntq16+K%SWwKPY`RYwYmF+SF&FCyy94JA`~QyUfN9P8qR1GsJ#ePU>^4vO{? 
z!Y!GY--KmKJ~bVnwHfT>xa`byz>e#zo~)n@*Lg;xXn-6?dQ*KW?57CK)(b?C3f~bf ztD>lGe-VAb_9H2j3P4y%h}VbXNs2bJ2Og0xeNBY)ihY|$NIZC*W4w!O*`LZ1NW>L< z%&;N+t2f9j&-Dk@cfhA$&XhBdC_)56bS9IxYoAX0i5lct!h}uTWD<)vPL~el2Z6)B zyMwe8D>w9|1~z}oC6$U7M9ul2Fiz%*4lL5%vo!^oalw2&169G^Y%ITku7THri%0TJ zE})IURsxL?-`KsCsxk25CByP7%Dfa=hGOCD2;|QJM2tFC=owy5%Q>PBXwJ#W?PwKE zN3!4a`n#F43!M>Ac!(FTaimdF*HnD*oCnO5X}J;`pNzg(Ww)(Iz_$Uf&F*OnHXzIKq zx>5*!p)Hsx=f^t|L#47_4Xj}cn+r-t#W3KPm@t*kYue2Qk!;4KR}=PvR40E-S5HuS-KQVC)WzAN zxQW)Ue!!b?!g}cx3+ej$!IchYa{R#p`4o{Dp`SNFgy^meVK0l-e^@_9eYam3F1MR= zM7*xOo&Qj>#e6Z+yD5YG)7c7ZRwYE|i+(>`?WoI3P1VGJ&eaieJ+S4uE*z+asOdoU zXZk4P_h2dTwW1}V9g0Tj64#BolRw+IEYGJkn|q;W2e$Gau2^v;=B(-1&w108ulT+u z^`Q0cnZsQ)i?SLsu}u;p^A>kfn6pm=ur^5<&kJT+Cao06uBRBi1h1dAN5VN9fVD!H zIXHXC1O4I`JO(~@zvqlECulQeT*Pe%cR7_Tg+xTE96k0P@1d#pU*!LB5B(w>K$^LD z8ohoqdqcS5dalhlwdSeg|d$oGe`F1%H|-r(PUO}?_Dk| zMKILf66U)_9LvT+Vx9LpR2E!+RtmK^_aeEwyJN@DhQ4cwfYx=$a*_-L@gF=@R#nau z5(+MPPCLEb!XKDjD!B_sJ+Vi<>U|cL=$+uf3JiW7u_#;FP{sR#>}&M1+GkHiTb$9n z+bpDzw6nBMfYgmNG4K5&M5dsONMRkYb;0o<@TZ}Go#rRP_z*W7&kEzBZGBgB@U@#N zLsZ4!Zx_?sJ!86O%aQyr_Rt|CX{nOZ%O;r8LIqbB$?7E=Cgs&*}N?SX>qw4 zqt0pdj(sh2&4jih*cx)C9$cN|vg^}IG@q#++0_4cc-BI7d}D&gQpH5-*GVL9iR&cp z7!p2ty4NeAwR2Uf9y4hM6TCdVk6EguD9q8V9H?%xUKpOB_S9eMiuurP|d-mHW^I> zE$Vw8!gxQ^;?UQman$73$(-_vq#!YS=mBg>19TjKxD_jY+2yGnGQ+ZCFDTU;4ODlj z!1!r;nW?@zFc52iW)xKB+Lue+dI+QUMo4(vJR~t==v)RBMTcIE*7FaQU}S6EYM6#% zQ@@E@BziTkgLYhxy{Z<%A3Ij5ibrv!b2cewqJ!-qEpB#E`2?OIv+;!)vvT8b)^w|p zB!6n!MaHMid%BmB*O$&SZ#u|)bxBotkHPaSeKmm&x~vUwmc^Gc@7?k=_JotKs-r&- zu#4O~*{fT$?LO_NmI$z%kS_fj#9RWDuK+?Ra`m%iDC$r@IUcHzVJ#F3G!9dlo{j)m zrayY6Z3xc^FCn^Y&*r9czJqr=SYea+&no2={$%WW(kX9TK4FHXE4)o*p-V0?B$%gS z?jk&+;6ojUAlhZ`=7>5!z?VS31TA|_v^}gX+>M`PF(_JXzQquH;l^C;tY8*dM?+ju zZQ%9U(Qq!#k#YW)?U#3!uQM1{u~V&8RX6;;FG9!nd6fAnHv?wmPJn)rfiIX$EIaVp zlW|Jeo#k~C?6OGkr1T-Z%()54O4!leeWhxk8VI~Fd0lYwzOFU$!lxDC z`v-o+cZNVjQa_EqB75~YUdhbPBb0VsbD9s%{%@oI3*6NI0wL~=sfAP>0HA=@DyH3)jkYHi8E+pbbpu%w%P?8 zrKJJBfuIawmAYnD8IxjUVA7NLfi_y=!vtNn=(ODfwJdm%$~2ww>nMzBGCz+f9*pedF+a*M4Sb=0#GN8#qcN+%9aVG++Z zT;bgTl|$4d zt~znQne>LG3N=v4{w+TImEb4?{0w+mMhT&={r^14zD#ZqP8$e1S1 z)%>EOat`u0d8<`V{xT&#GA|Xp$jPAe5oKxah;@1_tI@M4&qYpqI$U;E)4?8iDON{x zDJmW;xA+{Vga*tb+E9wb17s8~7xfojyiQ1roFr`;fwjHu`|BVIxTn+ zMymKj<=bpuSmE=Qqbs+P#N7toOf9mEjLH|jR4)kyt&qMy1?_s8lqtS=_0|W|vE8nQ z&Bn)C>;XQ#K7HACDVE{BUy9}fS=Rcmg)!1Z?Kwq$d+-Sh*M_vWh%$d8@Yk8!U}Nv0 zwxaU8+MaFFO3-XvYYBDc~P@0ba7<| z!&msGhi**Jjb|wFb$_B+oORJ8%Ng=yq}l73s`5p!nXl@_?pc~glO#pc*^^ zP50Ly5&1+yCth+V_9qU0dy6JLW5PK-cbiQo*NL{p8EC7&@i*rwx4LnGi@4^7>Jh67 z?;C^Ec%P3$I4XFo>(_)aqTVcpR7YorUyBKWZWb@6jHWJoZG^8R0n8VF9vz+Xv}#}X zB7EeqB9}*tTt;b)*l1_uCH`nYM+}|k%|l#k=Id`9uN)7`>o?A))v~>$c8UHDER+C4 zV8B2K3OV!{oJ1`OeyU=cLYf_8@{=Yxxzq)S80C!{ptM5*ACuLC0Es~w8Vu&U8=eFq z+JQQ3T~|E}`|(d}7C#2&rn)D~5Hi zC|uo&Q>f;#+E3D}8dlDeI|XyuoA=Yu>(!J;NlQ>5<@)uMU$RpZo}+ToKW0)Jb#VlC z#G%c7y_*xLy`h;qu(Q&bGYxhU4bGj|GxQ4V0)qZ{B(`cCfR5;sn;Zj&rIO_&1oOU6 zA3W0gA-X&-=1dr-+-A`Ncw6 zV%I`!Sn)HIhkTGfhVGS&u_ zN0B!|X<$dpp!)bZL(=?78&l7gsL9w?($(sxKmTxN6h!{$;Z%+~On|xjHI!k0`z-&F zo$FO)o+V&NlIl*3YI~%-YdCeskwQ&>h7)VusWI6~MR9$IP?R$+gNcoGoX&%t8NBO1 zzURuWzoUuX_p(ZvKYD!KtRYhxK3Dr9p?-fv_Yot`eq1|SggRQwMeWyDrT+-!h+gfhG9+5e zOnFCwJI7I5o2B@-Ra7R&1EC#-IeG#sbRt4gm-;_UGL!^_Nv1D*^r+gWIthEN>ZKzh zzgM~(Gs|T)VX571SZIGX=i?X)keNdF))VD~^g_AuYoTtX{t}1T(QZLNCIxd(HOw(B zKLdzjaSR6r7&}giwW-2v%7`@v3^lAVAFWLA=zoWz`xf*9IUK!ZZjj*zeaLJI`pe-i zPiN-LgtF?FhUoY~FDalBr*lk8J9N$?GZL0oTtjby*T#Q#W|2_n z%5MIuD=}c&O*OHHD&w=l(uWAsn^nV{zu~25ajDvO*c{CgQ*k=^Nhv-gg$Z z814*+4L6q!oL&cs%+9qI{hJ^9zoZKUA)twfriS*G?AgbS3UDy@%-iw!fc4x~tDujB 
znC7r}vL4g$A&gAY=6DR}QrObqoXMf(fn=aw|x}0H_tyLpX)yy^!I(1-XYo9Hc zLBLNQuc^GUIt!jY+fLNV@aUporz}YYIy*^@?uIA!4jQkMU*MaeaFao_< zN|lJEdL)4UeWzJy1K`%Yv6Ue|aMzR7qUn?!J}o14cYE+4QZjL4nHaXPMc5nSUV2B~ zSkVhH{M(dJDoMgj*+?BMdg^W0?QacmdOfLv zOz_>396iMCg;e9tV#Ab%65K!pv9&0UBC!yKO9Jw2^eorp511qIdQnOZ-OmTNg$Y97 zy5py#;~S2O1#ngydEk_*krWunW)x5VmQ|J&DxMF@)b^w9I5mK3W0d?tuKv1{Pu5HN zR4YRK9)(H#`~fK9yna-Ft84R2lw1rMOZ`jY`VBi1*otp9M)qj8V`!)!qalt#rkcku zBU{?ohYNxKlf3xUS0!%nX-I1FwQr^^Uy(1cge+bYw*Ygb1+m|z5&YxtV@STK66ycH z_0#k#dnhI9Yxo&Usn=W!m>s7_|3ykk-aR*va?ZDfDGQ~ZG-Q}i@Kwf6i&N$iiVN(sbuO)kBTr@eH^t~_$6>_*)zhYmy&|u!gziI2)LU;Zp^jLx& zlZ~5By8L+BdvM`Nmt*4)g5X@puZ& zQJ^PmuG4$r>TEk(wdy1@BVKfbkkO1J#F>37_tzEiEAi`^KNe@oq{D9K-}fKl(;7l* zrP=d#o)Uhwmqu$t9<|0toIL!_zKXVY9nmt^V<)$aNy#ZY8x+p!QDLy@ONdr_&|>Sk z8rwOUDSrs0Y<^*SQUHQ(r+)3^8IB@Wm&udf!6Q##EhO01`7p3!f8yD9>IvYF*o0C# zvS3)fd(|)=N^!ZdJ`vT{^^4*ifk8^BqWf8wQPgGS(Btj5g49;ih87Uih% z#L(6KI%t=Y6jEM~y*jM)kO-BS-_1;jNx#a3l+H+g`3`CRE>)`|U8pHZQHK}e#C|ls z{*(>v(_!Aq7^y^%I^P+Y2GVmr;e8DpNx#ZU9?wHl>h4E|JR)Ul4f#|+Z#j!N+K)U| zqlZ7U)IuaBx_SSk@2q2n#w)mIXx?6Rmg}{?r-%%l07GJi75G0onh`kg)v*>S58UwWsKbSxT za}!X0qI;S29TMwCYhRRz2y;OMDW1b+e(SHq_}oCqr6m?e|gz z9v3;kVIR~zPI#6aI^R^T_b5$>d_(%4#S4_MGW4OCv#iuQYV zfcjFF$5Ep`*NEfQc_Eh<2Euk>y=+z%^`FraF$t=CE%@z20qmi{4y*3xQverUNc8be zEECR^<)MW`mm~ic={4{2>J*xJTV?O+hL03M;Y$SA-zVrS>M26?=c4vyH}S8maCDY5 zmf}r7B{KgM;hmB1!>iryJ#U3xq198 zdho7yl`Z&!eZG)RU|9ou0sjh5Zjc8c15DTzjk9J#b}9={+z7r9_tQ`wOD!fx0|9gi z^CvM+&r88M*Cf;syYk@eRUe8%V=;e^@n#CO@&*b-l$?}COAu^9&uI`O@2o}&+YvZW zRL{;>i=CXFjrLrTpR=aNOh?Z^XNx}S+_L17q&=y{vVJ1rh=!drCh_2T{pr)6AA{g* z(Ez0h=}~9Y$5{<{^!;wMOz^z;@<(CMroEzcL~v_^*0sx?|M@$-tnJsW3J~_$-EMps z%xu^>rH!c7bln&`Wnk*d%Ku|V35VxvRvEGL_m#^H? zmFUP{y?Y_3@rFYBC%;_EvpEpGEWHAE4BOiA(z7Ia94V^wkXoJ^?3)vL0AEBG45Hg9>dW4DM71iiXMax4I9*!ktfv08jx(?m+VH}P7(-h3=c3C3FakabQ;kjMU!t_em0z&xL zo}H(DSsR#hs4|vbE@GQ6J`aiELf}90S)kg*(0t;UKtXqJKypSKp8V5uN66f}%O{>h zoL9a`Gk2G((O)W>6ak%IPEbV&l4>kQpCMYJikw^j?v*+-82kut3r2(h#Iz+HXVY#T zi5JDP7BOA%@; z;z4KnGO7a3RMWz%)eM+H4-N0Jk0&Iw&`@%0+paAETS+M>M9=vS@U{Io zQIj|gStSg;et*AOLz56Zs7Xa6=`E*n2A-$A)v9Q%Z~xZ4Lfl<1)jJ#o3+F^8yJnBv zp1cX6cQwM{Tt{CP$~fP4-@trEO)mX~heb@i(Q~CCp#TW;?RSKvzIVM9mm>g|L?3qs zH@8Ic16*58z_X4qqx2}m)($yVw8%E(Ok(1zAid|QoY)6 zP32=1Nk%oBBJYJ)yJpl@qG9PDVCGK*RJj zw9M~S4||ht+5(L4cI6V{NWC#@AXjEIg!HcQ8i9q^bmIFum4sBU+4+)H8IePIJ9@k# z8xiHEw>e)N;O!K#?3!68A+5GEN<}>RI%s%|F{>Xht6&>FauOr4V;OfK zbZWMBf4V!p`#YNj@#&inDmt%}YWvM^rd#(r<}Q9;YLp+aZUwuDfXQ4qJ+^d2bQC5j zOJdlGr*Veb5-BtvuAtaWmK{z8Qktwd?yWHPB60CK#wsb`0Rox9i4Jn2FX&x00^yjl zx0Ihr14A&tu~(VroK7F70J0sX>oJ|OPv{f~C!940YnscrmlPO;*y zea|p*s-`ZTnU)6*|Bo<0orSbU^|q^R_J>xcI<|Ge_{7e(c_}Hu6_PndB$uM*^Z>-!7>|_+Qrr zy)pSH5c~px=6Mky6wP`~Iujz5SmJ;B=6NU_+Iz;Ez4-}WbEPFFJQ3Ff8P2xsO>GbvhP4Vv?2riRV} zb1RLxKgo*cK-406w2`ltq%u zcjMWuB*=CRc=5AFSPy9R>x~Xvom^_JY^+!$d75nWB;4D(9SD|wM`TWP6yFOXJlRVv z(!P@4=ega__o)V;z{-yTFCy0B2IhS=xGVrls;}XHL3-< zZyHXv_+{hx`mbTG*-O}e14G-Gq3KU>oFkPthF!!1#Nt^z+uw#&_6E@do`W1fAEH9! 
z@)4=8##L50qQe*?|0mn#mXp>uj@Mo|$9MaR%BF4Si|g)~T@KwEST_7rM2fj@AdWCz zArOuw>q+s7U0Ezb1SXiKEI&P?LKB0kG=`$*!t{o7hz zIM02l8vWIM^tZh-Ea(HvF~q!+ZqIiJ%>t8Ze8;&dN0|Zlec<4rFvYBVd9m(>%X^y* zmpVeFB~`~XA%#%BYnDj`A#Vct4{*c>07w<{eg~9OzmT^}FJ{ zYX$msmA|7=B2o{b2C2#Xt>}u^I!{T`+4jQo0GF54>vuqAX9;|qPo@tbkDPx+OGIns zksl~8x|xBKegAXy{%QN~H|@`-?e|arv5JT*ok-KSgE@lW*7LXX(%qHq^n)27fi$aw znTkZ7v_~(+j}HU_S9iebJHHpOZiFx z2rQ{XRo6#3PLRTXi21>tj2L}U`B5rkspT>#s4G3I)u)T?Yd?|Xl$Mfl&Pt|DPri;P zlI^OZUn%(kBmU5=*JP4!^CFAR-hW05!>O?_CHN^QS@>GI zGUi1uoWXu(Ru#^7Ss1#3e#XYRVt*k;QQt7$cU^Y=Ba%(N z>}%|mUrF^NDZ%bE|Hy<8aIiViz=ml#cVGFXc!0csDv~#q-%j{6M#Q>8Wxb!Olx0CN zx)s0l447%n1Hl_0$Q!r{GhrDh((1pE?B!vScQIYQln791Z+nV5&eOa3K1_fHhq-U1 z4~||;tax=(<1fUG!>q%CF++UN7ONA_Ds43OcI{+niM`4r8 zK`xcW*MDmfn^UX5G4N#+sYU!8-dH-4o6W zQcCH;z!+vX$`41_9G;rCc*{zR84IDSWqi>0nPs~X7cm%bGe)X4A0~e6HQ0MSLx^-3 zZ|goZMLQxrrxd>%ZokD^`SYW`?T<03VeWs&$ZtTDXPcb#AP&#pW54GF3F!t%*4)}; zP71K!WC+w@DcFb?39>|i_?gl}H}I}Kjk4r-v~)zraG3X!6$zA&mV>^p{;@pt+%Vc~ zrcmdAJurqMDa+96Al5UxEnlM#yy?3x5Z~;u9v&Zd(=_$4Iq{RBRZTRC1;(pBqP#iGbcByt zxkfRH;<2M!c_-E$?N@&*us>@UaywdYa)431v9ogvU=2spNMvzkr>)0fPW9gDa@ZpX zpR}ou;Yv_vxBo*~xN~40qmSl=e^H2$$KX#=4_%lfYPq{L#lJE!=EGC0572zujua*@ zww0j8EiN9VhF}gpx+CN6%QVHao=9O*3?f+zd;U2umWZrx{rT}v@IR|~Z9jTt03QI? z)pm!u&3Weu_6q~VjDsg40{2!5)vJOvi_Zrno4+TKQY{N*k%1YQgFwlK@aAc2I?q_YBEi*$M}lpO}PhOYH@ zA=KZn=jggpPbl~S2~&iqT>?s5RcXwoIlMgLdzsdKq*=P1V-mN9*eyRlmaKlF|7CoG zzS-!>#j^fvvdDB&Wy{bn zN!VMW#=nM;3^Y((QUm4+5~<0-@gcInoUA`94g=?|$on-? z7}Siu_7o9sb_gitEr6Nt_3ZOeY{&XPso@_?qi>x)QMGLz02L&P**Jo*YfK0YaYcS{ zZvO#h`^E$6s`jf+D%ozrC-Xs=m zB@e_3%$0Swe*P2qN-@+w#Vk4W8#hZtR7~qS9ohu_*C9^%q(#PjR^`vuH9`@(8(Ot0 z&8%SU2%NJ*hVeyZuIw`Nmd`A(*ezmEU80QYq) zVu%_mJ}gWT76ywgNhz_`a#VgDA6ZmYr9EmKIoQP7S*_rv4=d;o8GLTIIC%PV>H~Xo z3SjFXni!QCck@SuAzS^jbNaq^QgSkQvF@YR2FO|tL_m-b7wMeZ?EEt>iQ|o;r)LX6 z-XoOp{`=>|twX!^ySf&Hm|9$VI-9<}zCD67HPqpU?A00~>0`+J*K0(ebO|f19%y&1 z8-SO-DF68wfFOIsGLd#Z!|`_Bt6W4EpAaufgYoS^n;+fA8f^iA*JnQHXVj8?P8Xs? 
zBGwP`K2N(|zN&>23=IrnUJe^mbHp0@*d+?SN#rH@D)U6UHnXmyo#MeuSuYS97Ln{K zQWZ&~`l_8;L+godYtu_YULTV@MlM=8Ohu(a6}Q5k5ZQsKl20u4^9-~G1Nf|#`_0aU1M#`tYa!DzKm(_<%AU$PUTg-_MP&q;McN$DH6NBu8B`cON5WFX;rNN z?R}^41@FC%mNgpAclej-BL0V8=n2FNA2kW7={>j_UQThfKv)HA5<+VaHr1`0JQo|k z_E?%LNzqk}(m*;-S{IfmfrL0X_*}dKMSsMs)b!*&-00Fj(XnekHh1z zUm4`sEJ-Tecd~$7-(x@kL&(kR-63qTd+}=Gv62A@TVTs5viUx>+*VZ_hZ*XBCf z+RU`?FOQb(|LhgBwX>ru8#Xy=eLE7Cywq#=cYVfcHgK`8;^**IFkYE;s9%%e{K_ zisjw@T4w;ODW(+Bw!HIK*FhgO)ag&%V{)fPnv!z zkFwbWTB*A~2Vd=vTQ6CCn9T$!on|zK6sR5adl7<=C?>tf?GH!4g>!lF%%kY#B4P>( z7_RqFU+L&5MpKCu zw6qwAKioRJ624puz#Wn$F)RpVP6(k5- zKPCTh)m#C`QMY!1N4^+}q7=~%5xH&DueKP<#>hNOdtq0h;~N@S-HnSGyip5_rI-I~ zUp1Qk!o!OUdtBnrF1_1UqONso7Q#C=#~34}7fd>eOZxu^d&{^ix2=1aZs8V?mPWb- zq`T{;Lqepaq@^1PK|mUjl5Q!HP7&$u?(WWaagO`>pZ%Qu{N8W)#rwLhwbq<%@UJl7G3!!e37$mN z-``yq2$XWeDpQ&0WO$@j+k@+P8Rqkw?yltHZk)G9e%nl@Rv$a)-9p&Hl<7uq&YSM_ zg~RMM|LIp>!zn8H`VXG_BciW>o3W!34BUS&7uVe!b;m^oD!B$AJ&JzNJeQW21r3nZ z>@Q>%b(LZ5ZI4!d11+S94vQMaWP7YQhRpjy^inMaRG#D}-pn^PKk=KQEV0|cMRWz*?u+XSo9 z2FO#NnT|S2}vx9@f%2F6>{KSOz=0HXO1G0eKg74O6myH36$tItsqwEi1 zVQTlUl&;X-+JV<_<5IoUm6!Lwp92N^4F??V%@^pq;J1!ihF(?HQF*Ho&12+_TqcaMmZQkMpXUU+C#SY&)n2oT8Yoe~pW-?&m9Hq(M5XfV3wZ zq2wzUoCAtjG>)An<LEFc#Sfpb!f~|9E6y%85I{O zqSf>s9lr~F3Yw&(BxiReOj3co+6{_6{(XT{TY^L~x0O$C*Cj3H+e2}(t;S2>*xS@_ z@$d@zybemlxN$v;o+%_QY?k}_+?UktsitZT=PD zbo~Xu{G`ZN5c7tW_!XV^rt4fv6;k*#=Dg1JZ%Lcc(P-T0{KPe5Sz#A99$;QDnYVVR zn0OF|O3HrJRQO_3Sma5hT@-2i8>qwvxv=Pxuk*Tp=6Sv z$9IKfZbk3AOLLui*Rr+-5qg8SZ%drEMr0#+*9cYz5k!$EC4sT^Yxnhc5c?iNCAJ?Z zZBZtH@Y(a3OCD8j&Z%+u*%;qWc216N38F0>if;9sdm#hiOtozqc82@e4*hJcv)Ub0 zn?I2%3Xu*(O~*9_Q-uEiik1(Ldywj@1&1q_HRrngDDKp6C-N1l34g=YRw0M&oB%I~ zf*Q#6V3ho@(e;6)81EeTe0k+rt*x!p^z^c!7;GrjXt9U0uFD_c22m95ZufoAC2))0 zg9*TwROH;EC>}?@X|uIT4KUS|QU!G0?ak0bCZS#Ha?MA-gtj~)M4o_jj<3MV zC?q9n&`o`xVt!);j z(*Xc~oc8-8VJ+AelqEHL`!b?11!iLr8QmXt8B?|r5`n+?Z+bZUI~hysDM4Ulm}cbX zpI+wjV}m)*H~tl2$%sR%6bmktlmZeBH)8+mX)ed- zSC|nZ>vgP+7h+>YI#drp;>b;mhckQ_PHNlG=e7zv7sNKc(|I-+X;c!9RH*;^A{t|n zbP=;Ba>wDR%Gc-n2^D>O@`Q217YhL_X-{^RXB_)Au3H4Ou@2XgZM(p=;$09y0=V;R!zORb!tQux948vV9;L4kOzs!a&L198u zgC2|4mnLlJT*=7FswJeZEA>QzC3bt(>wMPWX|~J$oZOqXyC}aW7=9QWC@NmpXEq1B zpMTOiJ*@{GdLmCUy{Ny#@qbZ8u_O^0R4ChaUw3y5fK_=OLDpZV+8-L&(!8(oYxh70 zuw5-K+!N1~D8=_O%F0@+Zji69^hyvVXfI@U=Nevqx1OX74Gkr&J_MaS(x$I6s0^K) zD!ZD+o(&`7>!ov);W241*kfXdoZ<5w%sO;459KP8m6li(1ked_jq>+9{x|4;{v5Ux zfi5E`oz=}4YeV0JNysmY`J96^t>PJoCu?Fh@m9>C8pG9M=kmPakfUx#Ap z)nbKX@b1BUx_@CJ5FZOL=wmx~0i$tF;Y1n}39g^_zE%v?K5!6g9!E`5-IHK9;mi24 z4*;AxC2o5jJTfK<#?WAKf3gT6@t7xQ(pVW$Q$Agbe#Vc~k6hjx^)sl5S3nr!ze4Ck zsK3WKkn^=1FG~<{C;~Rq-$xBg3kp`Hw4_)iBlD0sC~)AXnt1hA&;X!f8MHODX;j3)YtrvNuAtJgd76U z>8sXrlqK}?Xx%DH8nzXw4!qiK#YDE2P#0O*(Dq-yUd|z#V9z@C3m%pF+{(xO%y_8} zD3iL*Jc#9)C4{^VSzSN;&tuO%7be$Qjk9o#=O`wR%t0oI5lSBR`dB}lK#qaWt@9-S zt0z|WZ=F7gn~fLgR9Fhqvi={>bwv&V=p81*>Cw_-xpBw_(m7bFi+0S`p=cWU*mOi& z+7J!8gn*F1jrpdgu1YbT)Ld;4v%56?-#E2Dmd;zGyY<+_}W}9tZEQKp;LT)5bAdG*5Ak|FsR` zWlHTP1v=UiKE~kVPEUUe-kYpi=j}CnCCvZhae~Mp<#4#=A0Hs0+r?rqqW;Z7XzZs? z8HugLXgaDOI;g6sWYlLsOV$-dCPdHqFL+w+QQ`2qwinpn_dFwrownv)?GHRB*DE$? 
z&;|JxK?lmlA)W4oxwgc{*WcIouC8)T_uQx1b&iR%t^iAvKkSjVJg5mzcP4 ztop;ykZK&)`|xK3pXxZoe^r6)mfJNT)-kp-S&^-lt5g8`{pi(9Mhi3?S`hJNK*aI{ z_Z|Vnnl*gyKtr5z~ z$vGri-QV3Z7^5#?mzNAMV=WPt7al}vZXx^G0isyN^55~#za3|uk)1Cq8UcE{%R|Vue>;2`6ySpIDM7XdDeM6|E=4`k6`OOE~~LTc1S9DK}#peu}Bfx_jH&i)-K`Kr*R@b=Y7J| zH4D0_roYF1+jwJ`YdNs}-Fa)ppB;WAcFKOq0X!EM%Exy|F^y^f)#l{o)#-?M zN?bW7i<|E0ZS86l^_UHfcM=^>rpF=|@h9p#ECTZHUn#vx<9Kx=1aN-2gask=cm!2@GMffF*AG zm_IxMYW@(UK(bdP^}aCDa6qO&Nm(IBYI&g-1_TKPV+R!J{QP_tBQZM=n)zl#Az}&q zCNx~Ob9!+!l#H8WFKy$NmR@TRAs?b_XU1X~D9Xn(sTj{9Dh%f;r4+3BSL1Mek6J}| z{Aj%6=g)j5W)p$;s!(6F?7H6-Kd72nwLOJCqrsv<6<3w6_rHMeNhaSX@z?iMAU8pz zd&JjuK)xVC0&J^wB%Uc!k)+H31*#BA@4?;49R!OZ^Tp02E$`Ls#~XTIN*)~@sUzXE z7XWD08OGX3hmOYteY!{PQ1b!!u|<174mAanx_3H!6!8w(D(A`keGBxIfDZE_7W1_# z#TZ}1rdJvop$+wVtxL;ZK^FW@GOf!?60L1kT+->LB9q-+BX@1Djjfz9eK5W!#J-q? zFC3pKF|7`(#SaY?vPH!pTE4MtoA5C*szPiXyp+Zdtsi^5NTH8-0fj;vu8Y)xzYAaU@y z>@oEVo_~4S@ld4!0*bgPZ&D_ZXr0^Yfcg>_ViJ+&bEk84zRx;1ICum=^^1o42i_Bi zild$@8ln;Ee1WZ+h9&3K=QSito8Q9`EBcOypl)pa^Ot0Vtz?nGXd2ae%ecm#MMIy_ zd;W3_MpPc>hW6x7Hd8WdBl+UyjCm%j;oZ0*d|edlbDr}0v(s)>wB6-u{IO^j zopz;6SL2mjSJSQLM^t5*sEvrHG#S!Zvh`aGrKJ4@^#YST_Fq5K?#(#T7$!3eI4t;O_I#?Xg}} z0>naiWdWm0PDJmEOZyKYC>BEI5jglxTZ{&cGe%e{yVHf{u-npPKK0!m3oEcyW*SP8=PN-m)R> zJ@RghxjHKzUmq+C{%#i4Buw_xET%PEKKo=jvXEC~L{SDR9CnCrKo<(&D0)`O8DUXc zWYuC{*`ISiFo7Uu;45Y^4It;r0~6*!y_0>A&M-&?v~OzwzvHkNqK-U=HMM6W=J|E7 z*tR-t+vJ?Qh%jvDD$&x0I;U=)>Xw7uxNzwNCNLGK@}sCIay281J@u?5poTCYVD=?a zya%JC3zsFTtZ13FitK8U{D)J))tLSPh0vgd4Q{hTUenUNs2iP%nLT8~Mv9$<`Ms~~ z2z1N%-k$jijfYaddAMQ+&M-vtetwmVC#2isyIQ;&VK*MZcXm`?Ta{JnPu(&t!a9`0 zIXKYWnKYLl-J4Y{vq^5tW;XEB_^SM(WwENQ>7M1fBhrP)iX^K&3sA=5RhDKLu`hTQ z=QEk@o$im470Xy%4<)-zmso3M1OxVJ7erqO99%53RHGg~^ExsK-AofA!=C(<()q|wbu}}ydU?3Fqu_bit!(e zmR^P$SPz5P+*v{h@OJtPdTT!bdO3W(Ei&~$jX%1(-oN%Fi;{E(md#;LRCN={RSw~Ih^KP#f76(1XL-5B)=OwU7tmpVrxb%Mmes=VfrIK z`$@8<5FGxCGzKAzsr75N{AzQh1*Tnbi548e*-nmUUKt`C2^`^5BIQCyZLdkGvB9$v zeLR>1?p&#pKve8qPiorMVZ&;|F`BzMJ>L)S*3V3#M7$DohEKSaz#K zrFfPz=MXl1D|X_Wm?^o{%Ol@)ut?gkod8Qi=1u4EE7Bzq?rR!}5EEp)=N#D_+L}j! zw$`sjXtD>_|2k~kz_GK7@%l)9()uL9dg+_qT<52^i0Bwf^;ecO zx`}N}B9d9z`Dw~G{N?)m`4_u9!D)&pvAt7l)*d5h$hyGw22Jremp8S0~A zGb3z)dGGwqm}H*CPQYug#HcnX$?O3T^`uR&^i@2hs$0dQzg|M;@*3mpm(d$9iRO38V?oO+s(eg!vWlL-^eJFCciTrPjZHL^q?5ou zSL<$P(w|+o()->k!PJjH4UFoD=CM?-x)`jlF=5rE{zOV-FtVBdecJTA+3#yyOaYgn zt0=02rLfOE^YvcMFTdmV& z>xj%JcBUoZG?e_zRNyEC8=le85mCoX_csSpmHV|#sP(ia85vqyr1N5*)nDf|1JxBb zU94Dsa;C-+@jhLA77T4>rMqtxMlsWZJJ;#=NC*%z^iG zKLam$=Y1y6D%&GVd&~J=R$b&b6g>dtbt9)Aupx9KO-goj`m7PsYyW^Y1hS?T3*QWE z2czBT)?J+rbH;oICBU-Wv=T#GymZ~tm$Z)w2{Fy4i^ogiLV6S?!F2krNMzVUgs;AM z9Uha`JKD01i%7o&r*@Z0G7H4rB8XDeBxl^DSy-8-5~MYYE9LbBLU|7?GC5>@cEL<+^5?Z+Cs1A60-U>-sMp? 
z2!qIeJGo=~4d$D?K|LfU8+n}Cu?saXada4YxxNx3yyt#<*QCn|7aPKX8=MQFyo;9q zLIj@(ShNYGyT|CH_QPRpoO$iUFlkhARrkVXVA^ih-7U>`dHm3xzBAXC@Vjm&tH+On zCC+fl(;*Niik;8!LFd7iMR*H{alFP-GG0RN^^+ujRTW_+(q(~N=nfL`px6TU5snkc z#h`RjsKr`?5>&tiyPx+^Wk1`SE#w=gAVz}~-OdOmjRqJ)e@feVF~5d%`C(K9jiwh+ z%3)6k>=?m{o+>scuxfW&Pim-{1>IJ;uRrZpGTaylQqta%pEww^Ma^O)TEKFvf{b7bf}_^4iG2$6G71l9Omv<7$Tcje{dH}oP8F^JS8 zP2GCFgb(K{F50SbC*?5TZ==TVhMtS%VS4%qbQffzEA*EeRQWSCYR=Ck(!W4xiaux5 z$Jv?SrT3WX6CvLySkB@Ummh7FBVX{ZVv_9*!J~OAAFik5b|I^jSVf3F9wD)m^4FD?fb7ZLYJML=E>pEi&s*nEzNEI{g`(2LV*VRTuQp-67mz) zP6hD5#f~0_T=v{abo8{=?!g|5LnYs34r0uEVw!xS$n4kaB7D=I$(j}kIryID_1)Vf zmR6jAEBMfS{)+}aIPj;Joc%R_;&Agk0$}GdxQ)<$;rse#Dn=R_NmMyrUS5U(1_}yM zQ{If8lJs;DHw9C3b9qob1_Di?KeoU_z#XPs=*DNN$Q!a8W$1pl=kc~lU(QM{F8b;; zrqW|?aFM7&Lho`Q_WmSh1(PVtFw>314HsYG+4QNibT1)o6{yD&8cfjEhb=4zicBVy zIqHDdTnops)vnoUUF)qnW$&11z|0xnsNFauWJiT|?9l0jg;SYi2mbsO){rG}sbeuI z?K(dX7dz=hWR)IfXwn_eaE$8x3KBd+IOTCH@Y=vct3??tLk`cXtNEr3naX7+cV#&; zd*`@c!z`(W|5=BbQW^^Y{dtCX|NdSt!VmehQZ3%IHZdWmd8(%9J74LQQDzB|Z7-Xr zI7a`w+$rMqI2~pcx%_y&Uu#kkk^zbYEVL<3(GP+g1Mx#8|KYaILe1i6Ah7%<#1ne# zJs}2FE(69;U?)lgHiWLV-ej*WsSPZci=m4B7UWU<$BD?&Rkr()7QD!)bZ{N2kF|W% zY^C1gZSjMfT74$FP6%y@1YMxs!5jk2Q1`FS%MPpJ@Wa-L=2nAy+ojA6UI&e3CO(Cp<;UXfK9w?iX~g z`C8ro^$>$DOx&e5k1E1$Li{)Is8#2LSFsR65&H#cFZ%^^*SOuWE{(BR_VqR5ID~!? zqG2HILG%tU!Eimn$VN}0B__6?0eco18kh-3z(7P``I+b?Zd_(6cG-VK-qKTKp4p3tS>4I&Br z8t^9F9X>>74nVjOZC(shW!CK=Zc5Z~gG;LLc=auewN7^-48PIW!f2H$&ZV1K_3S6U zB}$-H2vy0_LQj(FMHl0!VF5G7=i8Pp0cm|iIw}vwLHO-m0B+A=3$S3(tO(wVt^VS& zh?ddTPWeRPh(aiF`gEP|9$;G~By z`RTl}zq}zrm|XFL&rUL8-X+Q;*enDWe~I7ROLR+d2&r{=UDqPp?l}XXsCuvzW|+2{HeN+O@;05s^&p82&t7ZCdf2t%Un>b}3@EY+@ajCG*R55@R7lQ=43@?%m(@dTr`?f7cf5Al4NJAhO6}X&ysbrp zx=~4Obc|`5hIA-A*~xB1pflEbUKG&qsu(B*$|y|p!Ms;@s5m2&QIoLB&vqNHo@WGy z^<*1^j&d_?r%G?rO<=1|Wj8(%CPt)7Y+9dnin&XjR3J55U>hsx>K^_DzIV?o@i47Y z5qAbAmWvy<>$40_YPeM&h{?z0kCcQR7nCAEiOnpx_-M8zRn|x5;fq?+P|%ev$}fj< z9`Ur$C8T1@cN2hJmi{k5e#$2EVe-I8*;hy9;NuDkMlY{FQ~pfdc`yQ}Xp56be z)^5G(FE}-TAbbmZGkX1iht^{bPB!3cb>n?rZ4x&p6j1RYmqJZ4pI_AC4>cx03HpZNR`(Rl2@ zpridpM^{0cnHRD|Y|WttcYuJjw7$REh6I0&5Y#-7tl^-Q_Iav<4MA*OTGE??tuVwz zS|Wj{FjU*9-3eL%^c;V1_W>-#fsY?nI38VFnBXezDdH1K*q*NxQUp2+=pH81-=ix1|?x_gdx1K$SMlq&p0^0L&zoOrzTsF2|gO1p3PBc8Gs2g6# z6Pks5_|yYshWT0zpd^hMxn;t81zWc8MLPA_UUB64 zFVe2YOR1wEEUmOBuJ0&S)kgb^8!FWq&UfrN+>{CV)a~t1goPuW-S#|hJ#p*w9e_l! zXpVM{nfA#wCtT{7?py*Xi`W15xQu1Sjrorl3CiPOG3E!0zCh}XcDXs%goe(iU!nRKc_3VDyPWRXSo9S!qwZ~lm|{Ay94Esd z4pP%f4^#~>k$xUORYwawM9xu)`wC`%%71(vSz08hZ_dbqM-VDr>3B-=^WAQN2WQfG ztgvlr0M92kOo3l7_%ZrFE;4ZTfB9ryMT^1;d8MwdkzXVVklf|xN{@XFeVShLk#h5% z{!D(e&U8gl%~E#8ZY+=Hy8{w89LS5sdxTE|BzK;q4U=JY?0|b{-W&$6WE5slIQDI= zqt1Ci=z1T6L0HbKecOM9cZvrl7)}Tk1}c6gp#%>#Zpr8Y0-T~RoZk|#THLDTY_LV! z)~kG;efP4hZSezzI1C}rMXO$=XH7S&JX@!fMGP;)-Q7z}fwRQs;9mq6VsQd*qNq9|npxpL%27L((kNe9DAd&;$ zll0NgI@HW%CSz*Ew&D#oENmCki^|C#DyHd#-9b|T5iHWv7o2PjY>ir?(gu07l=ZQX?XKVf~b7G-c*0u-Zf&8HLzdC_!n0! 
zx9Cnw!$22bibPof1HJpPB8?bfh1xe{*M#g@pC=>7hON$veAt#Mif_b$Od zJ8I`_sl0^7h{*pc%KMSiE~xmT;RbAwulMZMQ9xu!p=&l@10LTPRW{>$dFq%x3AVki|YG|mI83!|viEk>pzJ6U??avxXXV|K{gATx4o#up<&K?ES^C5nSsUe+LINy;Ba!c>_={Co}Tdn?2#owUL z(Zh`k0cFLN>Vm^tT#T8{$%=yapHZFwMIQSA0)fPq@m*M*!%y;)VoaA|#m-DJG9qT{ zX!BDFHs}^r{=n;J^~O9r_C4a-5oHwCxNf~=6GBH4#pzep5cT7$1I3f9a@E#@JDYMAv{vI#?Up6igV*qtWwO49NlwMsPt#Xv)Fgv6Z z&GLsrS*Bc$zXL%00{g|yw5{slv~5)fHmH*@6b6CfKExFPiudyDa=mI!TcSH2dpcjO zxO5ruPgWFx%MfkOgsFM}#AC(De7g^Y^wJh%s^^dc+>eS24=ppfsqS(%#()TaYt4M7^LFUt^52kta~j(QxW7oxxyhckfeh@EkDu2oz^ z_&-1YpMR8~y3&Z8ZVZY8TMRz$^-hIpQpPH0OSG>r${V;5aIF=?v0`!cEqhbbx*c|g z!qfPh2GVt^e=v`}q6IchF3$@)o$9wKm2YQtxozi2cFfoA;KQ58ieH)nGw)D?hYRmv zI}W_H%h{O2bZSpYlYiR*bFKbjbv66MIKmi$$gtf(vY+oXT+uiYqv=9c>; zhmL1*0N>Oe$;qY()PxQ#l>G_)|7ovSAOqX<;(3%W8ET?rUSKzCLU=IQ*)rOf4+uo@ zB6|ZFuZ5jD9R?p%mNKo2YIWxGA{d$lV+ zQ%bfg_1xvq5xPk;_xh*UjrTX0!YFI7G@hIM@*xox5s;X3MF zJo?YgNez)e@We}h@Jm~O`n`kisB| z zmaXsA3XT|DK<(iYMP< zAdU%ez$*q#HX@V7iWh3#Q;F$&mVr{|m`PuMfF{mCA&iCw&DoIyYFOksVd?Gw1!`m% z4k}jZC<#R}ET||C!6!R~q>&*HMPsnP8t=jr;vh&0?na5uMyNdS;!IXy|b@UX#`HU5jZxnvRnH zyWYF_|5xutg=!#Ub(IvB3=sy=Jd=~CFvA`$%X|&-8I)u>H@w-aQ>}E$Z{sBR6h{MO zRGNcIz88);vDOpmb6B86o2ZmRZFA$=;>CfyXWSCTs>#{Yt+u>e=aP_R&_K;ZA$m6J z^K zN{}CK0$tzMVJW7=x!;3Y&oReuw zpT`y!(Lxr{?J15C!_$zxfs`Q<8*ZEE+1qQz$6X{4Ki!7&)PQugqs8EImm61k0n1y! zdHbKbs=sTFLjk{7ul?5fQgS{C1#p8gS9J>xK8>nV@Q;Uiwg4f0Oi96HfgPlm5~N zb0`;gCn=FPB=^$0QG(Pyu#(Mb2yC@LrZ0+NBgwY$v5Rej97`~NJMM2V;GFT&zgd`w ziH7*o;=-l$4lIU?!hzl!4gGy`1&#vLl}BVOrocPDEYcx0Hmr=J9D;K&3SVG%_?q{P zs}PhSSRwaYA_4vVoyVl(c5!V~U^Kf4lu$5~oMRP;4UwXgq^Z>##*rGO6l&dXVk0?9 z0vSu}+<3q)aF~E<#7%ja8l=X0-PPdsq?M*&(Y&7wYQkkFGto*DsvSUp{jI6_Z1Q+r z9<+hIW~74>DnaTXbXMX?q;Tgj=VD<+=Bb~4l<1F=*+@z<6mAY&?{DwF1e#+G0tDt$ z*^}w)FotDEwB+7QeD$AXJ}444v1&_=W)G#h-sIp5W$Z}lKQ+#W@C_+KxSAsz<=XdY?Mu(Lwykny}wETflp(9+rh7cYM~IO>Ub&abcdF)to$TK z`J|ln3yam6HLyr13e(+0{;PJizLQ`OofxBZg&a7`QiqH=!s!zAbNKad?1XeUF<*|| zy#v;!JkB?RXJM?=Ong&E7*eok@KL6?vl#(GX9FJlzZ-#$lID!Zoovm&jNzrQ)%hv< zbyo+6d4Ucpv_?Ew?$TvG-))KYhX}{*04nZk+>`t3Gzl{2*y*0C4v(W6j=zB~b)17_ znD@V-4)3k=SoCYZF&dx<8$pN%QB|(bftr+DRH~-`wju^)w5pK2(Essg1kcCmKecM_GX>SQj_c=Q`JF!&h>A^j=wTmvJ;_1a%=T;^Gz462?|!$o zcf)~G@ohCzAk2JA<*v#020WtLjtQPSEY)+{l-M@JpS*aXa(&ce2~7^x~v9#^pdcpRwrUwwa=v zXLQkrsp2#?glrYTjkCV|-m@zprDm%Ye4N9WVNV(^4A38QziB;w$ytPhe5^i)r82o~ zI|}{vmDO!4Ao*Ved@Y3;OHm}?8d0F2a05`^Jqp==J=I36d4yo5w7f;Hx!MsZ-Bj@J zj|3A)Mz$B0L_3Ld$8w`&d)9~tZH!k=eiLm~Q)Oc3&VfOUe1;+r3~qkU0_2vaHp7#7 zE7AC*htj(C(y?FGdc80Hng<9!F8<9c^gV={yHGJ=yfvq!GQFW8c*|om4GxPZ6Slx9 z!YJBRaGjbfc;xlBBah+)_lN}u&H3AK$T~c2L601HB>@srkB`Vbc#fq|sgEkvQ6m`2 z29WP@<&#q(guIG=q3P9$7$rX0^C<_OMNrY&=Rof@JcrpfF6A`qv19z~=6vu>pp!}L zXR34j^ECcyR2t4PCHH|iMXLAF?6DCPORO~#9JxxP6hz&P(a2cwX4Uh1 z2tStgn@qL#=dQN*I6{adeak0fe?yGDs~P0z68IBh{rxmmfNNPAa(?mH*>Ri@=GBr?()|MY#b3iH4?%sHopj zxWmOyetKCxf}0oN`xZ$u2Z^6~9dll0bj+mmZ$(hk(ELVctzURYy5kI6t20Va^fFYu zll=2@i|Ch+k+MT=>kosl-?PmzM8U4o$DruQu${fb*Uqeyq-9#>nuvThAR5(Lzt#-q z3nhPd*}*yQWUyj*lX?R)od-@;O-0wzt3XUxLxj8s&raR-=`cklm{<8=squ%iN1#N7 zGu3}E?L^T(yTPjVJsm$u!Nm&+>(Kc76+-CjeEHkf%DzY{b&> zcmh!jE;|E)Mc*uJ5$Q|VRx(k)=*%pPmkDL%3~cAi6lZV@Fb5~gG_8f>oh#=pkB z$t}lcjR%-Pd3yQ!?+6wb0W1U>8pjMZ>T@>O3fkC0j1Ne2cnQZieoxoGWV%?2k6pCzvrcZ63>zUc20fSM_)K=XX9H7J<-l8i zGCrNb^ju)Z!J{KuNQ+P-wNGRE{G-%PCx#p$duzrp*&1j3m>Z*rDbR(eT_M}-JP0LU z3QNbTXU{77shPOPHVF*Sp*f6&n53~spsaZ?s`XS3b+8vpjQH>U{$hSGhHXfIUroAKVW2?pzcz_Z%+g{bbT~$V8r3+El zy?E(co{-S}F2FE@ak1;o#a@`NH85D(HB}vOi+3BSQW_l|p8BQzRh9a881tQfsA&?* zDM`THQHVV55+;GS!82Zibp80;pPXBp=7yFDqzuAHAqG@I_q^_cv%R?bWZNol6g1%v=4~#-hm2?Oa!)956HB0d>+$>7yvzDuJsGv{H?e!n++!$!*F&}KEAHcTk*8DuyNI|YI7T(P_$b^UWc*%qR 
z(()dX3iS@@v)3j3%4c|e_5hy?NbJcxORc$`8`gre&%Aoru_wRVoYu?A#@}gAQ8W_; z?diVycmb%!LFMz&Po*YpNWsUtw>_2yw%YjhQB}Bjq%56BV!48f^+SD=4nNTn4D<+$ z8%q!F{E-R%zF*-!f&D5{&p3mF+B}?NYnXp=tek=6mGUyqGY^!t#;Ic40mkn z@)%GH|DALIymE~WqFYaOlTt4Q;0bMlfxota%xX}kdBb!SRq2m8 z>TPh+d5n8EbAbox|M}P>kj|PNj{Kh9Q8#EH+MYE0stKJDqi&;K!O;;DRt&FmLG#Ps zBu{`H3YpYH!C9FTiNhxv`B8M1kXMm>U1bY*GA@$=6I&N|>Bpnu%Im$D2iX^f%6NU@ zWK$LJD5YhJ74mI5(-oOh((-_)CmPcsV(+4jggc$O(zxXN1Bcec&oriP&%@L;WP#-b zvzrcf>+si8&p5r^Hey(`alEGEh?V~2^=1f-TjzzClqR5C1vJzf zgQG&&z|iTu7GPALn|HB{^_!Rrgf-E?Zi7l0%14uqsU_=wkCg*aKvm$PwgC!{5M;vs z?(#Jw(@1F+$|`(2Og+MD#IC&^u56>l!$UcVgN+n*4VowX?@-!{I^&sX1lY^Qax0$T z%B=3qS~8<(m+;NGeJ1emZ8l+KR7=vLLk|yRjc!>+d9B!T-0$(ZYx5Rxz__gK5=Zyh z__Wfnayr5h&%|%|^|UG~*mzHwjV@eFvii-BD~+5=0-}OF@IV!XD1wR8q>w^YI$}if z#j@Uq+f+d~W5@g=+|Y3Kb2dt@s;Q>mlINmY*JWDjc|*VQs3LJgLk=o!{$Cyia>fCE z@YXr8HjRmy-0M;U(MpfETm&d2y46Yy_jl8~nlZtc&RZh8+mpbQAQv(INdz8y!h0?0 z^{<4{&*>;Uq9K&FI})fu5+Kk&i&Id>*q`1+vJ-+(Ku@% zZ1ni?s82@xXCgL_M_C`Ow?|~>G39Pw*55dg+puPxJ6wz#BKiz!C4>lFy{Vona88;w z9JeIR-dnS}o-`l*=yU_YO}~Npq!uO8fb(tIbQ-jwGKoJfn9FyB6^8te^qES3l@&w1 z6+&g{rEjbUt#Qmv@6IT*7un{rBB@>B>S+9*&R7IASY_|{&aMcdMTAhkEjrnaIsJjE z>J0dl0CarTv4o$bORSAQGO8E9ks{`Gv>6ibD{0j|cL{r2Km5ZW2~M4S?#W�-P_5 zr%R7n7j6dJLWdo?qCbo`{rmfPpcwC5)#lB z@NQqlf^ROH;P^=->ahqMr1TaB)Mqarh!&5<;Dbu(Tk= z+^((=+s3wcvp6C>CS?HzbYvQGG^nWC^bZT{JYhIZ!ysdR z=jb)odZm9bO##BXu?l!dAe< z-44lPI}XWW;%C&~HsdJE0NtFwJ{5$sc0&i*2C5twjr|bu*M434b=2!07*8sGY|W7L z1ZTrM>2Znlp!IX7gr4>CM4KNaJ!pu(4ZfwPYOYlN4R3)-9RQw96+JWx+McO3XlT6Nvl z5&YNIq1MA47NK2F`LpacRYik^cNm#CRBt6UOx}H$NOS!vQDw93ER_(4j7wBc0>Ed= z?Ar?26Z>EJ9IQJ$l!CLnN%F$i3SD5lj=R?mBZFM)@-huD7>Dv6N~96z`978VUf|%E zU63wfeR;V>R`fXa3)5rJzmOVDS(d^t-$S4KV>a{lz=-~SuWqa7$Gmq8XghfDl75X4 z(oGp<(i^+8D4Kq9hn@baSQV6~7_ODMc0xa>`s9cDG^5PaVUnF(Wk+N9n+w0V4}WgHew}0ZuU(MyT>%rQPO@iBAo7M#Fy1G5?7-5T5%@p+JF* z7-!_aW6aC}MumVZ?et=~M93>vW|4us^Eom9SoSNGA10wBUxa4O-yV2YZbfUA8wKn% zY_lXBe_b{_T4;gGbXI=0Z3fFwaAQ}2qpy=dST(U16fI|JONax(T$Lbdd>*AA)K+^Zni>!f^&6Suqi& z)^3;ED@vTJ-}~_AGllok|2Y!_1sAUBar!s&iWdkE*JnY4mkE9nx~wZa71UJ)Opi#_ z5KbL5qICQHB0CQe`s(IraR~PIr=(4Xq5v4@9`?8I32mYQDJwx>DV+F2L4Gx4Cer_rB&8o&JzpKD<15~)He7bORIn=bw-*CVE zLS1JR?&14^U^+vbl)ZVR>GI|h`_rdIdnQD>$dDJl2%ouO9eerSw14p|2OjE?%9ewZ z|2!EVmD0b%se|+w?RdF*)rjM99)E&NHgX@%Q9NVRhR~=2H0Clb%>q?sEx#=obyNcQ zBsr-jdZ5oU+;RlfS${PH`Jt<|N9ANKS@q^>+h1_1t4o4`fuy{KWNm*)vfQlyU6Rd; zoJpGw)Bq9Zy!`%d+~RI*zlrt#BkZlCs@}S=QP^}hh;+AfgOrG1uu;0Z8>C~?NOz}* zfOI3>(k&s~B8^Ccci}nbeb4uP_l`UK$GsWjx7Tm3wdR`hd7e2JuEh}i@%quI#I9#G z2}u(nXUP)@o5%K}_xPTZtn2q2g#%DO_0@iXO3NTO=LP4_cTCFeAB5WBS1qX?RP%KmT5&``# z0v#8EsM9|(!L|tRe8miAhwV%zK*mrW#=A@2Eqs2><=cA4^P~h&LM|6A4=CT<7pdrP zTl&^6Npd^_=yMg2%I8Qaz)eAD4W~1NJyh{;($5wg!viXk-G=$YN`=Sx9i!!8&zpYx z$p4ru@Gw_NBv0X%2}^qPwz=T2*=R?{l>7<&Ct@7LkT}UVjIin2i+NO|Zs+gRbDV7p z36+QobH@^6w*-trx1~zoE{@}-+I-vhlgViRFoE$F&m8{I0^F-h{TgwB&4OxObiFF? 
zn((XKW4^AxRaRk9LT+=i7{3a@Sx9r9DF70F!JD}ILRY=cL!S$ZG1egnp;&30T| zW#8KLTy&fwO&Xm5n%l%5h1tGt|NOr@ZY37R37I+o9V+^WEl~2TI3p@+hue2iv*a}S z0Nnmgd>u2{(}o!yoox1Nak8@Y{KvDkvhZ3Bm}Rv^ykt&gSPMWecuH!_H)TJDyHnm| zH4-3i@W+Z+CVY6%&9ncucrQKT3sv%k)95er5?fsY9R#Pc)~jrOj+@p~K^uWN_nrJk zu2df5&<~(t(UZ&oUmnYwRk>$RYY@+P@7n*3k9Qh#@k?%##eC(J=`~ zJtP`XIL_?S0YT6N<5wu5A!DgezY zq$wbPYohGkseC;BIJI-i_DMZl&@K7hI!=ckw%WlQ-B}#I(w$hTRxvx>@F7Zh?cI^g zpq4dZjy6x>=c=R4+qC& z-KL(Xzy`7f716VNb@hmK4)7aY>ldDa*y44CzTX{X)u6p9#~6#F=Sq}G2afZN=i?AQfXB!IE3MmdeXrG^V=BDU2m7ckJqayj+J5u*;VeBF0k zZ+|f_^e>_=kjGCG0}9Y;USqaY@0Z6Uu6@j)IAg5zvLTk`#Zc_qHP#yj$&N25j}@8U z{A64}d;S!DAl`mOIqPgpCeH1v3bvM{zzdL=iIA!_ePcqzM`ci5wW*hId%*B(zqp&Z zm|3?{`b;ZSd#OT36}KwjArDv}&?<8;#xxmo!2hzHiBHuffOHdL5z1se<*AHrCh7X!2clJBg^b!MjO{b7k9}_RajjvYFbg99k z2e0D7)xyNltzVaPYL_L0<`Qurhq65il6n#e9wm+k(_>9g+qa5@9~qY(xH zbST#469*1_RDQ_EHmT0zF^61GijnIwks}nX1haIR(TNkY*N5(O#Xwg@lZ{EmS3#f#JaPEZo{?^IPYeiyqj-PiJ5ylRsek4U6 z+SdnNdIpFtkg4emv|Taxd`dt=J zM^q@3A)Fb5&jkmZ?iK{nT=#OAueCsEE6V?Eyf$HZ2%T`>dQW@at%5>4Gl|)YsMEbVDW7CK^E6LJISMs@Y$M)a>yAg=My_b5R3d} z!^susOL>xpHjP=jLY{+i!I`c%-?Le~fzF;%XA6^m{eZevI`C^JGvlY3k|+6*WQj56 zQ3D_@-aZ-pl{~yzqN==tfJ~!=;RdcT9-_iCt)Jh&O9^!Xk{d(;_-YLb7c$VfbHefR zq{V03jX2L8qE-Yyhv>k3;tX7>^m(*J)8A6Jutk$@jx@R4Qi1shzq?H1ne( z&walFc?eIk3lQ}=zG^N=%>r_HPL5@_uDI0CHG?3d<#O5_$&52P$+fIE3Ge#GT(sbG zHurnsu!hf~(#!p=sVZD>IYhZ%j;dd`tNO<$+8);UvP~^J`6)a(p)oF(*WF}gjTT)) zHrE8U#={VHp94{(&$c3UkTuZSAsMyj5g=7Dc2y z?~I$HT~mkk3BjTIUnig+Ob|lYI-B>>du~QgbjWm%SQTKAd(Ee>Z@t~fY2v%dAAs%v zp-Zwjho1O3T)lgV>j~~_TNzvU#nVNg0IDr!AzyU;}y5&REaFfRIf) zHFchiEiptH^jWxi`xeY_??Kr}7=H7F;^Xm|pxJS+g9X?g_I@#($yZ_M=pL^a!)3J@ zTb>(70$h`0{u{v#P>}dMH}`Xb;n~ek{H&Bhz3@kIp94@tjR`z{AioP=B|;wM;&}Zu zDq$*??)F4j4t{|1fiM~xB97mu+iR18+(;%D%C{S_6QFKBU>jGe@r@IDiC`Nl(vH$+ zoBxtTV`!ja$~jL)jnC7eQu*f4*MGzT$R@tNOJODHOg=uczZJh=r+NM)e2A2n{;v7w z)PrUpw_Vja36rpZi@-|UH*1_N%CuTF&^h|PLY|P%r>KVom=vs)pwVxvhK6DNT!I?D zm>WoB6GGyMW$jv7Q0{(G-HsmlD1cH+&_F|W&!uRgQYK8TjL`T)3Z2maq2Cys-bVnK z{i=^#98`WvEYg}gG+;`AN^@UdgFTe71KkRKT?7*5!dnZ2N238bF+6VewVW!lvbCuf z&F$JX?qLUr@}U0p#S-UE3ngORgw-7m>L3D^&P zQJw_9jt#;J{>(neEa_g$LXxj;`o?vAX=o_!>NJ)%9KQv~6k<$4f3@d2!`l|o!`q|D zvy+PT-#o=%&_Z<$JztfhVzi-W6)m3*BHHf=016Tc? 
[... base85-encoded image data omitted ...]

literal 0
HcmV?d00001

diff --git a/docs/core_docs/static/img/vectorstores.png b/docs/core_docs/static/img/vectorstores.png
new file mode 100644
index 0000000000000000000000000000000000000000..fb6604c1c81755a649c9718d68c5b5867f6e152d
GIT binary patch
literal 118309

[... base85-encoded image data omitted ...]
z-Ba#FwTVW95GTo6(SkFRKw@m!n{_d1%v}y+;K{F^EkLTet=U&iS7xXlQ9ySoN`ab@(oo7pSuy zcmA*%l0|-Pr(F}yBh8yQHXiVoVe6LjMYaKvqFaCwV%L!&A#GBP_7}IGHg}oimK+Xj zxZgz8-iweK&i=w}v{-Sn=z3VK4^R8AhecpG#C>D5N|?G}JSmY=zvmZNx(s2r@Tk>t zRistcQe1av8WZVCKdbhfvUU{{2H>w`A8-Q(zGcNbj#lcUESgZpurHdo9$rfJs|Nyh zA{yE6cvI~`5+KGEEP2%#3cQhsSJG^8updcFt1L10R(vEHs>cMq0DWM7`T=)Km!cb$ zDNX~MOZW!yJaye{LWs4dIXrR{Ogpg1gtDle)~y{u7f`ykZ*>9y^dN^ZRZE>Kw{IpD zt5}>eLSUYSLYF070I(dOhw4fa%=k4KrQPeQ)=quZMK%UX$QePoV42k7`xQ^WB9o;H;sb6~C4f}f{k3X!hw$Pr zMz&P%ob+v{gUfjuKLAK@jFCA;E3@9Xm)*bxhxUY&AP4WMhjV@heW@_+HSeIWqyF}8 z34hnk7s-TiJUX+-Qu%M6rk}ozSE+mkiA7s!!sj1eBAPpVXHUWP!DzvH!m!v@PTM`f~1i0w^;X8(V;V#zZnQ(A_P} z_Vz+7@XGHTD15{x?h@wPjhC8X7bMM9abpyHEi+{OU8osA(V$1X=FxD$ghX6HmB>{M z8q&fsV_&|+f+!gCc=P+3*l7DX8zSPkEcpf#(L;Sj`vvJ5ICA3gty!o@Q?mrAfbh{QQ<~t<0FOL%hGwiwYoI;dt70V8x!e z{jt)yonIF7{c`lcFuw0bQO#;9!~y$)g6I;!n{%p#ySW#M1Vf|$*AO0XtABi=CzWsZ z@O*ixYiJdv1&F5Z9@N>k(SmLHhFhWHCbAh(bnMam35itDR3}Lcscvu&NusV_0-bBm zM*%|QK**<-pT|?-2!57+19*_S{9UjCtQSZI5Y2vv;Wbaj@Y3*0n9}u2cz#Dhbr@VH zLF$Gvx5!Ktaw$&|ABtjV2C@%P&9ICU?Ari=mozgf_^!<@XYh(_o8=hf@-tB8xF`XV zLVk(tqGxM(XoB8lz1~7Bcy_!%s1@`?&!zVKqxEY#o@j9sjj4iUBX1X#`n7c5Y&ra; zW-yoe@V^ez8@E%!y*0Hbt{4<}>Ihe!GWj~5wseT4TUylc`79k_D&!HkR9D2R7md4F zG5tWvfRit=$uFAWV2tip1}!N#iN9g@YrZ}5LXWa?Azt#pg`UWNe@r2k2Dk6WG825LlRlymf3zTK$X%dQW+K~9@ME73VqfDmgvV4b3O+=N@ zdWOf-Vs|B|sxX|UY}8AHb5ASPx-z$%tiSUNXWj0(7>X|w3W{0V^9_^*kvvkQqDHj$ ziqi}|VJoS+2WW#4lzGg%1rL78#@#AuZm%edUvD$wS$qsH-}XvcmjB;5>*L;wZ=J-mHKru^FxY!$Xj6gpuR%0vT( zWk+u}K<*d;A~Az$nl&$8(b=zE6$qDw$;tP@5iyO9P8#K?*uFGpWYVJr$ThM-W(@1w zqJ~x``_4nE_;Lao*Rl5!&gsq(5wrr)Atn|5GAD+N_uXuk@5_5S(_PAtBw5$+Rcur% zs-?02{kKc0g0E&;uhXol{t}gNe3-=10ihCl!_2i}8n)C14SKMr2}UulE&3e1gFy8& z9L+Q3PC%+c*S$IM(h(g4UcA(&A-=?82BKs|<|u8HG>DO3AT&TaLuTP)#EyH{FNAD_ z|AKC-S@k_|HjGprYjWtma_aXf4P?G|7R1-gnb(=O-FuZnKadmZUfVnEuY_zNQ|Wu3 zo2>N&B(Y7vlEd_4tpnWYgvtogZHS&+St&>_tdMshF4ZAdeLD%A|EsG z$Rj$LhR8D&sHl`^Si)VA{Q%OWw|#HPC!7Lh0pdnY^m2fKT)@_<_9LW((cv7veQ(lb zNSRF06GW9aE7ndJ<7UlDOG}5;5-|`PV!(+aPo(3Qp1-{YuzfzRI`ZOVFT`0~+jK0h zmC)kNuZm2;WTWbks9x+R2l@6rZWU~0R$9l$m*wO^ALxQ`5UnzuRk9hR*O0t&g9i>@^dlIOxq$HxKx5{vm7~0Q-u1-?{S!yp{k}@8vDK{~9SPx4iXA zzj|*5k*i?`C-_>;Tg(cH^`w(?9SpeG>-_c>2b)c~%S6=1$xf)APgaD4lSyO_Z3ZF; z@&`#xyZ7GTqvN=aAvzen`y|N}0M_lXSL{ae&2C)QH!HkkN%8@JGWxj5?#Jh?FSB!@ z;#%oXrG8fPicY8U@11An4ZK;}k==1JNdMsa_dIQWKm#QQ<<=|DWg00Bn>oo)V>_Xm zk5upmsq%DVEfT>h%^o72d*^WWSCo6h_1%oK{V+jHCtwB*arGKym)nf}HI(fTM9PzYHc3XO|`Tf*~5-3&*^}}Q9XZKp9z%2ehYzN1km@d@}*+qh%irHw~(6!;WTw19mcc-)UA{aM&|iIbdVWUwAZ;)_#QFy8aI)*jCn%eS@Z z3+hV(_2m>bqxfKzDcF50yO_E#lTF+XpXnVO6?c7bes>Pxn*2EJj!2hpI;Wbc%Z_LB zj_1zwa??S&>%CiU`?>5k57DMI!#9CL`~W}}QtD8ofh`Zm)_jKn)YC=42|HT9BYxZ- z=bTM~|ENdaZ?bB`M}m>4Eh-oxKO-==i5bHq8ihQ>W{(oBWjYt=mpK2>=C9cK(>RU1aRcPBX3cIIcQYu3`4 ztedZ-XN-Nf#2DoQ{FIGz-dEzIQUCdZFXBHkRe`ER#j!v{Z)cu?Nh``a@5pd%HHUXgO$zt0_u@51-J&!Rsh+b%*3To^4uf_9NO4-|q(L%6kr6 zxpnY9*2#6Ee|gJD`f<o5|=pFyPO7FQ1b!BGN8lj}`R098=H)tPn9 zC-7G~NE8{=64qXAL;HB(j-MU75wBDnzAdb&>BEvuegtQl&?keCl*q#;U-P7fAABF< zZIubx4IWqcoWC>v^ZPxDlLmHDGqOYm+pLx^7PUJ`z*b!i;vsE$V+Ddnymx`-crT%o zLUZ~&&tFKl)lXMZunrQvZnA<#5&Cs%YQiYKl8+%W&vw(`kE4#<*B(V2RgFAiRcL#y zRLhEGaq4EEDci_ShU6RPdReLS5)%uM?E>PSJZ06@g-}~X{4kVhn^fBE;)icWYxKZ{SKcfUw zkb^1E;nqO-+gy`>TEx~w*LS0|GE+VgyEj8{5NA>e&Z;pF#jv0L9X5u)XsU+Ad60TY z;%SRq54I%M4viXO*67PupQ2`ELh|LKMCQtFv62XpNhEsq^RZRAc*`3SdaTOhFIkJK zm^wfmjCa}iVojx2U&3iYdHTV;Bg zIyRLNFcWb7)AxKX*Ltg+)?_4D;6G=|%}T-Ctdx0LR6>Ry_9>O$`e2rmxtnI1pxMj4 z$C9dOCeP`i#sFZjH_4rs`pz$QEb{MNnT<`ut7|&tSd(U1z4Jm%?dt#G?k&8cT*K~R z7(hxIM7lwwl?IVcY3Y=%K{}+QhX&~q1SF(ufT3F@lx`^j2|+}owLE22!(?Maxc1c9lh4O>0=(5_ 
z70!H1D8);7;)aEPXsqAvsl*BP31mGYe4HkyrR0sBLqhHB7{h4aC~-gCLwnI179XhA zE;Kj1?{mV%@%A$JiP%2HX{+IXttM#*66|I7ru3ZNZ#|GG4E+JXjvqExpwhN*b9s;# zRzH*S_W+3p`yOr1sfnG|$4*0`^9fHCCRk!(nBp7Exp+@pkGmmdZ}%I+iS+J|5crd8 z@DQ&4=pow5W(?bnsihgMZwpbx2t@p3D3x&_pXM!-5WDyo);wz|>w=BAqdkUstS$JY|jN9oK7Tik%@^6sB#QCA^D` zh+i{Z7s!*2CixX)V3YfZk-g|DZ{DtX+2yS-?cSF*mk$hxa}R?)fq|uVBP&z%DiL%c zMCFCKv1m*c`=bR!fE5%>u50*WpJ~wLlgp#61LFw>N}31%XR~%ZR#G&-xjOgiLd9(E z`E(faXABge*jD!hSdhz$5LJC;YuR?r)Nk(v)Uc^;*=7_LB4sRXg-!JG0 zK9K02#KSBIB8eD&#D_c>{xI(Fn^Q%bK5qek0kJ$$8afU0|7yv=%oho>V!6e`q;f z0TJ*2+yc1Zi-&{%F%@=H^1c{4M zWS1YrZ{$Jr*NajZdB$HszWz?m?!x71Ge^2%mML`rqGFXD{C^H~U+lI&<0@&Hnf3s+ zNUhYb?Fqa-*6U?YO|_lwEH|hpE}7h0gqcUIZ0^P|qy=<7c$21RiAEQrtKbmH;Bszg zQ|~rxQyoisd&(XQ)1K5XoDE7yBif>XWO=tRSeSUnr}(L>`B8jb2Sq%NpC~(dIP22bPP?&CpC1^lgho z<)ql}@d?H)tH_a8^R{_GFTCRdz6GBD4#*aEwfJoso0a+RRH`IQ?9%1P@FXNE6%7V_ zSiR7={da(g)%3Gzxnn2PUj45X7Hr5Q$@asb-lm0BfG*&+RmJ+Q=zACFszjl){$T9K z20KzoE&*zbZ|D8H9>aa@;o3uD2t~3D$H90aqBs}(+#x2-=6|}g2>S?$iAELKSOmy4=T-*_ z(d=g#B1^G2?q4=&jT>e@9b~~v%L8Ek(mAC zO2wL#vX7A6$?_ZFD)rqstrAK{lHA@?5(l%MUtmVfcPE)9TNGS=lE6ho$)}-AC&spY9pm zznrqN`m;-Z{)FGtp$8wW)zi8LmkxK|KfIT3!2^LBZ2E(0@q2+U4cOJ4G*Fb+gz`96k%?U-<>#pJ{5iVNEV<}w65+ZI zL-XlM9ohWf!C2ucaisDG;@NqQCkkqPEmQBnNak9S-Vzk&*YSAH5O%lO30eAD4Vxfj z!+1oYjH6^Fa$!~&UYfG4*0>DO9^stg({T41r4YYa z_E;=eC>pnrDO*6JCR2T?BcroWj%V{6{*TW0(5Bw_ljRxK=BjUi24xdojSGE%oN{GyA4H11TI$(R4-hnYy949gea7z~BB-d@S2Ukku$W==H&(G3&Q7$v*!UN!V#0!C%^srWI@UM+Flx&&T6Ei>js>?+fdE1E0;LS=Pr3? z$p%A_HrlLtq2{FB2Tn^>y~uyIyw_NhD5&W0h6aAI?V{wpD7LpQ3X**V@)XjE1hP~) zrlbd_(u7ZCuwDPnGdRWK9wf5T!HOszgb$FP$nvFYYylA^K(tHy~*5xIb`Bg zg>kTWpMsmePjoZ7S*{S>*t>bbZAQLN;J{yIufP|Tkt|+#`H(c7a!@+s_n-wARz#7P zR?JA`%SlO`Qt~ZANI)Eao^%W>BDq2@``B50fc`z19))xk`4WgcOn5QUu8BrmKKc^H zU$_5|4-j3RBTc7+Fi7PJg{drh6qN+t9AbKPCo;uS`hl`{!%u)>3i|&3PT3s7Uxd8r zBHG|9c5SCmACved2(@1dWVvQYmP>_J^nOf&kVL*l7c`+n4&vW04K#G1;kWAV@Ap~_ zC!Ysq((AJHJ1MJLN7DELLoS(Pb0opTnESOko;G6XMS{OCz6>!!Cb&&zUPkUhHy_o* zQO9EA^I+PQV7jC8?!+?`VDxkKHPWTa85_058{rdy{CCFx13Tnfoj?+$+l|0LpJs$h zBuwEIOYm+C|8Py>K*pq-<+TK1PoR-A+|suGgyr%v+s?$Qf1%|=DQ%2Cz8UlUjf-od zo?@eo{mG3K$~l!Ze_vo#ML{O90B}E^`qnEvzF?{h*p=8$+A&v{giB@ zRRd+Km;_~rQ8*9N-%I-MAtqz`^^qw-_yrVbBSyD9PDyy0<1@Rt3!2-CIgz0R(~%T{ zrAM=KbT?NQOJ2CtrNcU=cr>Q{E1A`!G6166)nNh_o4)yOn4 zSBvmzS%lrfi{6I~P?>o}h>?uwf?@eT>sYdq$ZqOGstVo{KhXHoeEj)>cVwIin+Bt4 zA=)w@iR(y-3S&i5L{#r^i4CJHgOz5r(=PrljjII46#4O%en$tznl7&Z@ffxg&S}m8 zdUW%Es}dQDOFhHgH8tGgR7M{@(An5$>;3{$5yKV3{<-CtKMLk-mC3aCJ(j5yc}%Pq zru;G~y&Lx`)VHW#4GD?g>`2#_%@Gkn*WYk|;O6qwlqIz^ML%9u4C?nJcPf2Ng%_OY zd2y~yhtH5KdB5l$O&Pp~jqryHvJNz&_VkpI1J1uYRaU8*@k^`HL|Zfwd8f=wsFdVZ zc(lg*lcmd3zrpTbXCgI3qff;gi^9lcVL#gJYK@pDL~DI==@bVGz&3hDbC)@|5+J1SJP}|(>Dh|2!k65k8*?+LD3pN5(X%?EY=npw7!YY z!+PeVB7?RZo_rmoPtD$;?ybsW)$i6ZGQxKmVBLqeg&}rgYqV3r5H`jejOEQI=OW!# zsLe+6%pM|{xD#D|xBKA_EcZb(GNsc$AvEmyun6IUE5}KT%SU2sin!5U71)?G_O<-a z=#NRaXfDSvrs(L>sV}vu@Q7kzUPe$v$2r=Ot!l*=+z6-L-B!(*(UPG@tkSds8>Li{ zid$l7j2{jukC)PT%$(EByhEQ{elG+jXb!+8a%idLFSJHdhPCEL-B4`t9gr-3@k%%2 z=3@=#CJqy%jm5j8P{Ir=zED95&gJ%r~^ zg75|v9_MI+@Mpqw!g~KJgK@y#3(!WG67*GZf&E7O8tG-k-roJgf`=~30Nh#(x9FCghbaFPlJ!* zHVKBax$}$i88i5T`oy(#+J!ME*ylEL!oUbxuG9?~QQ7Ce&X8ZZW2Ls2Abbv}ezTzb zu8!nNH99BTGbvn60BTd%9Up7QPsr&~^A<&VI42lwol#2P@v#*w@%@9~#Tip~X`>2C zXwiq041RVwJ>AC>X z6ru8{vB7p5yT#YFkA91;8S+=Um(7h66MYd|bn#B=Q3_~)Dr+fHJa*LSBZ?dvDKTP1 zL0mXMM-Z4hLx+^>d-v=nyZjY}-rvh2A9qRS`jh~3zd57ERiIp>*gXv<+?>#|AD5)B zN}^T%P(jW>w1$i%5e8OOQpK=fm~(atR_3IQQOZkr0A_uwbLU)XnUL)$^@2lrrBfe_ z$ZMEy@W;-*zYxBBuMNlQma2EP()cA8u2CtT@k%7esw6vu4&_Za*iKXHCGG#Fr?UCa zAb`&MK@~_0kTxHe3(Ge)VtX~A03CBZ0c6j)6n5>sDW#Y5%Lz{b<7QoU!MXM4qCS?U 
zaIK5_dDr`s!DyKKkatg?ro1)^0(v2j+T7_9#gGruQhF9r1&WZ|>WzpR*+H$a2U^_N zaG8}Jfvv$J8)|_G!j%yF%Awy(@q+OOtuc)GtHgzek~RAjA#`i{H8BtHFak}8lyHxo zMNJC_sO)Py97hbeTAB@LyH|}Qfx$t`pIry?zXl46P+!6(w(|aC8#GoA_}<~g zu6!l8+zyGp=&Oc}z8{1fkrvzT9Kl@2)t;6A0#Zg*UkCAtYeQGp%tN~?>gt7DWwpp-1^w~1uC3O+-* z_opKg2&MaHC&PK)Eh;Vz+$wt|^5;8^s}i_XQDCPGoU}P&q#wM+P*`xNM>S!JqrnOj zMi*RkGR?818d!XkaxVr;(K39gwbE8vdADTZrY5yhwbrcj_H)P8ZNR|XzW}}Xjl7OgrgOR5jMQvdT4D@_{(LO;U}IV0|It=h)}0b=Lly2Y zx;meoP7a2hibbOye~jnB=vd0@b4;;hw~B-M>DdN~$Iy$`F9r+4&$1-~5CKQm{Fn9@ z{JTr7O+Opf@ke!F+Hn!2AQz1=dA0ps(ErTEafTa3TJP`jSJxf%HEbs;)wi6dhEG~e zEm~$D0gg0xg1FWfUBlyzkKMYSUh~@jBuA3yA5_f;Vku5OvE?aPSnjaE zqWp`jm!^xjqn{Ee-jbMTC^7WEzxOOgE@z>EnJcXr%>cLVXK3EN*%SiPrjDfrF=zj4 zVasCck?eKj5WoG}$VTYGG+S6@O#N;-RWe)KbP`{{gE<8H;Rn%FFS!P(Ks zx#fN1f}J~uT~ibdLYpW3t&&f=fr3MAcsPGwi)4UM7U@^dlnc8_b|$;$uZ1IF@@Ob@ zW}_obIxQk_&E!n~%Kv<2blE^{ z#Cs{^I9?RRWmq?9b0xAoy;<3iszN>wG+&%TLNoK;I~Fveo{efyBZNFUCkoCHFc6R`_0g`cNd@YF1*vEw2-mzZ7 zT!-|k8a7nY5i>~TBj{nM2`06I++5TK;@-CQ+P8AsyNza@ch?@h7Nyqtu#*GR? z%XWlkUGa7tvN?`!%JfIwDy8~AYmI45{6>_H+YwfgXb795lc1 z9{OVT^Csb_!uJSqxr0X$_n)oT{R%e*+ji+qmVW)@lw10 zJtmL+Z`Aoso}|9H<9t)Kr{8|d_q=8seHO@SRW0!>bB)fn^tc)_y!!j~RRf#8hGB1G zlUyGmhb|V@rndCT(A3aL(XRVy_rMlj|OJ|MBb5q?cvd(DD@JH=-#X$ z0fog+p3XJ~&UUFPzM#*fL$sp&2rq0WP?r2oB^C2`H1fYk0V+|c0p#{H=++i6$suItBmgsbF)99=Ys#N>m(f*$@Onw@cKj6}Mf$Cff zBC~tecPkePHk>AazkGSWHj|Ep>y+6WVF0qUouS_i@r0=FVanzc*?+_A{~qCgy>W=^ zLW;+l=gw&H5i(5h!^Xn_s+sv5hc!uMqDt%!pTum={5j+r{y#&EXR=sy0QJvZdX!ssusDty~pVDTomS10LdgXdLMApTb5DZ+w3@GsA z$oUvCE9kP)#cVcmv8`mTlBuopGpgVkqqC$7 zhi-SZzi({J%Xm=pmras+Ieq7|& z6Ead+EP#e-+B{g)0e{8>tv{z^IA)4^e((g7z_g;iE=Ir3D(V_cBQ=jA=q2brkV7~J z+z^0T@rYSM_g*Qynjdvv3VapciV0+>C6hNs&_za-FM5e<+los&K+|;6+R{=Vgxj z-Hpb%crinOmj8vjZ*3S94nIp5M4W)r2;g%FDMQ*b<82?{xmNLZoF5PycR|T=d4JO+NI0h{C5(~>sI0*{8@V&TmMC3p$fM- z*zvYv8-^E!q*CRAIe7xghYO%dmI_xMzm^$hF=#P|hppOfz#%Ph9Efbmmcmy0Wh`;p zFz-v@G%JLWkU`_^Y()7_U=`H}9OQyt?Bd zMt9RM`HJ~RpR}ubZ4U;hUN$Ju1x&Wbs;Io+mOC3);fX5%T!m0;8=Luq&f9&{b=!1f zw-Ty=^0z1tI*%z&+oXT%cpE|m)MNK=O{cO5M$Op)g5th;TYJu#_ zT zL#hYjs1$C0n#CNq7cA)53}?;51vj%^H>qWg!jUJ`b>L=s)y;l9OU(-{2BT-1omgR1 zhG$dkEI!x1RW)5{r_)C8vnY+`QzxWzo^fJCKjEZD*;0;^wIl?7L8VQ=3 z;`th#lhl4PKZLYn#)>>X@2+Cx{5-!2z#fmXSKxSTI!r7j47Ore=|?6tUrY*xodXj0 zpM4gR)(VJ2N*lcZDA-EsO&v%DZP3hAk)sbnx@s)A@GSeR$L-!F*xCZhRqw%D5Ux5c zyt?vMKt3E34Qz2<2vwZkJ#OQSE=_?HEM}qLy1tK}u_8_He(&|kV*b_Ww(wBBswpGM}#mxiXT05^t(BKFJHVZ1#;bK z|AY2ieZkw(!q}9dw+9`LMX96gMC*Y4wj7}HGyCZ32>u6j5(X|6!a@|90G{I#K6)l} zZ)~j#4Z6hBIg2o;XI$^NhHnI2H(C?WP|}oNr|KboQhNW;*!-X;eo@L5w1Vmg%zCFc zzb9Kd$Gnb6oZc)-=BzY$ycKKY(NCt^>|yWWN9`g(lS(cYOT8`I*w(ZR@7FW?Ibc(X z3laIP+B7$ScyjG=ZYyWR?*rjSm$X~wSiB}+%O;;EtbfDhUl@v{ZS-Fb35yg7f|G0u zbO_a7p8&B6GgB}I?mgLTCiEDd)9s1DD5tjNU9D%z{Er=8H9AZ^6x~DWbqOTS%ols< zNkQ8jmWw0D>h|dyoxw&USE>#GeK@N%Tluc6$s_>3pqZ)6KW3DpP9UF+8qnKa7bt3+ z7G0ziU70Zpf+BSaj&>e+91y&06gsCPRB!0uUxCBBJ9xyi4wsyhqle5jsb5-pAPb9V z_%0A_KsT)~4!BICq_CyJ7ovVaA$H&tQ{6f`^=MZxu&vw=zA~x3bW&RZ`etJ3)4W7{ zG6eGSsPh|s*#EWS$*$=~pe=p8vw6D3`cT{=VG3RFLKL%Z}N>;r~PBX}K zOcF^6VHn$9zJ)hzj+i8)Z>phTf5Tp|*A?j*RHT$_jH6>B z>*`u7_OuB5P^_>mf8D(^@cQ!tCwtJh#ObUQw*0K*f>jdx*_)0}=iirC56A*W4#*uZ zKiqI8S*8j^J{MLKP;c_8mV*Gj9X}#s#;cFmgt=zw#Go{HUCEpy&%GZ5E<@m1mXv$Y zK9hCOVq2C(lv3JyFBTx;&QRbDGnE(UfXmZNzhj5%T))|R;MBcc(whDX;J5<&s z06ttl>bB62lQ+F>;{5F-+OO&|412m%HCwCo{LftMl2^TMB#liB@Ib$CN2GfB{$Mzz zbo?RcY|NkAa5bTQWVDhG>b=XMPn|8IOm)aQ-el^TkdGj>WIut~y2}iiXVhn$MaJ&P zIsJv-8VT!+XI~M{OQL|=Qc#Xbwy&m~-nTtb(wY~(OYce(OD!@^$V!Al$U=nq!^3ca zpkE?2ja1yhm__BQf!o~?2W5wQxOc&ra>F!^`Fgb3YRoXEYV?2O}<+ zo)T@X7H2!x?V@!K0xPpUhX)a%yYXI_q6zn&GQO~rN zMQG8SrmFLsAxp5p+lxoyvK%|)sEbVN199crm=0EUf%ZvG 
zdDcNKWc31=Sn?|4bcy_E6WecIkIga>7oLu6WX)Rv3y9 zXaILwS3NUuLF>22*V@SZGb`^9vyV*ASaat^;s6SeQk9qhf078xQA~n?4(+YlL0T%B zr#?=!n?8xzPlGxxafDJN_bz@9@o-E&sris|E<0XwcmpEc#{IV(j^AJjlk>puvE<=* zi}F(UO2&njWeOI1>2CF5$IE3h5mTKn0RgiK?qT&Q{F)shO9_u3J<7R+iM}bL28M72 zB;HtyPC|<=y?2JrhsRr}v<;@u{Y3HP6C#P|_N?TCla&~dIv!pN}HJ(vP6wo8_- zqva*nOGkI*8CW402-)cD^|G!6&aT+)SA*M>hogns09|muf9APF?rs znj^3SgAN&RPJh?od^>WP1Cx@{h=XqBSMc2`5eVByFg7oc?mL;r9QD(`BuQCqmr^iS zZW3^_-Bq6S)-Z{sF3V)K0%BD^wm)!U($pMjE$H=gKiKNcLBki$UqhQ2PXo~awg)ja ztF8=gF-tJ?`cqj=*8un7>jBZ3DmBB)lYND5wXANaYth(Y@@QdYy!iOMnS0y_cNxtF zdQ+~~!=p!rkD*yy!W*QHzCYegZ#GOAFM*B&;qa>@1t+2=CG5925|X}mH>#%2?ICtxpr4rh*?pHsSZm>O+T z*fwXmNJP(6)XudY&_c&GbaR`TeGcq!l;lwqQ1WaTWDm`EL)B}~CXXjG z3ApbpQd>`*u69T5XNs>RvMMXHo0$rK4~~Vb7iER70~@3UWb0P7SlpX;!G&P~uusWs zRN=EjIANr;=Aql>lZma|-#@DIYtuISHrp&cy*aZih@A0Z)L=rlL-eri`ZK6qN|H+Q zu1P#MLx)I&{oY5}uPn;N4E3vf`6%>0iHe4CgI!W?(9JowwS?|h=8#MMW)6OnRW*?f z(it65-LJC27hXkGYx$Qu<<-=Fi%!CmE*MOsM8e@jFCP+5TCL0A?yJ{AlAu7B&4)X= zo+ANxOP|G`ZH?yU_Pzo=o2?PqAjSMS{ zj~wxyV!4|lMN`cM7UE4-a-(^0;0~(D1-WV;!W{Q^y^SNgq8WOU6PsvtF7|XCham5%MgYj`*INb&j59CCx2liVRdSbZi8YfDO z24161pWj?^aXne0!DFLT13^Z>Ci+8T>&lxGAr8aZo|ZYndYATd=l551P5Q6$#nSTx zA^L|1Pc_AC@9#-tQ!BiVi!JpAQo-);&i{>b2c7ke1RHQI%_QFd+Q4`o(^~ad7AEp75VOxF(=c$pw7wL=gv(5F?Wgp|578W-*aUT(I zrVbfZxys{nK+qy>&&-=rh3%3@y3?M`ByVb+qPq++?jMo)qbQy@4@|Krv{N|3akzru-|4j4#vjx>BRLk$^ZA7CgTgiveg;fGsZ?iT zg`U!8DE9x3$EG$ZG7NWobBJ<2XPY+%knSx?+A~qdA}9l#2#K5f{jiJ$km{rWH@)Iu z5$#tsmUw;I8Xx)-ZUK1bUfs~5@ZoqP@u&1VcsVLLetkmA{_9%#q>PE2=!%!$CBBCX zJmrz4Mz@nC?09o5k@6rOc53>gfR?%EN%u9zYPck!R4i_tWEh38i#a>c3Mwv|VW}+o zxhxa=>-V(3(Fa_ORfnSZK~CjriCJ0VS!P_N$rAOVBl*(JYq+f z)YIJo0+B)_q3C+Cti_PfsOwqm|HIOmA8>5sB|o z#(y9z1WQF_NNL>;0qXt{|5m|-M}=hHX$a+VC|y<&1OwZt%q5NZ@gN`{axP@QfygME z8xb(ALa1aei&2~V*97i7*d=Ak=K_?Ct*{s};lX(mx2Q(WGMOGhe$@vV0z)|I+@1yc zg8G-I1Yub==UjYk70!y{UTLJ0cDU-i31Ry zPCeE;_XPlVoRAQDa!n`jHaD_A{6raIW*BIU&UKt=T}G zZ5?UjNZ2n5tb$o~)XMr8;n)tkXY#&`kJGL-QNr;fHcrcuMq`r1yT9YXz)Y`rnZJ+g z{LuKx#`{+@dREOUq5b>5l51}Pzo*t2-D8<3<2F)T#TRez9)xANB@$ixhUn`})Wq`e z)a7s&hP^bQh@@}tsW$06JZqoONvDuaq8~9bfVfFr2{oP!<`~#s z5!pfyJ4C(HJ?}!9PVPV3Md=^cxN^dl;}YW7^ISI*r8B^K=JNsOL7mOrK+Hjo`|*W@ zk(oTgqnDxcnW-imA+$ZFyo7r$ifeX%oaJxbXMc64fXW-aC-x$M?nV!@^770ah!*M2 zzeb-vH9OvDr&Tf69=n`u4;;!8q5RVJ`qQVE9GDBQ90XWvroN~smlFr?5ak1vEtZz_ znKy5#NrYH1W^J^1{_~ft0`)=!^pj1i!F^(fXY0XLA(On)giLaSC7cYrtKu?~j6Z%v zErdi0BPzzw8T4nkW}9DQnjqpBH4e+@81Jt$7+*w=BR!Pg>zv0Ii09+eNr>BsKF`1+ zFcCjHYdLh66{Wo(yttC#+b2MmW^CiKul!~|_{H_sM+j|!Q88@>_ReNBMIQPQUGc@A z0z+OJB37&cEJuI#;TGriqt~Y#AzAA7Y?V2F&v-4O*bjx*NY~#EUMH=XZkwnre)+sR zPw~=tvkwy&eM#kz5m6 z8lE1+2+!1?(h!~gH5H1tBBy3t9#g6Bi^>ji!D2@?M)E3@kVu+E3u}z+$CHm4T`u}9 zWZ5KkE;ZI4WZbN4tCUsCyl|!yCw-&)**GNlR~H&l)m_>^S3nBgtFNLZlg?6a?ST%W zu@ht$g`79E{DQ|G&;%5neJ(5ZlQtpFpI|vf(GvLgO3dDp&_;2z=jY;9qEWHMuqo{kBQXIq;C z4J~R+`+nNfxJ#9G_dYMZ;VzMDT!VDqgFYYQyS4iPtA=*5UtAwj zHLuyJNz%{KUtM;EtIYGP?$(ddN4&~FKN_&6slt%GRyOYM&_rFh`13ixOLq)%?Jq1P z6Krm)J5Z!XRmGplCxCjOBq<3oBF+gjd@KDw ze6siPA4tcJg#jG#8>6gO;4Y+yP_w)rll^fs(b?KNIB;RYLz9K*DutqINfCXtAp0_) zdPn`Sdd_ezQl@Gfc!! 
zStrZ0!!*0|Hw(yP318YdYi0U9f!vMXG!OW;@zy5>cRIMYG9oeDr`O*F z?J~=v!<2IS#Q)binjT+SqJN3}{l*`qnB*R-W0SAtF~H9gbtNOiXeYssD3W`nSD%OG zZ1EW{G5R1OhETD++5EXzAK;j-;j!?pDu~SUYf`0zuN}4r=C_?VpSg0a=t_JV)UI!!xOh2z&#cNI7E} ztHNxviU?HFPk<`a;75o%UA5Z%wWPIQg{v~XtKk9B0|wx${ZzP#*gghJ0LBl@c$?0i zzd^Bo+nwdn(H!KH0R(6*5$qBVoPvPYx!#Ulk=;OfcliCj17cA%Mq-A<0XNBYd_Y=K z`qPYMJ$ZK5%cAE=-7^Ig*JmSgIBSKvs!CG!T0gj%-|hRy9|_3D zFEE9}5K<`#lrFglJ9!-D#sxyJ(gg$4uQe-uB&c+v2L;@8sSJHQyC(Y5bx4=Xz`#T!za7rkLDNFr9=c~3C# zWVfQe;MSH3@s%MW>4w%hr7NOCQ|c_`*E_T$to_g$d8YfDTDl`ofQD3D_;+dlG;pMO zq^&J-2j6mkthxOmWzpL;i|qRsq~g;z{>?qt60@W|FZFVPi$uu1tvGk($jQ1aJRR%G zB7~O0B<7xhPyx&C=b<5YfjOj&LpJXV=>|uxh1EV?q{o5eBa+0fv|-y4o6UP5PSB1y zu3)K|8ul#F1Z8c9V3cjW$)R_ns@B@I4liDbhJg>DF-vY)YVo=%>98U@E)-@2wPgr6 zrct>ArktQ3(#bwrj9VX#tC?qE#YLv;-9GJh_ouTLy?SN=8xr*A0EB~H{Mk;+&fazC zVJJQZrLZYnalkbcav6O;4r~_XsSf;;=VN{qBPmV6R;+|7=8m3yMAI)OORe6*H+AtK z9D@l>)(mW&=z@S|-CWcF4K95113sJB)b}zj$BK z^*-!Zc<(3gQ>T?5J3oKBmL^@OY>n4JUc@~&hF2<<{CtJ`3>P4{irB~nG5n&p;%ChZ zcD^9hy5Hy)x$a21)x#hbdjfDxMNE=J-Z=z-*A$p30Da@UmIY)RlA-jHP=RqtS6fZT z5pN-R*>@7m+dr+}GYeuV;yEQvPG^hz*6r#m+v>^kBg~VfpDlfLUP1`)G8$Ha4}T!Z zXgnHyp9Tb|Q(x@nj=;etNcSE#eXwg*4le5dWq18GER+nqwg>H%4}^q7;plM6?i~J5 zWhC#ApS+M2L?Hk2&i;9G9R3fXcx=N1S&#j(+}XjYzKcZ!xLn&y2!n+CG#{xR3X9BA z?qtp&{+GD^FPkf$5`FV7wVj-Z2=VNPZr)TcJY1(Z!zOqftGe2ImdPK+VA?z|#pC|3 zU!Kr4Pw|-3@z;tLZgOblP43o8bPA*4$vHT=|C@jY7!_*tr;Rg60o(h5{*kb9CC#V6 zXCk>v_i43LB_Lh>dtLv$*@1Qa=CedrC*H?@vm9qu0ga^MZHjuQ`GfTf91f~GtG0WK zhtJ$<-Vc|J%l^j=(u-jA5znSDW>Iey7u(cY^*)Ti z6ImH(N>n>8vtSa~(n#^BD3H4uryQ_PFzn{R=Kaqo{riUU5mMZn{n%yY4%T;v4?%iz zoTrrK&qvzFw+tah*R3awjQ?lWf%1kc1i&RwpaXVxp~mJBi0_!jf1Q~1#G@1hxP_B^ z6Rn>0wAG!ajz@~&Xn+S4USA~gFTohO-TeKMd_yGZp9x52xP?9(`VmDDzi6c`uo zS^@G@R9=|;GvQ#sEQKN`1tQ`i^ke%yswQ-Jm{|m+#(#|oKI%9oIq17Z28g|_cIe=u z_AEy(PQ62@AUTX8ST3|jtZEH_eRp45|ENC2PZeV1W?Y-~FAwqG`vY=Q8HfQ}j=5vd z`Fkx6rjeR+iOb0kU*R^NQ_Kx&0AIJ zZN#q4u*+^iylEgtrlN67ko0$RmG4uPwf{c7|9z7mOza}iIx5jRF8v&>KhZ@2*(n*IxR5|(En@@b(q?zI9a8eP?tm6N;-Xg|eKxhX9wfYhA z>5`ZSQVN>U$0SbV*PlPv^}M+~s@D(EcvxpXT4a17fH^~0P857A5~9~8NxH70*@O^y#TO4ljAubSyqT65>)hvry1tx5PHCO13R^w=LK#a($EV z8Xf*!j4dnaKdugW&yqMq;3So1Q_=p9ZQ`$uLs0@5ZXD#;$^Yk80KN%$#SkN$TuxP; z|GHGLi0RLf#$O%o1r&eVef;0SE+Pa@80;Q({5t=!PX0POzB7SS=6p2DhWS7CYUGtA zN`nD=|Mk=O^Z(c;|Jq6Ze=!)PcZ1h-$~kXa4=T>9T0xh_=b0vrmEb|L{C{-4WmJ`2 zyZ)_ocSwhXbayF@NGaVR-6ajuNF&mX(k|Yi&)pX z<~8Fyf5&lv&$tMN(VG6hd_D!@`am^YWA)c;BT*9dKP-Ty zE=PZ&A;LHSw^@D>4(KVO@s$F9+y<~ae_kmFU;j7&-0Bo@^(Pk}!0jC9J>IAR8;gG> z@G9xzKXq%r=$gho@18xG{Gh-k&P@h-ci*e6ezV5}kE%l+vDcY)W0V8Jm)csFw?_c4 zv>TXpGo^I}rUnrs-e9Gu@f=Kq+tX(N6a`_mmBwC2u1E#Lwc>ec!puwHPzz5S zmoU&3za^Nes+&dZ9@}~WG0n-ZiDbz0Lf(xCLw3qg`E558IgU@&ZJwq`QE91(Xncga z+}gD!_PimkZ8~}>si1NibnJdD0lVry0l?D6Y~Ccr`n~D%WOGr(D}J-~H<(`Iw|^_b zAMQ&w+u@~PY6(L&FBeT2NPQNPk6Id?0p3K&D|(7nzuSCiH3ry?fljo_NS-BiT5KI) z^AUghiviOYpiIS6N>Wq(g1cvLkhg{^i?QX)t5i#hdo9Z}G)efn!LDt)=$$2GkLm0F z77Rckp$eHAM#y#-%UJ`b3CF3v&CvoVbD5!ri)G!AVzwA*~ z@(Q*1~#pa{Q1Se^jK7u8dls-lcElAOVzrMUrKHdc(l&j;faX3&%!y159+s*xoor# z=$M)hxCgFBe2S+qrPPC!z84DV(xaB2?&Mkje!R~}aTsw~uI@RQpV|FeP|xF0yO`Z; zC18~Z&^FZ~M>7O?t$#b?wUhEL3kFOVi?#CcOui-Z<$IY=^ZyjZF(hlJ95m;bZ2)|4 z^Y^@V$=%Z58lv`+Eeh16#nniGsR>mqoVTVb2ivJG+*iI#m^dFCQ9z%?ZOOuL`ab|Y zQNz6Bh{i;Vmn=!H>lD4#`IuCV!$MvD#1~Y+faN(iovW&e6!TXA(vH!k1IE(p?gYYl z6mls4I%ovsr}ghemqs_2rcFG{g_;tu|4Awum@ZCoM5^KalSx=Kq8goMeTF8)`3v9u99j+KxzHdWlc`_XW4(9gP^RU4{AG?R=}L>P#uJ4SA$mX zsA{RUIZ$7en@{Ba?g&IK5er7E0ij9_hhY#|LEu*40C0P2sRW?;lK11i3j`S)WBLo2 zdFQlu3=GPQIt`N$b(qmYhWAasYKf*X@9E9Ofh99~01aH^ESoVX-yzOT8fMmcLaKyC$Cusve%TrW*{O#S0wOnas!;HC| zPbqZhZ%4G1gqWDsLu?3>3RzxMHQ=>uY)a*l*t;XT;bHms_-q6Z-v}I3#adU5Nya5V 
z{4^d|Xbu5Qv?xBuE%hSRl7)Mqg|2a&c?;rgj3W7rdQffw*z;Xv9^kPH$sHAd0|fG` z?QE9YOEJ4fVEpx4ov7U-7c|l@h6RphK@0z5igisADj{bd*Ry|QApkUUK3c4?o+s=L zFm^jC0r`*9i7a}MmYy?EW-PvR69EYdq!=u@3K@roWy13OJ5)eG#+f?0pms$vR;c=s8t)t|KfGt4U&2J*8mw@WA|<%DKWaPIh|^$h z*ac7XumJNW=h=Q`5xclkM(TD34AS1%xOXSPxSLy=5aj{h?Sa~k4De6jWIuSSEJTOH6w;Sd z6dx;!ACF35?)szg_hB#_!uIlkguQj)Vq9EYNQkp3khx0@-rLWNsi0L1!$YBGt2V7r z`~@Q^Qh@fN@vvcUKb&K@yob%Ay8KbYG)=|Al~mX57^N5RwAQA<4_6OB%WNT`Uv$V1 zpQIhQCnS zYW9alB@YRY$cj z3i{xs#g8CA(|8$LLmo;_DaC~YsstYdx_?nLzb2ojwV0&N*`9B8Pw}WldYO6 z_9W-(#DY<4An0Aqjn8A{X_t_A1vn4?N!5#(t94oDLJZk;vPgC5!nC%2BKB-oxF*RJ@VbPP6;e zsEYQz0fT#U4?k8m{#d>W1M;+1ST$3JImGs8hkITfvd^pjVC8oM^RbyohPX6%L*y36Sp&A;ouiogMr_xvT-~$M}xVCf@F0< z<9`f_r2B~?Xd=-;WDPFAjT7q77rbBhNKLk-VKZP=$oF)=aRDl#c!pGRcFN?N5ZZF; zJ?E4^&Z>Kqs0YOIk6~QgqJ)vu#8SdQ2LGeZk*r9X*z>skVpI%W127JTHjy7tvWPEL zB>dT~KU4g{DWdabT1v&ZUq_s+JL+KOtl88ZY%Zeo4T(%7jXd@s8RKNc2)b-n2{ywI zPsWXP3QVG=;Vi$ z_Q42I!CQ(@ocvY&+Ia+#fPwWYEPkCch59G28*>KjrrDptXwHqJF}U#K4Zkay$~LYR zd!nND^a~FO@*+tcHGq*FL|DkR>|v`0q&Za-iM4YBg#qgPvkf^yLdko37?wyOaEbRRub9hty4xB?#n5-H8Ifq79!C+C8SMZXC zU?yJ4H((Vg2yVdpXPg{keabp!6>tUB)%j0*(?efW1*^XL;q0xlrMCrztpT6nxs_Qk z5mm~XQr&azJGlX6FGS6|ydoI${ui{9qILt|nNm51dx36E=j}K2_cezRC%qooEPmo8 z9E{~B<&g~V6l{OU{U92!{VwToYp)eS!R(D#H(g{Ar`rSGqaVp%gWhV=?0B~G0!gWr zayYZ~7fsq;{+mDFd3c#;2ns&^c-G@M2Fj~nuN&kx__5s<*OHMM{qLN8El2ZCy7^5Q zeIqzU#zi{cSa9#8{BR9=nVu4UV5deA9|-+CadOmkxcKq5^^w7Oq(H_u0v*P#fdU8a zNOs{&y1S+OM$u{@grcYk)@9(27z4R>q98HQx(|P3l=<(UNUN734 z07vHjxjpHbBN;>`wJQ4e1eEdP9a2vyq6?~{;YCRIJWrKPinWUa3=Kr0d(3zOa!vF8DHRnCZ zkxV>=rYrePUD(>4QDlWP5E+Kv)q;Fv)K71Y!>FgPQRh=#v#W`O7mD-y5Q%h&yOQu2 z?j5_0qJE#=9=SBuwq9sjdJVE2E{D+8?bvl#+sQqim&rft`hv$`a?n)iK>$y_s94ET z*LDg`rn{oK%WactssDL)J?wYc^`U^4;u`1S&EyB;+-lW*E1No8X0BmGG)^-4zeEDl zL#77(%bITgjy{xe*JBoA0adfbigV$*$z*K1~TxxQ3%Kff0>J5Pg?;N7nQT4g2Fy8+eCRFbso0lte=3n`l#5j;zO-XD2_p)DN+iBWx<6QmT&l-dH??HTnqR>D~I@}-CYdHUf* zJf+GyLNH;-%ys(E1~i~&P&{LuJ)^N`$yEn=8f|$syvVgz*^#E3)(G3iaO8QSsLNwC z-y!`(@4_C&b-oYT97t%a@VkVLl)3qI)hvI&-StgWyRQ zc1}QNGR`dMf?=yEL{?b_(2<2KAxJivx*gqQGHaE?(cG@D{cXP^Kt7oVj_!A>OYjJ* z!dIi@jOyL)WsZ+Q1nXE~cuWr4FlM&S3Z?n>AY$?y3g?f46$`c6+FE|c)Hk^@+L^)nq8XC3Kgv(L}KN%zgj0+1!y zWB~o-0Y4oa^w`IpAs#6`QE5xJ72g`0Lv1amlWt6Wz?@H4{`epu_X027r)6Gv+V#it zVKUCXgWxcfnHQqna~a+ahwOS}Lo?lPkCql4(mjTo569(YA|h5pQ#{(9%PVGG5Xra3 zDTp+;%YHc}cQ%h+KQoVsx zO0=AQ8xv(lY%8Wz&gdTUceDq`vgnSEB@DL+OLt0kQRP%MQ23TKz*M(B9X;P;NPla? 
zEBb_iEdHvI=@@?K&)9dJPAIa;?%xSjfnQgiC&ACE!xmv0VHiWkqutQ`fvry%P}O47 zeXJnOFRkAqRNL-{ch2HQopeM@&phULRqyPVkju)%I0is`m(wKG%|}{?Gy`AMEKFM2 z+2?+v(g#26tQ$^YYFvjU63LS(XRlY>#q-Z&`kW~PC_`>CA9U+%NJU=eMDFpuFz{tc zDw`KCF`qxryGBl(vp{fckmxhS%wH%|-CbNftyDzh`};)~ zK*0F%EJ}E8dMJ4_NOa!*xt=VX%aNsvrOZ|C2Kd6KrLKcZ|>-=;7 z${cRWm^~C^D?!hR6IRCZ)#Eo;#l>xKv98Y1c8kQI@~HGNq%5QyGO=nqy^%sI=v^|# z7B)4&q+~3oK66g#bEY}fM9T|?lgK)@cWn*qJ}hsOBwTI#8vNp-HwllWnPuN8jJlNa zFFf+9M}mbQ+@xhE8=mZEuuYEGn{qio3Dx@1PkD&F2{zZiJU}RYv)%gF6?gk23O}rh z!&>lg#9$dP5dY90mzk6*-g?#U1$JeDBYbq_yKAkaKgt%?_gN9joY)2Le%SmJV!HqlXZE$8(>pEPTF@?h?66XwfVL&1z3js@qD}= zPTPRSC!-4&*8Tu5o~iGyYmhaB^h5B*+6(jM+UK!x-Om-aQ6GlH%7A%FsI&d=1b1HA z7EHPR=&T#78m@@Vo zM=jT9)=R6wws!J;-jGr~%vk$j7u17S zNtG@4+L>bZip_mXins87e5TyG&ZL0KPL2Ze)1UP0TxAV8zCYwLa~V;<5MXe|nsO$o zF9qj(%8Yc?Us+Q7#|b5)Yanb*xvRo2oANVDy9J2n{5Ovs&{J@({^e=Ml8KfPl^Lyq z3)fFy9vH}c_!we%;#Z|e>@Gekv|WpiY-P;lvLn}=@%2?1v=b!^cdCaI3>y6Q1&hyh z*a7JI$Xki^p>pTv|GJ(Q=91FC_D7dQF7I{FpipLP_&aV^NU{cN^HX1HG!r8bnMp+X zabg63W7jiHZFo-QO0M3YWD1Z|H^ z({y|1PQP9X>zKdYW;l5K_Cv^AV;*Y2w(^k-pM);E{dN8+GcTd^s5Ip(eTk#6=kP>a z%^0j+<|JUbhpD%zbw6gfq!)2* zzn2~nnu(A1bd~br3B6Cie*;g{!Az;ciuALRX)nuf{drK@(&naaYQR#|5x;+k*6@+q{}@><|oV`SnlyhKbrzO zcYC|p@(*9#jd)eP0zPF#BO0I+T&p?>S8f-P+gsZ^>rN(V>PitA3PG zjcdvECZUaV$&!g*>dCV)vkR!|0<*UKvME(Cb$b=@Fpa}>i^DN;=<)$iApaE$gK!?y7VUOM#=t;NmUriB=g?eH;d>TRzHvrv z+@+@eA2LDOrn022ZHyWGQ=#fL7A9r7Zc7G{e%Q0AE|?kH)Q_!u%>J)xfKlThek^SY zuH#l>HP7NClIq^Yp>>hLjcV=3FV}xQLl?j5omro#kIE;u~U1b$fV)#N$=1dsN5h-1&-RY1QT7hS0XyDMWuvcJ9eZR z%ebIZgbmIIsjx?>WHo#13iJf1hnmKd+`4Tiox`D&`(??ElWkT=b;dDRU$N@i*(`Gc zslRr*&~)zoO1oQ!=|-JD#saEseK#ImxyQ=+%n+?xH@<#}ypsL z@*(Qr*1<&CqrSLZ*0L#g3aa~knCfVyE419nnhL+#AM);nHX=oLY8^pMEhV)IjwO8l!vgG%`Q!oH(zP608zIa;$>9Woe;ta~6-}ZT724V@#2h^m7wo32xUv|xmxIx$e z!V8CQfif2bL0=NrtU&+Aty`uLm+}TA1`~7%n#SSH<`;%;+ZIT!LN62gwQco;@k*<+ z-Ik9=v6lA97KA(L)WAp!M!BHk;pQf|)Pl#Z0!r;wn@Ud?^f0bCLhL*~5mQ!*tFMN= zsY*<04mM5Vbj=;;;+LTTc?)_6!1S$B# z*xVSUuaMY0R16AOqf!rSyOGWeM$#V{n(UU`aw9Gyw=5IBN3_1g!Q_@f<3nM~J$o57 zIpCHM5Li-N|F%5b;t1Kr)n#=v(CfCevDlOn7Aw2M;l4%s&$4|V#>5X}+1m;-nw{EC zfv{hdu5qi@5?Rk3&J42bh$3;7oPY=9Rukhq=tG|C-r1EkTYb1dy*_M7<{ld4+j?=U zq0Z{ZD!ZK{b(LH4wt-$na{NB@#egn#2B`$guNranWT1o9giBmZv?D83v4M zZ5v*Y8->;Wk;)bl1s?V+ur-ZMP!uT@)ujvbr-ccA;0+rPQ~T8%suTO~u-HU$G&1H> z5aA&}96=`e8HQ+8-DF#tYFgoyxOK~l5__!JG`v=v*al#WG6H%n{`Kbe1_^?Wd(at4 zx($6>-E`}T%#ivXwGyZS&2tkiQzreH{vMZ9we>u^cMD4QG^f=9)#Z(ZbMzMow?DYr zjQIb4G|otM9Og4u%J&LHSaIXL48Sh#3#{{_RiqHZc$1m=%=awy1onC9;_=12Tv*zt z(0#ayfK-bYOWJ?^?E>o`Fg}?8Wj5bQI}%~YXC>Tsd%F(_->t_eE78o6RI68)15CW{ z+u>{6X-P$6Y77H4oMv?DLldjAc*W+yEOUBOp6QGGTNP0sK{6yfplVHHLv85qQT5*= zAD%OortowO&uj@h%8S|9}|Uk;&33x zU9q-hTDxF)`Sy1CVfv5>-(lynvsw);lv(qp|V>@77ZE~Q}DjDB^ zhNGUzBEpGdTLb`mwi#+8BG5e3pzW)Fzbk~Slko4jQAh5C1H%LucUt%TsuSlkwO<~P2`I5t*f8BOoW8ywRaDAGF?y`?dDNzU2ZPf^ zEx1@*r<}Xme{nvT#C_*b62Z;R^eS;>)Ay_~uh6`5h-Ubz&96Aj5#$ zh;%rpjYr!qNbsN-*La9mk@@) zlxn1BVl~qr?Pog1aK0vw*=S}ci*x*$whok*a-4{G^l+NM_LVW;2!!8I%$e5Eo|5Tc zy#2dl1RCV&mPAPZPAk(ye8nh)31{?!Nj{qygl^Ql6A6&=)yw73gM!2A*hYh1l^y8n1*4wyTUPlUSsjNjb#=k8bpjD{xW7qvEr&Y}^H($eL zXq7Mg5#$kC-5mV{)k|sdUgj0zaemFj&V_dA?TVILV(r$*@f|m7j(tXfJ-8- z&cwgl$a^A{zy#B<`!~Ea^`jW?gHQ;*7BOo5A>puJ6L$nnrES!cHfVHC*N#)qwXvSY zu3BD}-w?trDDO=8ID*(5V#vBPU6S%!-yanvP#(kE&1EndD86qD4DevQm$+J1S3}98 zYG2GyEY+`(mg>xu>R2030^8t8tLiCL)mMW*`<`D?2x3VptDZ-U9Iwp-xhv|CzH4+P zYDMh!^d-ut8I`yC>q||hUPkZk+I|A>%cM_W!7Y=qXtm2)q%B8<{qT_T2EjdnS!X{L zs8x(Gb}?j6azCP`dtLl)viP}hwBR^WqDIF*VEWptL>FqKk+SL-xoHCG<%!U0AObX1 ztAZi7vi)R!)9#l;{-&Rr7k>%jg$vPL0hzQQDM< z;%9BnJcb$r%um>v#&(!r7){V`*VW|tuUpNToP 
zSZ|y=L;&F;s~pF<`LX!S>s!bOI$VgV_aEDe3XJ&BKUl^rz-aJXUDn9Hqx^`3c!q3^ zm{Q@}NvG6&jWq}UY@OPqN6Bbv=Wa`>bq);oP2LS_R<#Ucp+2131+ZOA*f`ZDT3Wnw z`Hq$DPym9Gw4ls%h}{_(pAf5|Y_Z@&b77b{T*YY^i2=WtLUqv^~mEox8 zALR$^A-Dl~_b@heb`ufr#yYITt7Hz*=HrD^u_^irp~}SVUA$R!7qm{|wROfYw{ad) zzJQAxOWdVc`?fB8x%DP2OZ#P2!*q?qMZjImyT{^T{l?JlFAKWNujFOXIDHAZ{}rNf zjie_5E>NTff#J>VH|qp(U2CCS7?R1sPpAKn}B^yLv;|1I*U@ zn}IOgGa{?H0V(#0j86KINJKBG;_Tj-27oLJytr>(z?d%AbR^v^c&F~}eVqD%!`7GR zjbO`$jYoq(g@BlNCaq+MBAc9^7rFVdL(FB;6Y)270Ga_63M@lcUaDJzw_35f0pM+E z_9)i3ch@}vlrA6?R&m{KvEr<4$-(^FA&vpBMzb+WM*uHqC@AQC7KM4qLcmvn!QvIe zA+K+wT}*cyS=gRs7p(slf&TLQBb6knu)|M^x&VsrtLIWA&fn@SRN~r?1upO%$+gFL zE<`A@n>acQ(Mr>>XOA5m9S!dgbY}CFzVF{+ofY}A$+ zBgwW_zYM%A&RUQ%oE_>hZGaPuj}Jm)&U-U9$MyC5jcsw1GC&XApKc9a3bx90bpk}L zE2x#SFYac!&+u&Y@`=F=1gfG5G~kKz4;qhzxuVDgjB``4!xq#xgd8L{U$<WYe%JNeo9CoR5ZF0aTtMaqnI)eYtTT>mDEzL)R;{H z2IB%|FKTrA0^3L;TEfhzjQTg()AqZ5Z%Z%glD59Q&s5SzybH}#@DYGw!zFJjRi>sw z7s=Yu*n(Q<65rU=I)pnLhr9l&*zQ~IBfHsD`06qWym^NJOK7<*_Q2}RX*a{>l1{KC zwZ!{yd`Wd5&rx=)>3Sme+;P*5T8+Sij19wp#X2jPu~$;hHKQZ?JTO8;cxhc}-LZap z<#6duG06zmUR#D}dZot5oBMwwM1dkOH|_rquYFp{{>N8iNPHxmpw558~{ z#OgYU+d4T3*rS;*w>0kn@8f$%;82=UJ)^6I!a;KkoP-nEjCvA5IzSTG@TK@d!zb&w zEOM&@|kC3^c-%UPcLy|J|Dz;WQurvF~kJ;?HHAUfrX!E+e@DxL9sveCDbM z;HV}x(dzT~5x+3*sp08Q13s!ZEE#Z5Vbh9qP>^GXSP%*u2rFfA4ZB?SX?-uI{{8m8 zSLDF}5^5v}2gxJS00us40trQ>4ZjwE$A7&&`Q0Kwb}QB!fcN}?F8xOcKsyNG#-|rB zd|c+t^O}SqVayX?k@#fci08!K|URXM{ngV>HSq!wgm#w^SCNfGtG0$67 zMrbrRnJ@7|l7RLKu#Bg$W+A&o=BDG0CJKn1`4&&>^^k>W{N}S?Y1<3+jzi!kCKHVV zoY2bqG&bWSpZIxo5O_*!f^473>A{HWM^KadT1V)(%V|IH5s_`+rdmGy%@f)5irB`1 zw?~uL^S;ix1}m7g`U-V|Hf1y}`RfpszIfIF#*SA5B!@DzX&(Wx)w{6vUS z@X&L3;%EyiggS)!;hz|@fA%0vU%=f3A3**t7tf+otT|A=6jpEr^lT1(0FiR)t3g7N zK(gU+&*=`C_+Q0=JNmwVhyQ66kd)B}wrg7exEvTljp;zdi*V@R5-tGhlP;tb>$$3e zf|ye6`f*PnK*{}(Cr^!ApZ~25j!HBYvT}B}_E_Z%ga2Z*gj>zV11c^RXoUYr1eH_3 zf3d)xxqq4(Es~$9ytxlJ_W@y0gXrGJ{>h}3a5KjU%J20pT{#N{41u_LvzcZ|4r)HR zZa29SGT2_c^p$J;&txJ91t{z~HxO6mm@x?K6ge%Yi>V~Rl2=2K8vRI}8clVze7kfx zsH$ac?0wZq*5fX`O2TC^ncoKh#NQLZVz_&B3pgio#L_Bo1EM`$!{`2dC|=9P9Kb7N zVkdzXgfi=hn~-SoA;7c`fhnLEAD6FxuEyGudsZL%wpHY2ENWM!{$nbK=`bgd2GR3w zM3M+31Dd5(pXer&r5J+2)6-5#1FGG8;0AI!IM!8mVLjG?-o+@GfS72fBOU|cN!HHQOS=FrnuZplRQ_s^kmj;nhip$wVWA#SiL_1ey9shhYR&9 z%C0pA{Q?o%uF6_lUOkP$NHsDJR7)TQt>*JPaP|AbfTRyRSA^$y5nMtHEnucv4nKI)j(g&%6~$SH2^Lr!?o+osd-U)v_#;r==&F!WSfpjp4Dj z@jknSKl@R;YA2bv1LP&79*Da3#HW}8qD4hTm41@n##plZffPSuKmo7jy9L$MO9tx@ zkJiWgqcfO((aRrZ?O=jeFYEUQcIb}Dxa^_tnt)mhc3#`A9hUFJC>GSYzU&91lcl>1 zrMWDb(}3^?g|;7`VS|@Pz0ZsEl7L@G*@_pYsb@P4;-v#DH`*S8NH>bFJ+KOSH9d%D zo#`r;+WrW-snvV<2@@q)x7=ZFSZi{_HSZ&;Pm97SOUaq}h>IXTVr(}dLtgC`K*&gWix~gHNja)WERWO|aXr>&kPpH` zva<7+{l>OMWK%~`b}UJK8F=#)vyx8?R{uDoqV%WD3nBUQNBscW+4U%qbPGJ!AY%J2 zOgCK0b^aCoz!k`H(nJ#&5UO{#hCjUygIN*{qE#JZ)^rN{am>QiqFMNwMXz9mhHnF0vmJ$hH)Kw(583MykS=*THq^nB= zD0)Ci16=#)h=3;R`n=31t?(A2epsO+GzJ0ZD(z$_cb1`>CGXZYC4B@vnvSv=<@LWa zb%-3%T#Cs|o%G%<6%xWeUHy!4(m(?sXKX>;5Ic_o#87^Fy~%~arE3afYrmaQCH9NB=CSoJ`>PW!Kk#T zVv-CMwv~yrPzZZ}P%sztM1+^YUcrbAw~MobJdKUUhpdw4#69$Ezq2!fzHS(pOMK)2 z^|7~t$u$b;dHK<5J^fK|0Y+F41MU^m#T;4R1LULvpL5iLCx_Rz>2IK!v`By@R^-ln z2nYbFb2r;B0ylLsDE{XR5d7@9&)m!`_&nZorwVf>g1K|rd$9R_`wG+{xWGQAV!1*- z9Gk{Knr^AzJCQlPL`m1AYE`GhgJ(C1cfY)I5P0-w@O1HF#$0k`72-%6=-u>?ZouRJ zXL5ktvenWw$tP0=ML6HzmB8NJH|yFUjhOPpDTG}RAF^v|D$95M61_zfEYtNep*NyQ zBLMkY(sdMs=->#XKxWcTtH0ehT96n$jr@yeD~(<;k?6El0*!2m*quFrKli+Pmk;bK~~?Y z2W63Ikhg2_>X*%MmmUBvTVI)02)eKGVywCbU;BLop6V!fI>vhR)2Ga+M+6Ucie`}r zo*RNj`z_^>y*ofds)d2B+Y=K1;OQoHLt42p{8BAl1pTxUGcuHd^QWFyRz^_IUQ`Ml&!g>t*fzY@fPEg3q(`uZ zTb9L( zC_OU`kwf^PB&zd@f6c8COz^bLan|rRFV}vf3m?0(#e)cZvM+kER9$25`mO 
z{3kByKc*p6=9!6_h1i3DPtJd|X`6T|$}~8$2vQL@6+vX?80~4R5`T~^en92@JAH7y z0jD-OMt`_}^V6)Mz@&#DNBN_lAS?S}b<&G;>*JbZhtztLtY{Xc{K85(M1L_q|4lQK zqMzT+E_XNA7MRg`&@sPbmT5Ie0c(njIw~wTJvqBPqScOSw;sBn4~8a^2KD6us0Jmv zsPB`%B}RlTgd9Dzz0Rk0+VZR-dKoEOI6859KBnQGwtOGYC9?>l zrKX56XZp(v(zHELp-doaud+4j8BEX-Q`3$1AIrLoE0;f6`^y=ls$Fe$mpZS?>s{n| zgIRN)>Gryk4Lt4Z2x-6)f$F%(Y4J1)*b$>EY2T8QhxZcbqJVs!sHO=J_-WnOyiQ~w z>WzSH}31xp}B(%U=o3Y zQz@eY38IyZ+Q#F2+)KeRWD17>#h*QQBqK!^2lGTQ(5IU9yHPSQ3;#YnA+L%hBM6O) zboe`qIEsV9`XWQtij~LchU7!AQtQ&WUb?ISft^V=chf*9uIB3i%E>tbQ6*r&_%hi4 zPe5;erD(ciQxQfB;*@C_rmn&ZA(goPnC-CWfQ;ZHO`m70p6gABC#Te@Q%Oe>(guVg z`?1ltb#I4draSOwk%Smr+pZm{(bsf64&pZOEP$0P*s3V{Lo69qvdyHQ7MG%^*L)Ia zAj=}9Nl?YrsVmSg1{*~I)imJi&9$2+RF>i!*bezL{t{v7p2-0{Jf0UFNQeqzR`*A>4DUpK&tJo}u!{h`&(= z1HmYb^X=0>pvRPUXALEke}N|+na=xi|JP+E!$v&@60ZHKq>AXbMpVP+r2O4zr`oTF zvw_Jx?-T}gc}|BhQ39o)1hp2+r|tJJHH??mbZ1VEzuxM9Z)IIhdL$0+W_^Vs$`)Wj zpsVgW5;(>2{ZIjSA^+ERgUE7Dqu!K}ktyjZ#Ur7%_CY+AG;F=}(J&8yl>WCT^ zFvBancyXa6T~`E^?OvNG;8H4wARjUXe3Ij~5!ZN_@pf@PG5RvyTjb)V$1`deh8bN2 zxk?H*gEelB_yd(QtrU5BtoP3cV4PdI@{FP0c4Byf%q$*CE79lxoD-^+G}P5my2>n) zyp3gBp(s+f&%kkDL^hx7;!mjGRR;Ffj+&}G2NH}k43}w$YwPXe(P=}QWx_pZ8VGz) zQ#0Kf$*>K3y`r?kSke|>rT#?(kEPVBIXy(dD3{n7< zjDm&vwpTkW{)?{x0!5pY@LAhPkHBi{J~sP{b%LrZWkv>Bk5nmxgJ?d{dZ zsg=xJUXtgW+l|)tyEBxwrZZ1Zm7Tkdru^9GH>)b%jxNpK1n{P@4!=VEyi<5qEs)rJ})hAnMc=kL)HoCSLc&~zXR3K z!P(prX!LD;m1XYtf}3~_4L5sdHN1|FIn0vz{&a}FiM1Fbv@L9O529Z_LqhrcXRI^) z1skRF$yX4ov+>0|ApK{MQcy2(y5lftCEv}sQ7(-BSp2bjcEGfR0%u=)vsPWgqi>Y#t_S5b+eKfBJj zL2J0!SeuF9x?xYJzKEI1zsvo@xRq zKHo#cQzHU)U(1GgN}1FWWjdo2Yk?KzZ(@lw0T*uNmmkwzJ?zM7QNp1a3{;2xgC8(s z4#7g%qy>pNG8sb}K$tN=;98L*SXTcv%tZwTuAUmPr+zRen2_NNw!CeLwB_@CT zcd3x|70#A?8G|>Uf9C=zRYuXLhro2>Tq2RopiW6Lok3j3G~l^S{bszITz^Ro*eB1A zV4Qoh$O@Ju$_As6pscoppFP|j@qpUShvxa1*BKTyz}AF~$hN7X-+kGHZ^Z7nm)J3U zwf-!E^^G7iq_imDGM&!XZUN4Z3|Aom60);t}jxMPbYP_P;~1-d$;2A3n7%L-eA zIOC>GV9jrw%x!Ja2P=KZ)uH5NAF)T*^4dmViR*8FW2DzkI3)?B@p`xn36j`ZZuL1_ z9?W3F;}Fe3OuC40AwJt^S@7u&#f@L3&G0njJ53_9*)Iifba>4_9FT9140veQwbh3H zZ6Aq49@+(VYY9mQ*cgt!+T(IvD|+G$zGKya|CAIbYsIw1T#r%NR>0{=b}^w*AE<)s zN4tw*Yer@1hsa}HT`MjSNV-v!K7rcH0GHTb(ALoj)xoa_0y4sv zme-*xUuHg-Z4gi`>rdZu54Z&xNy11CzB2@2M8&j{fio1zx1^R2m&I{}5J&1?i&ic1$v@Uz8Lh0c5 zk?Y>IN6W0|_YRACNhmEghns(3hTRmUa5ul71*H3ZQrdKEP( z0N-mxVrx5RodbqZa9TUdzs@5KR3@j1hDT8U%&ETDUneal6Cu4?3J&C@yy$kDZrJ-Y zYY?p7&cXQUy?nky^0}(k({=oU^iv2*cb`0;fT35PsJS~Li47OZWHg8}*FP}wEofV}t1y#>Xf5*7fWDaTzBfk}S!=Z%`x6|tBv$GPW zA@X^NAxMn`>G`lI1=Wt=_nt5*gRf%z2AKg{V7sIhGeI+mGm-&x`>!sApkTAya^_HZ z7NS947z1$;Qu5^h>B#PjYdK;s=(2K@RP58*;?G-#+|!ksj(JJAt<=Y=-@Z@vj~>YH zx}WkE_P9JGY0U71LkJekXbQf&)Xnk=e8%;$4}%EmFxWPDCfXM5IR|H#K_T*Ox4t%! 
z;P3p8Z@sw>F{!sG$}uf889&an+J2i=Gm8{N2gn)MOQ}dkdgW73VIa0UZ}|WAxzerm z*lup8MqdBpMSFt1Yt>&sM{IL|b2_Kn7FWUM@jRGht^s%GDpyP7-rcqJ?9<0ZZKf$H zPmU|G3syb_-NF9(YIyw3%8A`8fd_beI}aSNOaH8WvP)Ru`MonUjdi_?d^U!k$anj< zB1^J*YF7W7(4r(mHlW}%iTlE*j$7@ve{%Qnd*0f))Ai%?($36CESBH3b4$j>LtR~e zJZC)?D65k&S-7|5N7(I{b*9e_Z$A0?)Mn2^b>C~08m1?UcuWx!T$XeNM9RH5r{nIDa zndxDV1MDG9XWnK|`(>k_)KYN!1u_TZz$>i4X!rB{pF4?MKUY6wy?g;W9jA~C?4cU5 zt$GLLgDa0~I!sS^q-`n+;xfv&c1y2`JYot-n!sb4Y=P>t@_kK^PXla}Qg?`W0=&#D zXP+L|?3Qo91thaU-I8PJ3m?LdB})OFW)-G#wIA78i`5+@UI35nNc-G>^d|5I$Y{`n zkK1`CVNi<%jDFMuE!-lF>^v6*#YSM$zB#?p^VL&7dvV2+Tp( zBEo_`L(G7Xz=O7cH|ER&9{c+2ZhP09iZ!5fHW3~ uHF10)132}Qy~TTVCp?Xyxi_Twz<H+7r6n@>s00f?{elF{r5}E+J4t~x6 literal 0 HcmV?d00001 diff --git a/docs/core_docs/static/img/with_structured_output.png b/docs/core_docs/static/img/with_structured_output.png new file mode 100644 index 0000000000000000000000000000000000000000..bf14853dc06348af195ee03d75e731143529d92c GIT binary patch literal 87132 zcmagGbyQXD);_F=(jwg<-QC^Y-AFe`cPZW7NP~2DN=piCy1TpkTd3#zo%ebEIAiPq z4E9=U&z{$edk4$Oh{8c*Lce5nl2mB`sjHn0r_1a!RRNz(F z2=?x)SNN~Qh4__RbPm!X9FQl`dU+8A#Km^FF9^WF3$u|C3crLX@xRS`iY8D{&dvFv zR0uY%#1|4yAdH+#_Qe5G+U@w`2YkN%=LO&Np^>^-L#Jx-Huv`sPTTSKUdL8?ZLQb& zA>rZSZ^OaGeE#=~O9~&wpBx@(>x768?0>$XUqdT$LH^InfD3d!^63pwCb2_7MZNl; zFMslHuMzS8_pAIvCfz2=P@3QW=1a_{6M6K1e=<~*k1C{G(iya|jKDwHZ2-_@5tQCxWrI}Su+(Yc|3~8Z$w--W z@k6XhgM`0X6PpDe`Nj=%yFRoblCzLxZ){hsV12}1{p*)pKb}dQ)1gKID^H%yVw0ox z(t}o$L*by~PdG_cR_gFD#VSK_P_gRhYL#DT*m_80v@MBLlFu86_iRQpCF+b!HcJbI zydu4>M~&ORs!e1I>@RmGAWTNmZ0AUH+dR-2jl;;-VKJHdch#BJWP8IfP`uO9cV*H! zR=0*zi$JM(iFc{P9tY@q82G0-7v6Gt(`XXpXs}C9(N6t9VytP`FHkm6;#|Q%V z87#2X6K~e{99KgkZzeMN-z2X0yN@PNYo?VAhS>t5fHWa|>Stf7x6%DFy=ngY;(@S| zV)MzOvH*FVjFBngZINDEM=z~#!tK5wUXdvyhxQF@4!37p6YY^+`n#r+ttP+7;%hwB za>bl-+S{cn^GS+=<57{vXxlVTo&Hlll0W)f1>fMc zPe|Q8%g}dc#oOrhU!ePJRA!Bgfzvk$Cs5rRT{{SeLg=&yA`cuM!tC2=>P9emLdWT& z8Yz3Q4UAlPW$&Q)V<%kiv~QNpPH=O#AcLjqFh87-l?q2V2)#9$_WdB7izq8(B9%H= zWAxU*m#<$dy`Q}zGfbSX7H3^$!W`MxEvLLNJktR+qIo}FCX(OB$4wcbnm8}94y~-4 zw=bwCLP|t<<*tr2rG*<@IPUFvc1=$iU0iPNoe$!Sj4W+0V)T_9V~sr6ZrlHwWfK{V zOimlV`?hj$c?8^YOSn%Xm+4a+X|&nII8tpq+*zU51siVAyIolP$Z*$aImI^8R(^G~ zsCg@&xD*FDg-9GlD~szFPvLKD)3SQjXCLsY8sWF_d}I*;-;JFYDER9m9VtEwTrtlb zkB`^OEC`s~sjPhv)EYDRn$=cV9O1Z!h(=4;>?hOp%V1t!d__u;&uUMH+kvIc&N)Vr zxHNs_w;)9|>gS&hbTT(TzM76TFr}!?Y zl>Jf{sO*>{gc=&}ge$`UCA2U{l!#h0G@prj*&b1Fr-iNWJ=~15**U%3l51QlInxl5 zN!QsIrrR+H(<|mz8hzoqgxxnudq1p`h;hFOT)oB~H{DbXX}Ee|u07t{@5;guq5hO~ z*|xWLE-+=i;mMPgp__26dAqb`-ad4v#d5~|WAYmBb|KnU%kv1*8vY@U8zRwaY*I{#B{W4)& z6h|2-tXMF2DU7k*k5IES_NG2NJIx!ORsp=Y{cg_|i_>Dh$(dH;>iBqXpW@4#HhEX99r9c)=I9n8{~S*R+JD-X~FRI8S)9BdAbJQMC`me(m7Dj|2uev>+3+k1uj zT8i|`t}uOZpU50ZjkF!Nlor~5G7jGW$qFBzEl~-Z=aR2!)+?DEF5@sgQL-vh+@8Vd zIM0x{$<)*5FQqA--Xu8GLUvChF|!h zKZ#x@_v~7hLT5tu4*{g$WKySOf77ZDEn)Dwfql#|JiqR#XN}6D&NQVPP5Fk+6%TzC_Y|lpoZ(VV|thp9T|M zh<3X~w#9wGxrF($i;LQ>znvMhR&u)&9NcdErm=G~0(_Qj>~ zm%xdRn{$Wkqo28#)~gAjqWIR3f+AN?GI**KTKi3GF6SyQr-$2&uUzJsNv`IwZ}kxb zsGiu}4`ehkWapRC+m323r?{5R$WNDF@4WsKalxR5G#U}RwnmgiprD`?_73YTCf<>0 zvH22KbN&e{^)J~Q)oeuYZ}u*O6cTzxD)R~+x5519{y&jQls;6LZVNh60+swUEwi)9 zdrZF%g_6X(jb?wY zNi6BuYAEgJqX85nDe=R4)BX>BN=4v?{h#Kzu!DX}sKWycIe*Ax0O7C1I*5HwPCnOI z&MF|}qe`cTP|Q|0A9TI@=fnBHY{3a~vgsU-w5S#<_+1su&qfZW;q``7=9139K?R``XU7F8>Yo+!h;#G*;pV>q*5?eqV{%0$TZugxtJ^IsKc$kL zC%yyz-$GnL0qAV`k{tWj#5xSX+~kY4v20ep=1T_${tJ8IzXq>yEU1%85BUX+;-l4w zNJr2d^Ba1(gaCBOMsGI!<>^x(-|}`tM~c;aocx}0 z*@H{XT92<(vJ`r<;2&y|!~qQ@GqI!ob#bL)MG^q5mSVLlJyD z?~OzPI2afg^P9i+8w3H^@2O8O&0pF+;DIR@tMtrPn;>9ghfNyv;xkDm+`xA6{97L) 
zP#dG8ab6j*;;G5bqT8bD7Z;THH`%3t{f1{we$$@F=E5)uZM2=U)g+XkUO4vI`?L9^Nx z_s_d%FF_{g@5lJmLyE{I67@BXt zEz=*MMRykJ?vH6jJwRzSfv0iAKTQ8b`6nknksru@wKT4YH*XEoy9}Y@orYMy5C8;m z0CyQ3^Qy}Ju}^8R)zyx}`K7dI`{;i_e7+EpU#Fq86;Ag~eun)%o)GVIO;NSDUL<<+ zQoOWPAzl2DHohy8%gu+GT9}}4>81URz5fHcg67fo^ShBo5VhIGz$DvODFVTtp!p)#^JC&AYzAk-e2D* zIXnFJMg?4eGjf~JM*Tnc&>}*@!;6}lk_pi0fD;h3urU_=+si(H%{h1#tCR|;siFV) zqh}%l;5osN8Hz8oy@DdL0h3Ii;?LmmMAkD8%?%9-7GHwd`nRAUpwe3H-YPG<%br^P z%H%f%;1Il3gYW5jpFyV|)Ido=0@dl+L0ZJW-Bn=a_IjzS zky2HzUIu6A;m+dA$KTE)CIkc~L$-uWP(II6o$78Q*07F)=3mh=2H#g9 z&nGjJXopSv}d{3*5h7zjHs6@OX^He&x@3-ZmC%`}zAmrTfQe_!@HXUEV1 z`WAON-}zJD_}BpT>gN2t;PF3fSOM4fXjb3UbTOxCC8IsYrv3_;akbD?W?b;;*RpipVlkR_KV{Hd>(iC^?_9q{c>Odxs# zwmM(;k%k;cmOWAxEgc)BS~G-&sV=b`e`))A2iJx^7N13L*)LV2>Z|+gQN9dwK+5HoA@I_ zI((Hw{&FOu;>g2FAD+CP+IE>*&!Bz#@9S+(=H%0YxdIhgM?a!gx==_W#Pw?+`!jA` z@7=l>%iuT;H-54E&?F; z6$2pav(m+OFIuog?|X2FGF+z}wM7BiPePYZV(WM-%G>RnCvV+48 z^*re{Y-<|fgg?@sTJle7_NLdz?O_v=r>+NF){g53Y#s z0L*@@%1CnaH2b5VT%Hp7_&@Cy2%1EIco3ml(#;};(}d>Zxoz*g;v@?~3@L)jvBLPb z95A%&n2Y)Em0K&<*K`~kYN^o(L_e`q2V5uHdf2AR!ym20=UIL1%#*(_=?!#g?3WZt z?JLIdFLcqfp9$`aYvf|b{Hx;cFUgDU|5x8b|imwS|)>;Lri)_hN`B_)XWSjZ%bo4fzWpFr(1QK2!cZM5p+{Jc%m+V_( z1t}f5fkF}k8Z{sdS39P^u9ivH!}wppIbjpabpu)*p1*rdt`Y|-6o9SS)d zjEQ&a3Lq3Jo*FIj&3tC@g6Zt;VjXoqT4gB|%O&19c+xXUA?}(HtZIC8q#M|D_k8S@$oySo!+ms!#c1DRr1I zDQd!!;a;}*d&nhv>kDz|_Vo6n9+$UT{OlD?!V3nnWxdnjShA&)(^KSM@*opYi=&ER zjhTboOExR62ly|vSRu<%ivz!*aJoPv0A0lBJi5(~71#-XLweFcq{;6<^WQM-T3i#kTKU8Kh)^3U9>y8vP->e|#H+h;s~{ zQDG59AK3Q5+_Xz@>i>~AZMG$zUSm+FoU0WHeK=M&gq1H{d4KjAyUhvqSvaj@Y@hTq zEeYoAn`#0kL2D4D7Sia(XDzG+o$k0aZgxsi!Vxm&N@zn^;b1D^Q;9K-evG)qie&|R ztLeN)HkU(Q?4~O`qCBJE65&&&1cIpPpqxAN`lIFEY{9`v*F6+Be)f6`8B3p%TJt3d z(<>#xjgcrMQETkJQBjrkP;q`y&=RKcQXdyp9&x1ky8+d(Gw2I zxnc=nV>B4EMD!@Kz-Kf5Y6ctdgr1y#CA&7^m68lR0ZNq%>H>u*RvVh*fH$h}#gw?3 z7Ic8Y4u5>zD)+JQfhaGdhok%`(3|E!0Z&pWK8V}x{%}L&LK~0NXIHV^Oc=tsUeJ2{ zdG-z>)w(pXHSokjG=5t+H);ArlIgaP`-rQ-!WkN&EI@T1#+%nWb-%4e#@UtKg4g}{ zI;p`RVQ!=6L7Dz3?<3T=wV~4Uo6lV)Dkh^#nX_QkpA-%e$dYT5hK*Y3F1>o*-|UQz z9^8?*vz2J*GEtqyC@ve|_I!G1XFOeeJ%4VnOGYpmg!yOYKz5gG#nYDvDs5CVrvBsz zXcU`X?@~vt?FU+25<1mx29&UD2VM+~|L%N9rKR0ElOs1R8soA}Eb7(SdbBRR<)BOb zLYlgFO_i?31hgUnlTNGqA_Z|?D^#1eV+!J};V6+?*=1FkgkcQAzS@qy96K ztSAG4yCp=ESKZ(Bax(Q+#9X9@0CjTKUjQ)Dk z_59uTm)wJD6XGgV#ca`we3bl+lprd^kAw&+vL>{?5cKe$SLP5F+VG4f9mQ&wOtnhd zChY+`V1$p$%`a}ONDg0_;5cw}AfN8l;}90*No)6nqNQ~mJlgd5==uS}6N@#6(5lQt z49SmoNll+Pg#*z-$b}-U@YTptlTT}6S7|Twpav|APP)1C)9Jj)r==%im1&`lI7S*1 z*^$gNw9;KdutO-ta^%ubE&kckz3QLnr4lSE0U9nRCXQn2+gq&bxPxz#?`!dvdLEPR z8=iMwL8N7EIyN#VzGN3Ap6oqf6?jy5Q&L1a?^As2*ED)`f z@9z^-M#Y8Vf^57BAL6!cM3x2G;Rs7cL`uPw>C05K%mrKNP|%}ry<2Ogc4^6vTGZ&QAuY=`_0w7RoXkTziN~Q-#hV)rY}~=?y><*Zsv2zAlJC zT+Z*Ko1F=nnoqM6Pt!sc|2wPv`CAJCNbn(1ePxpA8LA1rlU-v4oAyk$tv#i-fVHliEpt0#~^bP=`Q@8{_=Jdf8L&PaNB-55JocFHd8-S7G8mYhIC+i{9z$FWR_V1y*oyblCHs=wneAKR~o66j+i-r$#YIaD@Nvy!<@H;2T7Pg{aMbn zvt8HVs7-he`)QYrE2Vc(y;@Rq6ghgx3q)q^il0r@@WPqAoLXsTsZ8x`lp}rfqlaV zkSUT9TzDgpoom#`1g-9Yl!LC*nlacW?Y(D;l2zGyNCD0jYkwrq2C$3Fn|2eR^G;fkU*6zv7zvzO7K)VZjvLAh zKRNl$IJGqwP7fnm9n9{rJ?Af@!@#{-mb_Gcg%S0k#Od9iZ-~jU+qqBP7$0lzRE)?^ zpB?j!uk1?GUR@SG@U|!ynYAoV0&zG*JcP%c#jMYAXs%hVm8ov(Q;Sk&in5|r3U_e6 z#yFdd%Xnnt<^hX??sf3p#TV6ld@4=b%}P?h25c^m^IF-Q_rXS%n}Q^r%gCAQ&jL4u z4KJvRnXk$gFl-Xdw@@{q6|GnG4gO-w&#^!&h1Iu`{hxJ=OGL-iU?PG_&N~5t15sfY zYvTxyV?CxKk9o1T2pO1)h{IP(9*C>~gHj)TrKzgBu$|^-Vy)UIBHrpriOy_1KkWx-k z)h#{Wpy4h3wF#xSchu#cg!dGyNH2F>ol_GqG9{unu$XS*{vYB40pguaE2;lQd{{?G zJSDy5krvQm)tf9*#!U@;!%33a`Z0r&3__(o=@?m+N?+buM!Qz}l*+&IrgnGF;14JH zfc2cjB3s@G>$ci!i&q*~p0Q{29we@h|9aubbLrA-!|(}67ZokLT$HL8wcqmpMDC#a 
zW(~c8*;!@+soe>%ru8@$=FC|j(DhlVTvZg|8FjNlD~ga+!u_SyrPdcB{Bn|FnZ1!5EkkRZ*4If}K-y2=5OV5c{i5;6HQ2$s@c7OkRP$c=DWq~tk74kx%1XT zo{`2gm9~6>nv8_`eY+cYX`*v2rj=%@ZoHB`>b=Xl*n``F^QG-Gh4xsH;kt|ar+m7+ z^7D-dMO{|(uOW>C6xu_m`64!{0k;rdHFl&npJ}+_^B}8o2g5`wBR^jrIr`~xf*~kU z1IkOsG1UC4ypwl8pI|9-i2a`tw}TjXg9gF1<;Of_lfiPl;2u|3@pTua@IG7Gb*Oi~ zKL3sEo+ARP(5H$doeK5I2TlOKP8f4}B8O7yN${3|UQr}8MrnEge6nOL1_1FvuFpm{ zAEimvG?8d(7Zn)^YhQRIFWe;CbnD9=Z1uHQWH6_3VdOqaQLsGoI(qYdNOEV~lZTaQ zEpqa>k;62xqJN=iWu?o}yhv1&jy_OtC+XP4QG78=M;cS<)(iz=sr&wn?_WFz>^=bV z{bE(Zl1~eXgz%&NtA9K5|5dacyfm?(<+M&Y(RFg!mZ<4aL4V00V!nV z>XGbn_ZU-JVE2F+)Q9)L1HLb7Bab)g>dG~+No7?n=c&P}+6k(lUOxrE+@!6@)C2k@ zOzRFRACNu@w}>{=xFn!Ho1 zowdJxHCJ|Ec9}-0w>SSrlHN&Ei<%N4&dZDe0ssuKf*=@0?SWe1ZIh*{U2pn5XiOSU zEc!@&kVPn#tmx^G|70J43A^}UnX~|0w()+zw}L5=rlnou%5)yTbT(`$Z0p|S)H;d2~4y~5&9Nd7lGqSA_WS;@p5mOdP8uGfQd)gvDf4#9^S@{veKVndQ;hLQdt_I?R8a%OVCJVoA(Y6IC6|Q*= zZ%P*58V_SGG&oAylACM|xvb|Udi>-rzY{>!=^6g{|6*ZB4p2C`BA2iJ5FUzVvP2xy z2MM{{JWZOhAV?zmUXEW=TUy8M_jz)-Llw`dB<#PGvD$+TL6lbQER@TAOM|NcS`SS> z&{H+IuKdUezYbfly*9Fst1{mk+-IsH$+46w9hT7zqE`fA1f z;v|OO9PcpSn5gW-b*|nMAHE&h^71`E7l=I=C$TN$1?ep4O!{! z{mK$-aA%eIF&syZ1dURqdqYh?07fd4Cg(|}-NXb22Bvk}y{|*?3+U)sE@Gm+x3JNe zJ-*o0rc5(#qT&g*894x!Hbl#HrdA`2?>Z`9MA1P76p2FwyN$L&KJ2cmU%H?ez~2!t zxjW^`rm`%_XtlZ`p>Q}$Z=tzq8`(8&sOrCZ|$8Jg`)OUjq+Y^IPh?@sNY;% z5n*l1=r;LTu-Z`U15;mt=oZ)YR+dBel#C3#0c3JS8-xc?o@B#QH*Kh3vY>z*ohl9G zv$cru>P@4)%BT_ady%Y##`V_sPDC$L0~^E{_iNZ}JnjlkBe!;@{zMWOhP%E;fyLA| zTN-=7VHpxl1Vowv^VFWY5WT6*p$UvmT|jJ%p5r%X4bI$c_G;*|C2})RIFudpia%@> zGU)^*Yi^Gn6hs90z44SLvju?kB!wKXQWa7hXfvb*5nm`=m`>(kf$YXUh7y@h9C zIH#}NS95KwM~g~(DW#l^KX;(mV_?oZ|0VtXlm3n?@DU(#YQOL_#EXVUXf! zzYt<)Q-!Qb=N6-O{y>^o#ugCn-|Y2J7| zySIiy=R@(QDpN&b=>yCAUUr>+FRAHsfN=yqb3DmWl0UDEjlB7$sdP3I(eT`U4;J5e zStCQRya}xbWUzp(aa)6os=L8O69{GA%a(Nt2$ajR z)&Y>p5mF=UG#3Q$E+_O4h(;N4{@Hcf>%d^9~0}xA_~Y zUF7+AGk=-UmSEZEl%Tvo)tQ>p6gpzY(KL1>$Ni~VTY^fz!mdDg&D-;%=41W-sE;Be zCL!jGj(bX}N?zU%rw~AN&9@T^3o3kmx`)^Gyo{gJ(Q34Ni^XComtoMY6v9!YMFmA< z_A>nNz7K;|oQwCEY-Yt&D%5weg^>Lw^w>6a`?JsaID&-P938hS6^^{OdyE9e<{Q_x zy??FQ*YBjsSkT)e;g?9HcMhV8=oPpM;Xen@GkKzn(GmN`9OwtV8_iDYO61ml%sBb2N^eOudRo z#lj1Dt@nfF-bXM3g-0}7#OY%ZeCGgcvM74w!WFMQdxV%5s13m2C_*w$A-)f>an?Zd z7C+n@Gxro;?pbC(L!}U4SBNVXQ;5EjK12*9iG!OVA5LUxWnp014KSM2r3a`VvU_sk z$fHY?5Yg<~B6;7U7EYY|Jj~LMZVqKM3!8}@Gs+fQeZNCE}0(>Q8?Lx8g{4BiYOEc2|*2L~}qEK;6Q(>Zdy45wVb*io2Wwiwy< zjxIL&NXo08LPkbWl5w5|`Vp*%Z)D%N?mt=eTZri>+68Q08Z{mb$kr>~?h@u3enHc5 zdA&Sy%Z%+$u6pBn?3T)6b+Vn|T@*naKxY&=5P`$_D_4A>9Ft7yc4;zAetccmb16JRB)ERfE6?mSrZcYcQJ)eB(aZQNWpqZTZaP^b zWLQbdtf>67ut;qC+kwAAP~hrK1QCO1Uz>C)dGs0bB8t&~OCjWg2Lxffg+Z-Z6VRjB z#X$n5m^Pz3^uR#Z$0+}rH0aMbvR;$4YuHAAejMw?NPD3hZJEJcS zp69cLFG9TyAqk0yxZu{H2tA3lbAP9^423_rf%__)-d_4UIJz2b&Ympl`>U+W3O$w) z`6PxqT}xpwq7^LgnogxQCUts>3)@AD+q}EU!1~VwchD+}3!#M9CHpZF8=#^!PlTOq zo!qZBB^+lq9}KZG84f9J0#&!QYZ%}vgaxk)q4ti1v1$NxiOlY} zr$mp{6g~*awL6{Xk5hY(q|Gq7R-GM>!Jz4%Me-3sWnUFFdH- zwW!J6a5tBghRuY(-Y(gHb=9wqc39QALMfSrCj#tOF$e@(Nxig|z=N;}W)bP6lc1rY zJAtqe#bTv_l7OJMH+9>=pTbNj0W5(c5=*Fso7s@Q$7b_M=X5Rt zGE$SDcq~a_B&BK@tT_Kpf01grHPT_N?Yc-Bhoi*dc#WB2kggl2^AUC~O>B>j$EmPi zKtNIC=JeFE=XLsGA&~8YJ#Np)+xvBD%~76ixAD@rToExCjfGCvdQQhBY024b4rg0j z&p+Q?nB_{uLj!fWNRz|v2Smi|6AJC78esoU%?>p`S0w(RI3pvYlL{aI6$XPLUzUKs zT)q^>#z6ew2ZloDykSAA%SpSfAt-UyoBi_kwV`B2my?y(At;pj2fAGWaM2{;`?|8i z!ouVxpO)J^XWG4akCxlZ!{nQttdt5B-Xb6%sI_@C?&YLKa=D(V-L|-#>I;dAQjWfb zfXJ~mxbOMcGYxEi<)yb=5lLsKeQZrWy#qy>^>SPq94)nCDQL<(iG^2Tv)cjF{%;C5 zz-p58tPLg<5vp(2n|GhCoR_oIhf~n!Yc2E}f4HW9_`FSW6w2+ka^v5iI}qS~9&Sgd zVo`sz@MAQUmD31!`etU2%2K+Ic53Kbz>yTNOr0pUIpuWvI5 
z(lCDX8SJfGj-`4Wm0ZU!9Xt+COm`scyGP=5ot|$;b1Ra>QEMO%TTBUw$$dc73}g4W zW2y@XT;Cf`Z9u>X-mkYN-hY z9?P^P{B&Cf;^Q$*cgJ&oM05pKdJLkbHF~IH)LT8?IVFo{>4~%M9&Z;sK}jXb1Rqf< zS7h4tj=uv(*VgzlAN=ZUpZ8fU{QT!o@}9xz{#R6L>=8nXQbg1(P-N(IxxsccA)CV< z^80Tbc`dkI(5Ol}q9~Gae7%{O;N@LAuf(%@21s0}LB^Y-XyI&U??dPpnEvBf17edOV zQ(53>+aIcwuW

P1N<0&zJ|GECJ7sbazCZ)qIhx_t{U%k-66o*G?}t~}=?`P@cz5}JbSaKp5o9q4tlrL68ClJx01o`# ze2RnGV5!9w3D5h1UZq5BHLA440$6u~h6j8XSo;vOd|ZW4BJb(!5AIy<$UTBooWrVM zxLCbCk57HpKpM+-tGA{x(P8y!AwXuUcIJJ?_U5`hmEP_2r*DUbKP#>dahwqJDyL(E@Z7_N zZc@gpNxxBQ&!wBKFR~OuDpFfab_S8w`u92R2cQimiZ+sxKMtVKMrdpf6duze=dXEU z&qM=zLBWbcS%NG)RNlodgS zXR6)SHBL)%h0v?35q?}|Wy=;9q(mr?nj;hB%%iGZ25g^53R!-74|GGfK5vlyj0HS_ zJh&=~j6hOIt|tWLv1k#%j zLDra)%w0{jX)A=5i0IuN&kK#e*@V*@2%9b-ogy~~p_>N^`5+hL?u}OQr5sZtpt_2V z?#L3<$w)xLB>kk6gczOklQiS|8&w*0M9G5(x?aW3xCOFA$PcH}$)00*FdJKy+(^;- zv#5QnaJjMo;;TGb_MSXiLd;SWRC7Flkp3pw&;zh zMJT8NTI*jCiH*dSw@jr>ve(C|O~&l{KQI%gP_}!a^JoPxjiu-$*)av=YQ~;_ulKc@ z&lb!PQIVDGmV4xxVx7be=k#9TQ=(Mtjo*iAIuh3_TCOou%l+xHGBn12-17Ab4vWN; zT7-iXMNh)~q;;V@qbe6DH8P*7N3$n(#s=1Ps5L3tAL}8Pk@Le2xaNU*EwBsf>_>ij zJA3jYD8lo511-Ts1l|5U2O>4|<{3xgVCzEaCaD{U&D+}2ipNq0qIgGs#wCzDGpsAej zm-Y}XL?j`fUPys2I5v`41s$wnuRMlXJtHGsP)$b+^a!B;^Yg zhy}~PZcs@9$7SH%uaA_I7F*rX>}06Mgx%Q2>x#d|X}al2T*+YN5fNnPu1#bZcrv9M z$poiNWT=YMs@Isx6H{lVxzZnUoy}l%#$lAp%9behdhD0hSC~zZ6qm+=JQ0&gcDXL4 zh7jMTk~t0B<=$Ja=;kpgpM7y}{v;L&bVA#EkdZl30uilLKU3L?^yE zZPf`b2F+fI!(-no5%!r?U*eX^MWJ2zDdEB#USGdrzaOF4bz|;ocfv{mR^qNyMKDmK z7KJ|FZMJG)*ihVzBs+Mi<)f6i#t&8IWk)KWehs2h4Y5!7M5SpSIHc!5p^b^8ntvyl4!+007JeK7`EG#W3tg97G46D z)JyI_6uP+f2PLAn512gIsaHp}PY=*=!lxd$pLcuV9*U8wgkfX*=?*zAJL5j@O&6hz z=(dLJ4ki>DXpS#haewZh;MhNGa-Maxex0R#b}}rV{_d+YsMZ$iO3!r2wvR6NCQldVY6tARu<5}xuMTtQHh+HYtORNdE|CsPJ@czc-gpOZ zczuHt{Hu>dMg0VZyJ+%U+SVSrxy6ehudb2jFv05)N^Kbt-5=?q*YVz==u`Lx>Lo!l zd0aFJG^EgZ!@k4y?~AQAZqr_4ROa8T;~@tfHrS7sB@wT0LiLAzdUg;yP87KvaiqQS zjhQ>X%5k4jTSIDo3hp2lNSq?#keDLc=SKAuS8tvI9BN-K=zQkgK14p2F->g?WaRz?R~=J=(5e?noHuu`)@h1{Q;%yA+36GHG0vg84hu?i^8V z(t~C;TU#_HitO3U{=^%e=ZAB(hR<1&&o&DUS)kh#c6*Yi@!WuDWXC(Kn}~OkBb{-r z%%Pb?YnhO#ZoO9olZLQB7k>1o&VYJ+lc_#|7aT6vjJZ@HQXU$PFc*TJgvDJyANw=n zrLk+c`kb3!62tzb*%gP84bi}mK7mFg7OM<`GquxNHn-K{3&Vqd4}oD4l~F9&H< zB|(0Ub-z*~jNC8|1v$iUSAqtk>ta%*%)QN|X{Cb67>jjz#pW17trn^wY$DP1CXaW% zycNqS{+Wv=Y)F+{olkOE1<62QsbPNApBd|RRfDJw?2-uei{~z+@0iAU>{Cy|vy&4;7`Bq6 zXGOajF6-Xk80593^a$NN7onSrl zsTyRk>>XLi`zQpgbOri60zGTQcpuJAP#y%U{Bb{8QM=ymxjz5uwbmW2!0_&l#SOVW zC#v>ypF~9^+H5Y~Nmq4f#hvj&q$a~11V#2(XXi1wK-5fpVDxu;mpJ8V@2&vy+OlpE z-6uF*-C%e4MEc%Mi*P&sw{Vj!L)%t9#2i7Cb4_W zqOk4f-PCbE@!dV){HLhy?+M`krj<=Lo5z0F}Yj z!7LK*<3%iof6n(J;Eq|LN8$uJl#J|hpTx%Jh%#%R3?=e z^$jJ9Ww4Zn&*Mp@_~EfyD`i{<(B;XNl8kX7P9ou@KF`}oJdzx098N05>=WoaKCHo{ zH90^r-d!nYS6}3#zrj0o7xb`P9~g?CJK=;+}nk!dfuz9EWbX za&bU#$|KOTQ^4CF=5UO&t$iAKIuUG9%KCz+eHBLXW%)au(Pgst`a8^v!(wQbQ-pxE z>Wn!aD3p96%+lQsUlr?vG-eoe=H;%;hodX zM3q6Cn+5xjU);943ebwixUdl~y(4BY0QfJ;40mnK+am1S7>gQ&UHoSkz!TTgzA^F2 zaiX+q$LkKu(esjlCtNUt2^qMKD->!**TFA}9t*f>cXzQ@K*WYhtt!n3u?H z&drLy|9pRSD7o|b;LrJ*53*qSkcu&=r2_uYdvG^EZzo4M6czYa6-_W#a9-;A_BHJ*Ad0*Ww&`J&FBDp{$nsvq6^2ptM`d z06J!yj#tGyn1dd7V%Vp$*+x_(foHc>pr^TV~BiJ3pGzkUOaCA|W-WQH3V(iqceGS(;IS05&heRYz?p`$gtsXG%fI^)ig zdEHsI%=a8|)hqbI8yK5-q0AEH^vr(-!0j*^h3p9w&(+e8^Mh2oo*qlHrxHA$8mr*H zRpk;%X}8(FiRW;v5}*g_fi5iCRw2CsMF_ujU@y!i%?D_~&2`PAyPv8)xSiYf;)f2C z&EUp=g>F8~aT)2!lKh}YVx)j8nVp9#I%Dg|*Q&DBt3QUzU0+7pN! 
zKLl) zLM9PN1@8m(c;B?FLc17sVmzg46S34{o2O>sv;FCsa2l^SF3=N$eE&W`2&PoG9TzcL zpWc-!S}}r(97-`1jkdRXl*ghPIoPdAtw^bG#b!Hkc3fn<8H#q5q*H>A?^SMZG)VUD zVnTM3o=QKf*dPJXkDMKkRQ8+F-Uo53GSNufkFwvm$mB9#&DB{I6PSm{6ucxP1lxg9*$TmmV%C|2KY(|&MGES2VxIjrG-0*nbv6VoO zP!W4(6*2z#Q{dIXcjnl-m z8;FOmpO}{7lKqVo-^!tae?|_9q&@wAhBk^MAh&)}j z%#{`JCvSE6E*t&*QtzVEU*b+6RGcYF=&kDj;QPd|d)K=;xj{8E`9EW$c}--X6~A%d zVFrzU5SZPWXx&Zxik~JdQ-K0m!VTIiV+00xF1m?zkwWMG+3%|L#ZQYf_=BWDptwQ zd6Cp!jT(`KY%?GWo;oyYP5p4)++j;=d}edBi>_g>$Tfuzysz1uLMVo$7xyG1|A5p5 z9DI1^IP!ZNpblILDL+?6)=F9RNJ z_(Bdcm6D_`V{!4MyZhsI6!~tPX6Iwv1S<6}csNPWy#_h}gWDPF7@W}v3SY(N zoF*Pj`4!q9FLn#w1KR>4$seT-GgxI2pf0u~lE#+}b$RS^721D(6PH;1Lap&V5rBU` z(-c(A{PRWfxtE2|-HK&!#BxJH;Z-Jc--#Nn%co*F14SsM;%T(dDMi(%tt!GcC6YLt zjSI6BLUlcV4PDA1uV?x*h|)oh3I=8h(H~-y1!GTc{Enm{5jhoGCmK;R`#qVz&M!_ z%Td(~EmX~cjU9$FTZZkBRAAB~nKILAYOg<)q zMr$KtK2Hh{4$M@kVsj}5Co1vTw)0N4O4und;j=aDI$D&7`}OVl$^Perd`&3N#`8l+1=>F)0Ck`C!q zKtj5tyF34N^!2;%fA1I!hle<4?X~Bc@yusF8;;M=wn6RDOtp(KlHx|0ro0)WR{i;< z>agg`LIlOAA%#(O_3w%N#d0^3EvAa6E}brWDA{wnHBx54*VZT`NcPd{>OlAf6rinl zW>t&7nbLwidIdXGf9JLeb;|ptt#qwREFmf4;pO;KhOG(ixSm##{obkIVw)oYpKte% zPgq6AzSk}~q;u#di~d;CmBV*We6Var)$9NahFQ?+!lH(QjGlQrV=yTvM4fsZv|OZZ zhvbCs%+{4(s}VEjVpY8er7#m#7?Hqlg#*R3RTw~k=OvquDfPBNzq7VT=oA26G#!!T zZb#iXwztljXp&AR(~>a5xLQTYAJzskK@X=dh2u+9+&+MTCDpqCa#v)PW2m>C?s+e7 zmS!rWARI(Pah3v_H#xj91+A^Dt>fp_AXk;37TgE@CxSFW7GK)HL-m4&LNmF4iag4} zd~IR74Zw@}2RZ zCIcO_FYTUZTlYvh-_6st6yfyeSO%I5oN35mDIA{%Z_a7a@ODy!IUn9w5d>{~GkGnw zhr(MS+3;S18Cy=Z#7vq4V@ngv!&M}=t;8Y5y%YovwAYozyyYU5w03M z2ecq2>CS#O96Oo^tp9$hDtXv4xwMDkr6*1F!nIq3Tk(@;3tZvIk#Ln)=V%1=ZWDSJ zz9NlThMYSVd^ITHU7*)Gm#Sd;;L!f809T1d+XG`JutP}@p^6W}*-a2?Q_|bMzq@f> zPc#{iaR>aDYRP0Uf+C!P7TKduDHZ1<>}6ONz5O1D!Qe8J?SGz+Wjb5*eE5|cudUGQ zsPecEtp~Iu)PB~0T;e(C0-BC#GGYUmxk2MVRG3=Nb+5ntUG4+uERc4@ESzqw1J=Ve zd^&I!bjvdOv%mxVsJAuv8G61qYXByq{pPNhKBF9F=ZSjdbh@pCvRRlZb#-;A{bV?) 
zA=EoxP?A09dkZ=d&Et zO5HvB;odwy1f96eUCve-s9)kgGy8AM$8=bzgp-Ys^zrjsB##lh${@_ddgV?-kQmOl ziCT(1UFW(rQmS}&wGxriKr|r05VOXJh6UEySX%A%rEDHKX0lhR_-*9sJoX7XXxu?W zD+s~MEIybc20D$3TE}d0&$Y9yV#B66;jjGj#9aCUo587;+(vOT715q)-edK}_`J-9 zw+-E){goCH*NSGqts{5(x*uU3-K9J~Nr^ys>@yqs=*?H-s_#$5l*TJx8g#G5RPqjH zH^eSpa2cTcoQv+f_FVdbzsvr)++RKtO;+^p`8q3+_Xj2nET~ ziNA;hP&KCHF(NDsVrKX^y2VOrEVqTV%%5sZ)@pZo;IevIu;|bs*|&pkr_L1kc$Yu$ z*oF&k9R-h7LYc+JfhQP=7*< z(YlIRzt{I%Qg2M+laT5JFECd22vO9@K|UEA@O<9ysp)`Q7HEUfteEIq6Tuy zp+w4MEQ-&&94AuC)9)Sc>RRRSV=8c~!fL}&Bq@IJTYRlX5PD(tB{E)rPEsC@%FI=c zUs&7vG%U5xc4z1k5(S@hd^t2_?kb5w?ob|wlig{ZqIJAfuE?tR;S=;O6&?SVh4mkz z#2phs0Z>{%bIdR5hW*6-sCrv%TQzBizeUIrmlm~|hL`eDzK3nLTjUbtyJ8>#Crd3i z8qziO4-c`tTCj$T)yN9d%IVDFro^rNh=}2dPWV8sa$tN;-8n~qA5o2W<_9XpG(NO2 zW|Zz6s#}nGjii1!&uBc}eMH3d((_2)Q-CdKzLPhNQMe2fUyjbO3S)BwJ|%<0poix-U!k`ituekjsZ zbphzfjP%Ur?(XG+tAzmih11?fbb$(DuB=-YqFWr+sWSysMl)VN$YQJAhV_C!6x%o? z4wzb5==JWWU zo6sR7F}c#F1LFOKt-TSIUe6bU%%}#oIRWLs3Fj@ z?j==V)$kng}(D6!WcG@a@~1(%ko4 zL^r0zm1|ymwx<*+td?7r_wmtE9kuoAtyeYrgC4Z zJ+bqUqHS|Q?41t|tu)J_R#AFeayy+F>GWhMUM@h;<1^{}0J7KAS4rZ?$B%+v=ECA( z?>PP{a7bZOin9?jlHJB>+Vk^z92P-y=10%DK$k`C9F|e?!^oZfbvzFBgk-Xk)^{mk zmS<;+<42UWdQO_;OD6PwgB>P(Z$~w%;xuj+(_7%2=_p|tB(b!r;r5Qc2W8GxHm%x% zMf2pcx-#!(Wb&XxWiGGw9+rvON(IyItTjcAG)9h^(~e^YpMMWIR~@evg1_EeVzPib9$L-2>brMw{go zwQJP}D@nGBy>dfOyfdc6Xnzk6aTsqrvHEUr>U;yIRz|W14tx4_KKjZLpu0vrNhE0_ z`D^2*^u-ec(Hh0GVxWd;Ar|L5;4D5Nqe`qL){43m!ZTSbm0cCAQcW{I1T^#Ujg0YITD*H;}x>Qj)o7*>htbj6R~YO?4s-J z(hR34(7%D4@XA&;n$68^ZJwwwVh`{;e|)^VnzzE3-HDhuWQrgKNjv9}BG=jAm-sHV z$OO$P_`ZV&zQofUaVOF5S|0w)Ml^tQ=;Yxi(fZhla8+|Xd2G#xZx&C@rG_Cx6zZ&3 z3z{-I$YQwt%T$VNNl~Yj<2@$`I$*2I@tgDGJI`R1BUooS*?Dr zMymNsSNi(?cQt&OefF4^<894%>RJUh@uKaTko1tMnLU|Q=VU6SG72;2$l<#?DvY17 z{D%e33bXvnmHkJ|3T{ZIK!8WgSUdeC9q+fb>T{(|<00{4In|)+!#YLn_P4B0{t|@> zNY%Nng|nW{*j>x8%KW#4d}Hsvo6#%OMQ1U;4>$O5P~?;|Nb$`?L8f%!VH#|3v}%?g z=N)^2f5lUO-kpfem|0hV!n^MdJ!Uq$hA7^T7d`>bt-VBr5}(%@Gt@q6rKGEAUYv;JqEsinWP zhXtLARqp8}pHfNxn%0(?rqqs5+p?xTqoX31%l@%z3I*(ku&sE7E~sY@CDdx7?R{iQ z?f?3tvf%gie+EbIO<`VVBMDWm0304Ww0U{utkyNd;qC1)oB(8D!h^0$;4uaayr^A#<(n+@USWdk`BE--mm zE#KXK(0sE2N9HP*IT&WzsrPW4jO?o5u8GxB0;5}>M-pIA_3U{xV(d0&D0SM zdDKOX>Ps(mFiWgy#b~=?=q=W?hVv3>?ie2zF9|X5ESu&KEh}`%-mWwVqg$3k#9*m| z6iV4*pVUHomAc|Wce=$YqCx5SRbQ#<`(@+~8J*Q9?K{RH@zy(e!gA=D4AAgZ2#qxj zZ{aKf(D`g%KPm1fc8-Y!*ts6&1_0x4ZuA6cf}CZsY2$xxu)o7lcV5C;S%KAVCpI)d z6UH8P%FPR(%MIJ>#2Qz1e~ zOCi)fu0CC4Y(5!fzkP(q#Ll(R{9+9FJeF&YXAy4D` zFv$7FhyJ!)hd2J5o~D+WjE>$r9l~TZ6!)YPKL=I^&~P&QR%aE3r39M3@BbT2AIuPk z4#(6wk9J>1QzK^A2W`a8nYU(>^Or@p=xV&!-PJ14gt!L0ClE6KIl#Z)uILbOH+LJ^ z)xY=dfBQ>>9VBdyT}M#-U#Hh)G?v1qo literal 0 HcmV?d00001 diff --git a/docs/core_docs/vercel.json b/docs/core_docs/vercel.json index d573c16d6e7b..236cab60f05d 100644 --- a/docs/core_docs/vercel.json +++ b/docs/core_docs/vercel.json @@ -82,4 +82,4 @@ "destination": "https://langchain-ai.github.io/langgraphjs/troubleshooting/errors/MULTIPLE_SUBGRAPHS/" } ] -} +} \ No newline at end of file diff --git a/langchain-core/README.md b/langchain-core/README.md index 8c537e2a7a9c..21423ca7d287 100644 --- a/langchain-core/README.md +++ b/langchain-core/README.md @@ -83,7 +83,7 @@ Streaming (and streaming of intermediate steps) is needed to show the user that Async interfaces are nice when moving into production. Rather than having to write multiple implementations for all of those, LCEL allows you to write a runnable once and invoke it in many different ways. -For more check out the [LCEL docs](https://js.langchain.com/docs/concepts#langchain-expression-language). 
+For more check out the [LCEL docs](https://js.langchain.com/docs/concepts/lcel). ![LangChain Stack](../docs/core_docs/static/svg/langchain_stack_062024.svg) diff --git a/langchain/README.md b/langchain/README.md index d1e6717a268f..73f56d956129 100644 --- a/langchain/README.md +++ b/langchain/README.md @@ -34,7 +34,7 @@ LangChain is written in TypeScript and can be used in: - **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.) This framework consists of several parts. -- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://js.langchain.com/docs/concepts#langchain-expression-language), [components](https://js.langchain.com/docs/concepts), and [third-party integrations](https://js.langchain.com/docs/integrations/platforms/). +- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://js.langchain.com/docs/concepts/lcel), [components](https://js.langchain.com/docs/concepts), and [third-party integrations](https://js.langchain.com/docs/integrations/platforms/). Use [LangGraph.js](https://js.langchain.com/docs/concepts/#langgraphjs) to build stateful agents with first-class streaming and human-in-the-loop support. - **Productionization**: Use [LangSmith](https://docs.smith.langchain.com/) to inspect, monitor and evaluate your chains, so that you can continuously optimize and deploy with confidence. - **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/). diff --git a/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb b/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb index 6f43bc0455f0..5a27e5839758 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb @@ -1,225 +1,225 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "67db2992", - "metadata": { - "vscode": { - "languageId": "raw" - } - }, - "source": [ - "---\n", - "sidebar_label: __sidebar_label__\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9597802c", - "metadata": {}, - "source": [ - "# __module_name__\n", - "\n", - "- [ ] TODO: Make sure API reference link is correct\n", - "\n", - "This will help you get started with __sidebar_label__ [text completion models (LLMs)](/docs/concepts#llms) using LangChain. 
For detailed documentation on `__module_name__` features and configuration options, please refer to the [API reference](__api_ref_module__).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "- TODO: Fill in table features.\n", - "- TODO: Remove Python support link if not relevant, otherwise ensure link is correct.\n", - "- TODO: Make sure API reference links are correct.\n", - "\n", - "| Class | Package | Local | Serializable | [PY support](__python_doc_url__) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`__module_name__`](__api_ref_module__) | [`__package_name__`](https://npmjs.com/__package_name__) | __local__ | __serializable__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "- [ ] TODO: Update with relevant info.\n", - "\n", - "To access __sidebar_label__ models you'll need to create a/an __sidebar_label__ account, get an API key, and install the `__package_name__` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "- TODO: Update with relevant info.\n", - "\n", - "Head to (TODO: link) to sign up to __sidebar_label__ and generate an API key. Once you've done this set the `__env_var_name__` environment variable:\n", - "\n", - "```bash\n", - "export __env_var_name__=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain __module_name__ integration lives in the `__package_name__` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " __package_name__ @langchain/core\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "0a760037", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and generate chat completions:\n", - "\n", - "- TODO: Update model instantiation with relevant params." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a0562a13", - "metadata": { - "vscode": { - "languageId": "typescript" + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: __sidebar_label__\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# __module_name__\n", + "\n", + "- [ ] TODO: Make sure API reference link is correct\n", + "\n", + "This will help you get started with __sidebar_label__ [text completion models (LLMs)](/docs/concepts/text_llms) using LangChain. 
For detailed documentation on `__module_name__` features and configuration options, please refer to the [API reference](__api_ref_module__).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "- TODO: Fill in table features.\n", + "- TODO: Remove Python support link if not relevant, otherwise ensure link is correct.\n", + "- TODO: Make sure API reference links are correct.\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](__python_doc_url__) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [`__module_name__`](__api_ref_module__) | [`__package_name__`](https://npmjs.com/__package_name__) | __local__ | __serializable__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "- [ ] TODO: Update with relevant info.\n", + "\n", + "To access __sidebar_label__ models you'll need to create a/an __sidebar_label__ account, get an API key, and install the `__package_name__` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "- TODO: Update with relevant info.\n", + "\n", + "Head to (TODO: link) to sign up to __sidebar_label__ and generate an API key. Once you've done this set the `__env_var_name__` environment variable:\n", + "\n", + "```bash\n", + "export __env_var_name__=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain __module_name__ integration lives in the `__package_name__` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " __package_name__ @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:\n", + "\n", + "- TODO: Update model instantiation with relevant params." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a0562a13", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "import { __module_name__ } from \"__full_import_path__\"\n", + "\n", + "const llm = new __module_name__({\n", + " model: \"model-name\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "- [ ] TODO: Run cells so output can be seen." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "035dea0f", + "metadata": { + "tags": [], + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "const inputText = \"__module_name__ is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n", + "\n", + "- TODO: Run cells so output can be seen." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "078e9db2", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "## TODO: Any functionality specific to this model provider\n", + "\n", + "E.g. creating/using finetuned models via this provider. Delete if not relevant" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `__module_name__` features and configurations head to the API reference: __api_ref_module__" + ] } - }, - "outputs": [], - "source": [ - "import { __module_name__ } from \"__full_import_path__\"\n", - "\n", - "const llm = new __module_name__({\n", - " model: \"model-name\",\n", - " temperature: 0,\n", - " maxTokens: undefined,\n", - " timeout: undefined,\n", - " maxRetries: 2,\n", - " // other params...\n", - "})" - ] - }, - { - "cell_type": "markdown", - "id": "0ee90032", - "metadata": {}, - "source": [ - "## Invocation\n", - "\n", - "- [ ] TODO: Run cells so output can be seen." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "035dea0f", - "metadata": { - "tags": [], + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.11.1 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + }, "vscode": { - "languageId": "typescript" + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } } - }, - "outputs": [], - "source": [ - "const inputText = \"__module_name__ is an AI company that \"\n", - "\n", - "const completion = await llm.invoke(inputText)\n", - "completion" - ] - }, - { - "cell_type": "markdown", - "id": "add38532", - "metadata": {}, - "source": [ - "## Chaining\n", - "\n", - "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n", - "\n", - "- TODO: Run cells so output can be seen." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "078e9db2", - "metadata": { - "vscode": { - "languageId": "typescript" - } - }, - "outputs": [], - "source": [ - "import { PromptTemplate } from \"@langchain/core/prompts\"\n", - "\n", - "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", - "\n", - "const chain = prompt.pipe(llm);\n", - "await chain.invoke(\n", - " {\n", - " output_language: \"German\",\n", - " input: \"I love programming.\",\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e99eef30", - "metadata": {}, - "source": [ - "## TODO: Any functionality specific to this model provider\n", - "\n", - "E.g. creating/using finetuned models via this provider. Delete if not relevant" - ] - }, - { - "cell_type": "markdown", - "id": "e9bdfcef", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all `__module_name__` features and configurations head to the API reference: __api_ref_module__" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.11.1 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" }, - "vscode": { - "interpreter": { - "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb b/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb index e9e9f602ff76..b5d950b75b37 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/text_embedding.ipynb @@ -1,228 +1,228 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "afaf8039", - "metadata": { - "vscode": { - "languageId": "raw" + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: __sidebar_label__\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9a3d6f34", + "metadata": {}, + "source": [ + "# __module_name__\n", + "\n", + "- [ ] TODO: Make sure API reference link is correct\n", + "\n", + "This will help you get started with __sidebar_label__ [embedding models](/docs/concepts/embedding_models) using LangChain. 
For detailed documentation on `__module_name__` features and configuration options, please refer to the [API reference](__api_ref_module__).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "- TODO: Fill in table features.\n", + "- TODO: Remove Python support link if not relevant, otherwise ensure link is correct.\n", + "- TODO: Make sure API reference links are correct.\n", + "\n", + "| Class | Package | Local | [Py support](__python_doc_url__) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: |\n", + "| [`__module_name__`](__api_ref_module__) | [`__package_name__`](https://npmjs.com/__package_name__) | __local__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "- [ ] TODO: Update with relevant info.\n", + "\n", + "To access __sidebar_label__ embedding models you'll need to create a/an __sidebar_label__ account, get an API key, and install the `__package_name__` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "- TODO: Update with relevant info.\n", + "\n", + "Head to (TODO: link) to sign up to `__sidebar_label__` and generate an API key. Once you've done this set the `__env_var_name__` environment variable:\n", + "\n", + "```bash\n", + "export __env_var_name__=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain __module_name__ integration lives in the `__package_name__` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " __package_name__ @langchain/core\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "45dd1724", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and embed text:\n", + "\n", + "- TODO: Update model instantiation with relevant params." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ea7a09b", + "metadata": {}, + "outputs": [], + "source": [ + "import { __module_name__ } from \"__full_import_path__\";\n", + "\n", + "const embeddings = new __module_name__({\n", + " model: \"model-name\",\n", + " // ...\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "77d271b6", + "metadata": {}, + "source": [ + "## Indexing and Retrieval\n", + "\n", + "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", + "\n", + "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d817716b", + "metadata": {}, + "outputs": [], + "source": [ + "// Create a vector store with a sample text\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", + "\n", + "const vectorstore = await MemoryVectorStore.fromDocuments(\n", + " [{ pageContent: text, metadata: {} }],\n", + " embeddings,\n", + ");\n", + "\n", + "// Use the vector store as a retriever that returns a single document\n", + "const retriever = vectorstore.asRetriever(1);\n", + "\n", + "// Retrieve the most similar text\n", + "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", + "\n", + "retrievedDocuments[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "id": "e02b9855", + "metadata": {}, + "source": [ + "## Direct Usage\n", + "\n", + "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", + "\n", + "You can directly call these methods to get embeddings for your own use cases.\n", + "\n", + "### Embed single texts\n", + "\n", + "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d2befcd", + "metadata": {}, + "outputs": [], + "source": [ + "const singleVector = await embeddings.embedQuery(text);\n", + "\n", + "console.log(singleVector.slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "1b5a7d03", + "metadata": {}, + "source": [ + "### Embed multiple texts\n", + "\n", + "You can embed multiple texts for indexing with `embedDocuments`. The internals used for this method may (but do not have to) differ from embedding queries:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f4d6e97", + "metadata": {}, + "outputs": [], + "source": [ + "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", + "\n", + "const vectors = await embeddings.embedDocuments([text, text2]);\n", + "\n", + "console.log(vectors[0].slice(0, 100));\n", + "console.log(vectors[1].slice(0, 100));" + ] + }, + { + "cell_type": "markdown", + "id": "8938e581", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all __module_name__ features and configurations head to the API reference: __api_ref_module__" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "typescript", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.5" } - }, - "source": [ - "---\n", - "sidebar_label: __sidebar_label__\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "9a3d6f34", - "metadata": {}, - "source": [ - "# __module_name__\n", - "\n", - "- [ ] TODO: Make sure API reference link is correct\n", - "\n", - "This will help you get started with __sidebar_label__ [embedding models](/docs/concepts#embedding-models) using LangChain. 
For detailed documentation on `__module_name__` features and configuration options, please refer to the [API reference](__api_ref_module__).\n", - "\n", - "## Overview\n", - "### Integration details\n", - "\n", - "- TODO: Fill in table features.\n", - "- TODO: Remove Python support link if not relevant, otherwise ensure link is correct.\n", - "- TODO: Make sure API reference links are correct.\n", - "\n", - "| Class | Package | Local | [Py support](__python_doc_url__) | Package downloads | Package latest |\n", - "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [`__module_name__`](__api_ref_module__) | [`__package_name__`](https://npmjs.com/__package_name__) | __local__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |\n", - "\n", - "## Setup\n", - "\n", - "- [ ] TODO: Update with relevant info.\n", - "\n", - "To access __sidebar_label__ embedding models you'll need to create a/an __sidebar_label__ account, get an API key, and install the `__package_name__` integration package.\n", - "\n", - "### Credentials\n", - "\n", - "- TODO: Update with relevant info.\n", - "\n", - "Head to (TODO: link) to sign up to `__sidebar_label__` and generate an API key. Once you've done this set the `__env_var_name__` environment variable:\n", - "\n", - "```bash\n", - "export __env_var_name__=\"your-api-key\"\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```bash\n", - "# export LANGCHAIN_TRACING_V2=\"true\"\n", - "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```\n", - "\n", - "### Installation\n", - "\n", - "The LangChain __module_name__ integration lives in the `__package_name__` package:\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " __package_name__ @langchain/core\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "45dd1724", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "Now we can instantiate our model object and embed text:\n", - "\n", - "- TODO: Update model instantiation with relevant params." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9ea7a09b", - "metadata": {}, - "outputs": [], - "source": [ - "import { __module_name__ } from \"__full_import_path__\";\n", - "\n", - "const embeddings = new __module_name__({\n", - " model: \"model-name\",\n", - " // ...\n", - "});" - ] - }, - { - "cell_type": "markdown", - "id": "77d271b6", - "metadata": {}, - "source": [ - "## Indexing and Retrieval\n", - "\n", - "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our RAG tutorials under the [working with external knowledge tutorials](/docs/tutorials/#working-with-external-knowledge).\n", - "\n", - "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document using the demo [`MemoryVectorStore`](/docs/integrations/vectorstores/memory)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d817716b", - "metadata": {}, - "outputs": [], - "source": [ - "// Create a vector store with a sample text\n", - "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", - "\n", - "const text = \"LangChain is the framework for building context-aware reasoning applications\";\n", - "\n", - "const vectorstore = await MemoryVectorStore.fromDocuments(\n", - " [{ pageContent: text, metadata: {} }],\n", - " embeddings,\n", - ");\n", - "\n", - "// Use the vector store as a retriever that returns a single document\n", - "const retriever = vectorstore.asRetriever(1);\n", - "\n", - "// Retrieve the most similar text\n", - "const retrievedDocuments = await retriever.invoke(\"What is LangChain?\");\n", - "\n", - "retrievedDocuments[0].pageContent;" - ] - }, - { - "cell_type": "markdown", - "id": "e02b9855", - "metadata": {}, - "source": [ - "## Direct Usage\n", - "\n", - "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embedDocument(...)` and `embeddings.embedQuery(...)` to create embeddings for the text(s) used in `fromDocuments` and the retriever's `invoke` operations, respectively.\n", - "\n", - "You can directly call these methods to get embeddings for your own use cases.\n", - "\n", - "### Embed single texts\n", - "\n", - "You can embed queries for search with `embedQuery`. This generates a vector representation specific to the query:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0d2befcd", - "metadata": {}, - "outputs": [], - "source": [ - "const singleVector = await embeddings.embedQuery(text);\n", - "\n", - "console.log(singleVector.slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "1b5a7d03", - "metadata": {}, - "source": [ - "### Embed multiple texts\n", - "\n", - "You can embed multiple texts for indexing with `embedDocuments`. 
The internals used for this method may (but do not have to) differ from embedding queries:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f4d6e97", - "metadata": {}, - "outputs": [], - "source": [ - "const text2 = \"LangGraph is a library for building stateful, multi-actor applications with LLMs\";\n", - "\n", - "const vectors = await embeddings.embedDocuments([text, text2]);\n", - "\n", - "console.log(vectors[0].slice(0, 100));\n", - "console.log(vectors[1].slice(0, 100));" - ] - }, - { - "cell_type": "markdown", - "id": "8938e581", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all __module_name__ features and configurations head to the API reference: __api_ref_module__" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "TypeScript", - "language": "typescript", - "name": "tslab" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "typescript", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb b/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb index 72965b603427..f3a4714a0f62 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/vectorstores.ipynb @@ -1,367 +1,367 @@ { - "cells": [ - { - "cell_type": "raw", - "id": "1957f5cb", - "metadata": { - "vscode": { - "languageId": "raw" + "cells": [ + { + "cell_type": "raw", + "id": "1957f5cb", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: __sidebar_label__\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "ef1f0986", + "metadata": {}, + "source": [ + "# __module_name__\n", + "\n", + "- TODO: Add any other relevant links, like information about underlying API, etc.\n", + "\n", + "This guide provides a quick overview for getting started with __sidebar_label__ [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `__module_name__` features and configurations head to the [API reference](__api_ref_module__)." + ] + }, + { + "cell_type": "markdown", + "id": "c824838d", + "metadata": {}, + "source": [ + "## Overview\n", + "\n", + "### Integration details\n", + "\n", + "- TODO: Make sure links and features are correct\n", + "\n", + "| Class | Package | [PY support](__python_doc_url__) | Package latest |\n", + "| :--- | :--- | :---: | :---: |\n", + "| [`__module_name__`](__api_ref_module__) | [`__package_name__`](https://npmjs.com/__package_name__) | __py_support__ | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |" + ] + }, + { + "cell_type": "markdown", + "id": "36fdc060", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "- TODO: Update with relevant info.\n", + "- TODO: Update minimum version to be correct.\n", + "\n", + "To use __sidebar_label__ vector stores, you'll need to create a/an __sidebar_label__ account, get an API key, and install the `__package_name__` integration package.\n", + "\n", + "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. 
You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " __package_name__ @langchain/openai @langchain/core\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "- TODO: Update with relevant info.\n", + "\n", + "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __env_var_name__ environment variable:\n", + "\n", + "```typescript\n", + "process.env.__env_var_name__ = \"your-api-key\"\n", + "```\n", + "\n", + "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", + "\n", + "```typescript\n", + "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```typescript\n", + "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", + "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "93df377e", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "- TODO: Fill out with relevant init params" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", + "metadata": { + "tags": [], + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "import { __module_name__ } from \"__full_import_path__\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const embeddings = new OpenAIEmbeddings({\n", + " model: \"text-embedding-3-small\",\n", + "});\n", + "\n", + "const vectorStore = new __module_name__(embeddings);" + ] + }, + { + "cell_type": "markdown", + "id": "ac6071d4", + "metadata": {}, + "source": [ + "## Manage vector store\n", + "\n", + "### Add items to vector store\n", + "\n", + "- TODO: Edit and then run code cell to generate output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17f5efc0", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "import type { Document } from \"@langchain/core/documents\";\n", + "\n", + "const document1: Document = {\n", + " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document2: Document = {\n", + " pageContent: \"Buildings are made out of brick\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document3: Document = {\n", + " pageContent: \"Mitochondria are made out of lipids\",\n", + " metadata: { source: \"https://example.com\" }\n", + "};\n", + "\n", + "const document4: Document = {\n", + " pageContent: \"The 2024 Olympics are in Paris\",\n", + " metadata: { source: \"https://example.com\" }\n", + "}\n", + "\n", + "const documents = [document1, document2, document3, document4];\n", + "\n", + "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "dcf1b905", + "metadata": {}, + "source": [ + "### Delete items from vector store\n", + "\n", + "- TODO: Edit and then run code cell to generate output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"id": "ef61e188", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "await vectorStore.delete({ ids: [\"4\"] });" + ] + }, + { + "cell_type": "markdown", + "id": "c3620501", + "metadata": {}, + "source": [ + "## Query vector store\n", + "\n", + "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", + "\n", + "### Query directly\n", + "\n", + "Performing a simple similarity search can be done as follows:\n", + "\n", + "- TODO: Edit filter syntax and then run code cell to generate output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aa0a16fa", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "const filter = { source: \"https://example.com\" };\n", + "\n", + "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", + "\n", + "for (const doc of similaritySearchResults) {\n", + " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3ed9d733", + "metadata": {}, + "source": [ + "- TODO: Update page with information on filter syntax\n", + "\n", + "See [this page](__LINK__) for more on __module_name__ filter syntax.\n", + "\n", + "If you want to execute a similarity search and receive the corresponding scores you can run:\n", + "\n", + "- TODO: Edit and then run code cell to generate output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5efd2eaa", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", + "\n", + "for (const [doc, score] of similaritySearchWithScoreResults) {\n", + " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0c235cdc", + "metadata": {}, + "source": [ + "### Query by turning into retriever\n", + "\n", + "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. \n", + "\n", + "- TODO: Edit and then run code cell to generate output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3460093", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "const retriever = vectorStore.asRetriever({\n", + " // Optional filter\n", + " filter: filter,\n", + " k: 2,\n", + "});\n", + "await retriever.invoke(\"biology\");" + ] + }, + { + "cell_type": "markdown", + "id": "e2e0a211", + "metadata": {}, + "source": [ + "### Usage for retrieval-augmented generation\n", + "\n", + "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", + "\n", + "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", + "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", + "- [Retrieval conceptual docs](/docs/concepts/retrieval)" + ] + }, + { + "cell_type": "markdown", + "id": "069f1b5f", + "metadata": {}, + "source": [ + "## TODO: Any functionality specific to this vector store\n", + "\n", + "E.g. creating a persistent database to save to your disk, etc." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f71ce986", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "8a27244f", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all __module_name__ features and configurations head to the [API reference](__api_ref_module__)." + ] } - }, - "source": [ - "---\n", - "sidebar_label: __sidebar_label__\n", - "---" - ] - }, - { - "cell_type": "markdown", - "id": "ef1f0986", - "metadata": {}, - "source": [ - "# __module_name__\n", - "\n", - "- TODO: Add any other relevant links, like information about underlying API, etc.\n", - "\n", - "This guide provides a quick overview for getting started with __sidebar_label__ [vector stores](/docs/concepts/#vectorstores). For detailed documentation of all `__module_name__` features and configurations head to the [API reference](__api_ref_module__)." - ] - }, - { - "cell_type": "markdown", - "id": "c824838d", - "metadata": {}, - "source": [ - "## Overview\n", - "\n", - "### Integration details\n", - "\n", - "- TODO: Make sure links and features are correct\n", - "\n", - "| Class | Package | [PY support](__python_doc_url__) | Package latest |\n", - "| :--- | :--- | :---: | :---: |\n", - "| [`__module_name__`](__api_ref_module__) | [`__package_name__`](https://npmjs.com/__package_name__) | __py_support__ | ![NPM - Version](https://img.shields.io/npm/v/__package_name__?style=flat-square&label=%20&) |" - ] - }, - { - "cell_type": "markdown", - "id": "36fdc060", - "metadata": {}, - "source": [ - "## Setup\n", - "\n", - "- TODO: Update with relevant info.\n", - "- TODO: Update minimum version to be correct.\n", - "\n", - "To use __sidebar_label__ vector stores, you'll need to create a/an __sidebar_label__ account, get an API key, and install the `__package_name__` integration package.\n", - "\n", - "This guide will also use [OpenAI embeddings](/docs/integrations/text_embedding/openai), which require you to install the `@langchain/openai` integration package. You can also use [other supported embeddings models](/docs/integrations/text_embedding) if you wish.\n", - "\n", - "```{=mdx}\n", - "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", - "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", - "\n", - "\n", - "\n", - "\n", - " __package_name__ @langchain/openai @langchain/core\n", - "\n", - "```\n", - "\n", - "### Credentials\n", - "\n", - "- TODO: Update with relevant info.\n", - "\n", - "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. 
Once you've done this set the __env_var_name__ environment variable:\n", - "\n", - "```typescript\n", - "process.env.__env_var_name__ = \"your-api-key\"\n", - "```\n", - "\n", - "If you are using OpenAI embeddings for this guide, you'll need to set your OpenAI key as well:\n", - "\n", - "```typescript\n", - "process.env.OPENAI_API_KEY = \"YOUR_API_KEY\";\n", - "```\n", - "\n", - "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", - "\n", - "```typescript\n", - "// process.env.LANGCHAIN_TRACING_V2=\"true\"\n", - "// process.env.LANGCHAIN_API_KEY=\"your-api-key\"\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "93df377e", - "metadata": {}, - "source": [ - "## Instantiation\n", - "\n", - "- TODO: Fill out with relevant init params" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dc37144c-208d-4ab3-9f3a-0407a69fe052", - "metadata": { - "tags": [], - "vscode": { - "languageId": "typescript" + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" } - }, - "outputs": [], - "source": [ - "import { __module_name__ } from \"__full_import_path__\";\n", - "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", - "\n", - "const embeddings = new OpenAIEmbeddings({\n", - " model: \"text-embedding-3-small\",\n", - "});\n", - "\n", - "const vectorStore = new __module_name__(embeddings);" - ] - }, - { - "cell_type": "markdown", - "id": "ac6071d4", - "metadata": {}, - "source": [ - "## Manage vector store\n", - "\n", - "### Add items to vector store\n", - "\n", - "- TODO: Edit and then run code cell to generate output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17f5efc0", - "metadata": { - "vscode": { - "languageId": "typescript" - } - }, - "outputs": [], - "source": [ - "import type { Document } from \"@langchain/core/documents\";\n", - "\n", - "const document1: Document = {\n", - " pageContent: \"The powerhouse of the cell is the mitochondria\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document2: Document = {\n", - " pageContent: \"Buildings are made out of brick\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document3: Document = {\n", - " pageContent: \"Mitochondria are made out of lipids\",\n", - " metadata: { source: \"https://example.com\" }\n", - "};\n", - "\n", - "const document4: Document = {\n", - " pageContent: \"The 2024 Olympics are in Paris\",\n", - " metadata: { source: \"https://example.com\" }\n", - "}\n", - "\n", - "const documents = [document1, document2, document3, document4];\n", - "\n", - "await vectorStore.addDocuments(documents, { ids: [\"1\", \"2\", \"3\", \"4\"] });" - ] - }, - { - "cell_type": "markdown", - "id": "dcf1b905", - "metadata": {}, - "source": [ - "### Delete items from vector store\n", - "\n", - "- TODO: Edit and then run code cell to generate output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ef61e188", - "metadata": { - "vscode": { - "languageId": "typescript" - } - }, - "outputs": [], - "source": [ - "await vectorStore.delete({ ids: [\"4\"] });" - ] - }, - { 
- "cell_type": "markdown", - "id": "c3620501", - "metadata": {}, - "source": [ - "## Query vector store\n", - "\n", - "Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n", - "\n", - "### Query directly\n", - "\n", - "Performing a simple similarity search can be done as follows:\n", - "\n", - "- TODO: Edit filter syntax and then run code cell to generate output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa0a16fa", - "metadata": { - "vscode": { - "languageId": "typescript" - } - }, - "outputs": [], - "source": [ - "const filter = { source: \"https://example.com\" };\n", - "\n", - "const similaritySearchResults = await vectorStore.similaritySearch(\"biology\", 2, filter);\n", - "\n", - "for (const doc of similaritySearchResults) {\n", - " console.log(`* ${doc.pageContent} [${JSON.stringify(doc.metadata, null)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3ed9d733", - "metadata": {}, - "source": [ - "- TODO: Update page with information on filter syntax\n", - "\n", - "See [this page](__LINK__) for more on __module_name__ filter syntax.\n", - "\n", - "If you want to execute a similarity search and receive the corresponding scores you can run:\n", - "\n", - "- TODO: Edit and then run code cell to generate output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5efd2eaa", - "metadata": { - "vscode": { - "languageId": "typescript" - } - }, - "outputs": [], - "source": [ - "const similaritySearchWithScoreResults = await vectorStore.similaritySearchWithScore(\"biology\", 2, filter)\n", - "\n", - "for (const [doc, score] of similaritySearchWithScoreResults) {\n", - " console.log(`* [SIM=${score.toFixed(3)}] ${doc.pageContent} [${JSON.stringify(doc.metadata)}]`);\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "0c235cdc", - "metadata": {}, - "source": [ - "### Query by turning into retriever\n", - "\n", - "You can also transform the vector store into a [retriever](/docs/concepts/#retrievers) for easier usage in your chains. \n", - "\n", - "- TODO: Edit and then run code cell to generate output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f3460093", - "metadata": { - "vscode": { - "languageId": "typescript" - } - }, - "outputs": [], - "source": [ - "const retriever = vectorStore.asRetriever({\n", - " // Optional filter\n", - " filter: filter,\n", - " k: 2,\n", - "});\n", - "await retriever.invoke(\"biology\");" - ] - }, - { - "cell_type": "markdown", - "id": "e2e0a211", - "metadata": {}, - "source": [ - "### Usage for retrieval-augmented generation\n", - "\n", - "For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n", - "\n", - "- [Tutorials: working with external knowledge](/docs/tutorials/#working-with-external-knowledge).\n", - "- [How-to: Question and answer with RAG](/docs/how_to/#qa-with-rag)\n", - "- [Retrieval conceptual docs](/docs/concepts#retrieval)" - ] - }, - { - "cell_type": "markdown", - "id": "069f1b5f", - "metadata": {}, - "source": [ - "## TODO: Any functionality specific to this vector store\n", - "\n", - "E.g. creating a persistent database to save to your disk, etc." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f71ce986", - "metadata": { - "vscode": { - "languageId": "typescript" - } - }, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "id": "8a27244f", - "metadata": {}, - "source": [ - "## API reference\n", - "\n", - "For detailed documentation of all __module_name__ features and configurations head to the [API reference](__api_ref_module__)." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file From 3673a58e10cc137477e15b5a9d0e85f2bae5d895 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 19:55:09 -0700 Subject: [PATCH 040/100] chore(deps): bump langchain-community from 0.2.9 to 0.3.0 in /libs/langchain-community/src/vectorstores/tests/faiss.int.test.data in the pip group across 1 directory (#7121) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../src/vectorstores/tests/faiss.int.test.data/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt b/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt index 48c48d373c6d..532c1a53510f 100644 --- a/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt +++ b/libs/langchain-community/src/vectorstores/tests/faiss.int.test.data/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.0 -langchain-community==0.2.9 \ No newline at end of file +langchain-community==0.3.0 \ No newline at end of file From 81c865ab8ebfc40c3201be4ff61781e01f16b440 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Wed, 30 Oct 2024 22:54:11 -0700 Subject: [PATCH 041/100] docs: Remove dead link (#7124) --- docs/core_docs/docs/concepts/index.mdx | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/core_docs/docs/concepts/index.mdx b/docs/core_docs/docs/concepts/index.mdx index d01291666ab4..709e31ef9af1 100644 --- a/docs/core_docs/docs/concepts/index.mdx +++ b/docs/core_docs/docs/concepts/index.mdx @@ -43,7 +43,6 @@ The conceptual guide does not cover step-by-step instructions or specific implem - **[AIMessageChunk](/docs/concepts/messages#aimessagechunk)**: A partial response from an AI message. Used when streaming responses from a chat model. - **[AIMessage](/docs/concepts/messages#aimessage)**: Represents a complete response from an AI model. -- **[streamEvents](/docs/concepts/streaming#streamevents)**: Stream granular information from [LCEL](/docs/concepts/lcel) chains. - **[StructuredTool](/docs/concepts/tools#structuredtool)**: The base class for all tools in LangChain. - **[batch](/docs/concepts/runnables)**: Use to execute a runnable with batch inputs a Runnable. - **[bindTools](/docs/concepts/chat_models#bind-tools)**: Allows models to interact with tools. 
From 73c538a930f0936f3c1103a5bc154232793fb074 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Wed, 30 Oct 2024 22:56:29 -0700 Subject: [PATCH 042/100] docs: Fix typo (#7125) --- docs/core_docs/docs/concepts/architecture.mdx | 6 +++--- docs/core_docs/src/theme/RedirectAnchors.js | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/core_docs/docs/concepts/architecture.mdx b/docs/core_docs/docs/concepts/architecture.mdx index 85465bcf5bde..203340e55a0c 100644 --- a/docs/core_docs/docs/concepts/architecture.mdx +++ b/docs/core_docs/docs/concepts/architecture.mdx @@ -44,13 +44,13 @@ Key integration packages are separated out (see above). This contains integrations for various components (chat models, vector stores, tools, etc). All dependencies in this package are optional to keep the package as lightweight as possible. -## @langchian/langgraph +## @langchain/langgraph -`@langchian/langgraph` is an extension of `langchain` aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. +`@langchain/langgraph` is an orchestration framework aimed at building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows. -:::info[Further reading] +:::info [Further reading] - See our LangGraph overview [here](https://langchain-ai.github.io/langgraphjs/concepts/high_level/#core-principles). - See our LangGraph Academy Course [here](https://academy.langchain.com/courses/intro-to-langgraph). diff --git a/docs/core_docs/src/theme/RedirectAnchors.js b/docs/core_docs/src/theme/RedirectAnchors.js index 0de000e37977..9a852cc03e2c 100644 --- a/docs/core_docs/src/theme/RedirectAnchors.js +++ b/docs/core_docs/src/theme/RedirectAnchors.js @@ -12,7 +12,7 @@ function RedirectAnchors() { "#langchain": "/docs/concepts/architecture/#langchain", "#langchaincommunity": "/docs/concepts/architecture/#langchaincommunity", "#partner-packages": "/docs/concepts/architecture/#integration-packages", - "#langgraph": "/docs/concepts/architecture/#langchianlanggraph", + "#langgraph": "/docs/concepts/architecture/#langchainlanggraph", "#langsmith": "/docs/concepts/architecture/#langsmith", "#langchain-expression-language-lcel": "/docs/concepts/lcel", "#langchain-expression-language": "/docs/concepts/lcel", From b7e61fb3d7e25e8b90073f46221cea3e534b861b Mon Sep 17 00:00:00 2001 From: vbarda Date: Thu, 31 Oct 2024 13:50:01 -0400 Subject: [PATCH 043/100] docs: use canonical loop in streaming concepts --- docs/core_docs/docs/concepts/streaming.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/core_docs/docs/concepts/streaming.mdx b/docs/core_docs/docs/concepts/streaming.mdx index a7e1f2a21a46..2a89d2ea23b7 100644 --- a/docs/core_docs/docs/concepts/streaming.mdx +++ b/docs/core_docs/docs/concepts/streaming.mdx @@ -59,7 +59,7 @@ The type of chunk yielded by the `stream()` methods depends on the component bei The `stream()` method returns an iterator that yields these chunks as they are produced. For example, ```typescript -for await (const chunk in await component.stream(someInput)) { +for await (const chunk of await component.stream(someInput)) { // IMPORTANT: Keep the processing of each chunk as efficient as possible. // While you're processing the current chunk, the upstream component is // waiting to produce the next one. 
For example, if working with LangGraph, From 90692bab1954eb7a28b93521d6c9119936637b29 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Fri, 1 Nov 2024 17:19:41 -0700 Subject: [PATCH 044/100] feat(google-genai): Add support for search retrieval and code execution tools (#7138) --- docs/core_docs/docs/concepts/streaming.mdx | 14 +- .../chat/google_generativeai.ipynb | 370 +++++++++++++++++- .../src/language_models/chat_models.ts | 2 +- libs/langchain-google-genai/package.json | 2 +- .../langchain-google-genai/src/chat_models.ts | 74 +--- .../src/tests/chat_models.int.test.ts | 160 +++++++- libs/langchain-google-genai/src/types.ts | 10 +- .../src/utils/common.ts | 90 ++++- .../langchain-google-genai/src/utils/tools.ts | 123 ++++++ .../src/utils/zod_to_genai_parameters.ts | 2 +- yarn.lock | 9 +- 11 files changed, 771 insertions(+), 85 deletions(-) create mode 100644 libs/langchain-google-genai/src/utils/tools.ts diff --git a/docs/core_docs/docs/concepts/streaming.mdx b/docs/core_docs/docs/concepts/streaming.mdx index 2a89d2ea23b7..bc725f12ec8a 100644 --- a/docs/core_docs/docs/concepts/streaming.mdx +++ b/docs/core_docs/docs/concepts/streaming.mdx @@ -60,13 +60,13 @@ The `stream()` method returns an iterator that yields these chunks as they are p ```typescript for await (const chunk of await component.stream(someInput)) { - // IMPORTANT: Keep the processing of each chunk as efficient as possible. - // While you're processing the current chunk, the upstream component is - // waiting to produce the next one. For example, if working with LangGraph, - // graph execution is paused while the current chunk is being processed. - // In extreme cases, this could even result in timeouts (e.g., when llm outputs are - // streamed from an API that has a timeout). - console.log(chunk) + // IMPORTANT: Keep the processing of each chunk as efficient as possible. + // While you're processing the current chunk, the upstream component is + // waiting to produce the next one. For example, if working with LangGraph, + // graph execution is paused while the current chunk is being processed. + // In extreme cases, this could even result in timeouts (e.g., when llm outputs are + // streamed from an API that has a timeout). + console.log(chunk); } ``` diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb index 39d017da6ddc..f3d0e81ed3ce 100644 --- a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb +++ b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb @@ -428,6 +428,374 @@ "console.log(toolRes.tool_calls);" ] }, + { + "cell_type": "markdown", + "id": "9049ee37", + "metadata": {}, + "source": [ + "### Built in Google Search Retrieval\n", + "\n", + "Google also offers a built in search tool which you can use to ground content generation in real-world information. Here's an example of how to use it:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "43da673d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The Los Angeles Dodgers won the 2024 World Series, defeating the New York Yankees in Game 5 on October 30, 2024, by a score of 7-6. This victory marks the Dodgers' eighth World Series title and their first in a full season since 1988. They achieved this win by overcoming a 5-0 deficit, making them the first team in World Series history to win a clinching game after being behind by such a margin. 
The Dodgers also became the first team in MLB postseason history to overcome a five-run deficit, fall behind again, and still win. Walker Buehler earned the save in the final game, securing the championship for the Dodgers.\n", + "\n" + ] + } + ], + "source": [ + "import { DynamicRetrievalMode, GoogleSearchRetrievalTool } from \"@google/generative-ai\";\n", + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", + "\n", + "const searchRetrievalTool: GoogleSearchRetrievalTool = {\n", + " googleSearchRetrieval: {\n", + " dynamicRetrievalConfig: {\n", + " mode: DynamicRetrievalMode.MODE_DYNAMIC,\n", + " dynamicThreshold: 0.7, // default is 0.7\n", + " }\n", + " }\n", + "};\n", + "const searchRetrievalModel = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-1.5-pro\",\n", + " temperature: 0,\n", + " maxRetries: 0,\n", + "}).bindTools([searchRetrievalTool]);\n", + "\n", + "const searchRetrievalResult = await searchRetrievalModel.invoke(\"Who won the 2024 MLB World Series?\");\n", + "\n", + "console.log(searchRetrievalResult.content);" + ] + }, + { + "cell_type": "markdown", + "id": "6cc5f529", + "metadata": {}, + "source": [ + "The response also includes metadata about the search result:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ae5ab86c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " searchEntryPoint: {\n", + " renderedContent: '\\n' +\n", + " '

\\n'\n", + " },\n", + " groundingChunks: [\n", + " {\n", + " web: {\n", + " uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AZnLMfwvs0gpiM4BbIcNXZnnp4d4ED_rLnIYz2ZwM-lwFnoUxXNlKzy7ZSbbs_E27yhARG6Gx2AuW7DsoqkWPfDFMqPdXfvG3n0qFOQxQ4MBQ9Ox9mTk3KH5KPRJ79m8V118RQRyhi6oK5qg5-fLQunXUVn_a42K7eMk7Kjb8VpZ4onl8Glv1lQQsAK7YWyYkQ7WkTHDHVGB-vrL2U2yRQ==',\n", + " title: 'foxsports.com'\n", + " }\n", + " },\n", + " {\n", + " web: {\n", + " uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AZnLMfwxwBq8VYgKAhf3UC8U6U5D-i0lK4TwP-2Jf8ClqB-sI0iptm9GxgeaH1iHFbSi-j_C3UqYj8Ok0YDTyvg87S7JamU48pndrd467ZQbI2sI0yWxsCCZ_dosXHwemBHFL5TW2hbAqasq93CfJ09cp1jU',\n", + " title: 'mlb.com'\n", + " }\n", + " }\n", + " ],\n", + " groundingSupports: [\n", + " {\n", + " segment: {\n", + " endIndex: 131,\n", + " text: 'The Los Angeles Dodgers won the 2024 World Series, defeating the New York Yankees in Game 5 on October 30, 2024, by a score of 7-6.'\n", + " },\n", + " groundingChunkIndices: [ 0, 1 ],\n", + " confidenceScores: [ 0.7652759, 0.7652759 ]\n", + " },\n", + " {\n", + " segment: {\n", + " startIndex: 401,\n", + " endIndex: 531,\n", + " text: 'The Dodgers also became the first team in MLB postseason history to overcome a five-run deficit, fall behind again, and still win.'\n", + " },\n", + " groundingChunkIndices: [ 1 ],\n", + " confidenceScores: [ 0.8487609 ]\n", + " }\n", + " ],\n", + " retrievalMetadata: { googleSearchDynamicRetrievalScore: 0.93359375 },\n", + " webSearchQueries: [ 'who won the 2024 mlb world series' ]\n", + "}\n" + ] + } + ], + "source": [ + "console.dir(searchRetrievalResult.response_metadata?.groundingMetadata, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "7696a4a5", + "metadata": {}, + "source": [ + "### Code Execution\n", + "\n", + "Google Generative AI also supports code execution. 
Using the built in `CodeExecutionTool`, you can make the model generate code, execute it, and use the results in a final completion:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "08dde86b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " type: 'text',\n", + " text: \"Here's how to find the sum of the first and last three numbers in the given list using Python:\\n\" +\n", + " '\\n'\n", + " },\n", + " {\n", + " type: 'executableCode',\n", + " executableCode: {\n", + " language: 'PYTHON',\n", + " code: '\\n' +\n", + " 'my_list = [1, 2, 3, 72638, 8, 727, 4, 5, 6]\\n' +\n", + " '\\n' +\n", + " 'first_three_sum = sum(my_list[:3])\\n' +\n", + " 'last_three_sum = sum(my_list[-3:])\\n' +\n", + " 'total_sum = first_three_sum + last_three_sum\\n' +\n", + " '\\n' +\n", + " 'print(f\"{first_three_sum=}\")\\n' +\n", + " 'print(f\"{last_three_sum=}\")\\n' +\n", + " 'print(f\"{total_sum=}\")\\n' +\n", + " '\\n'\n", + " }\n", + " },\n", + " {\n", + " type: 'codeExecutionResult',\n", + " codeExecutionResult: {\n", + " outcome: 'OUTCOME_OK',\n", + " output: 'first_three_sum=6\\nlast_three_sum=15\\ntotal_sum=21\\n'\n", + " }\n", + " },\n", + " {\n", + " type: 'text',\n", + " text: 'Therefore, the sum of the first three numbers (1, 2, 3) is 6, the sum of the last three numbers (4, 5, 6) is 15, and their total sum is 21.\\n'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { CodeExecutionTool } from \"@google/generative-ai\";\n", + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", + "\n", + "const codeExecutionTool: CodeExecutionTool = {\n", + " codeExecution: {}, // Simply pass an empty object to enable it.\n", + "};\n", + "const codeExecutionModel = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-1.5-pro\",\n", + " temperature: 0,\n", + " maxRetries: 0,\n", + "}).bindTools([codeExecutionTool]);\n", + "\n", + "const codeExecutionResult = await codeExecutionModel.invoke(\"Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]\");\n", + "\n", + "console.dir(codeExecutionResult.content, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "9a76cf18", + "metadata": {}, + "source": [ + "You can also pass this generation back to the model as chat history:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b14518de", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You asked for the sum of the first three and the last three numbers in the list `[1, 2, 3, 72638, 8, 727, 4, 5, 6]`.\n", + "\n", + "Here's a breakdown of the code:\n", + "\n", + "1. **`my_list = [1, 2, 3, 72638, 8, 727, 4, 5, 6]`**: This line defines the list of numbers you provided.\n", + "\n", + "2. **`first_three_sum = sum(my_list[:3])`**: This calculates the sum of the first three numbers. `my_list[:3]` is a slice of the list that takes elements from the beginning up to (but not including) the index 3. So, it takes elements at indices 0, 1, and 2, which are 1, 2, and 3. The `sum()` function then adds these numbers together.\n", + "\n", + "3. **`last_three_sum = sum(my_list[-3:])`**: This calculates the sum of the last three numbers. `my_list[-3:]` is a slice that takes elements starting from the third element from the end and goes to the end of the list. So it takes elements at indices -3, -2, and -1 which correspond to 4, 5, and 6. 
The `sum()` function adds these numbers.\n", + "\n", + "4. **`total_sum = first_three_sum + last_three_sum`**: This adds the sum of the first three numbers and the sum of the last three numbers to get the final result.\n", + "\n", + "5. **`print(f\"{first_three_sum=}\")`**, **`print(f\"{last_three_sum=}\")`**, and **`print(f\"{total_sum=}\")`**: These lines print the calculated sums in a clear and readable format.\n", + "\n", + "\n", + "The output of the code was:\n", + "\n", + "* `first_three_sum=6`\n", + "* `last_three_sum=15`\n", + "* `total_sum=21`\n", + "\n", + "Therefore, the answer to your question is 21.\n", + "\n" + ] + } + ], + "source": [ + "const codeExecutionExplanation = await codeExecutionModel.invoke([\n", + " codeExecutionResult,\n", + " {\n", + " role: \"user\",\n", + " content: \"Please explain the question I asked, the code you wrote, and the answer you got.\",\n", + " }\n", + "])\n", + "\n", + "console.log(codeExecutionExplanation.content);" + ] + }, { "cell_type": "markdown", "id": "0c6a950f", @@ -474,4 +842,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/langchain-core/src/language_models/chat_models.ts b/langchain-core/src/language_models/chat_models.ts index c53f4b4fea76..c3824a3cbd95 100644 --- a/langchain-core/src/language_models/chat_models.ts +++ b/langchain-core/src/language_models/chat_models.ts @@ -51,7 +51,7 @@ import { RunnablePassthrough } from "../runnables/passthrough.js"; import { isZodSchema } from "../utils/types/is_zod_schema.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any -type ToolChoice = string | Record | "auto" | "any"; +export type ToolChoice = string | Record | "auto" | "any"; /** * Represents a serialized chat model. diff --git a/libs/langchain-google-genai/package.json b/libs/langchain-google-genai/package.json index e8db02867ea2..a93fc11e6095 100644 --- a/libs/langchain-google-genai/package.json +++ b/libs/langchain-google-genai/package.json @@ -35,7 +35,7 @@ "author": "LangChain", "license": "MIT", "dependencies": { - "@google/generative-ai": "^0.7.0", + "@google/generative-ai": "^0.21.0", "zod-to-json-schema": "^3.22.4" }, "peerDependencies": { diff --git a/libs/langchain-google-genai/src/chat_models.ts b/libs/langchain-google-genai/src/chat_models.ts index 3211bd1724df..6fd1be59cf36 100644 --- a/libs/langchain-google-genai/src/chat_models.ts +++ b/libs/langchain-google-genai/src/chat_models.ts @@ -7,9 +7,6 @@ import { GenerateContentRequest, SafetySetting, Part as GenerativeAIPart, - Tool as GenerativeAITool, - ToolConfig, - FunctionCallingMode, } from "@google/generative-ai"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { @@ -30,7 +27,6 @@ import { BaseLanguageModelInput, StructuredOutputMethodOptions, } from "@langchain/core/language_models/base"; -import { StructuredToolInterface } from "@langchain/core/tools"; import { Runnable, RunnablePassthrough, @@ -43,11 +39,11 @@ import { zodToGenerativeAIParameters } from "./utils/zod_to_genai_parameters.js" import { convertBaseMessagesToContent, convertResponseContentToChatGenerationChunk, - convertToGenerativeAITools, mapGenerateContentResultToChatResult, } from "./utils/common.js"; import { GoogleGenerativeAIToolsOutputParser } from "./output_parsers.js"; import { GoogleGenerativeAIToolType } from "./types.js"; +import { convertToolsToGenAI } from "./utils/tools.js"; interface TokenUsage { completionTokens?: number; @@ -682,70 +678,24 @@ export class ChatGoogleGenerativeAI AIMessageChunk, 
GoogleGenerativeAIChatCallOptions > { - return this.bind({ tools: convertToGenerativeAITools(tools), ...kwargs }); + return this.bind({ tools: convertToolsToGenAI(tools)?.tools, ...kwargs }); } invocationParams( options?: this["ParsedCallOptions"] ): Omit { - let genaiTools: GenerativeAITool[] | undefined; - if ( - Array.isArray(options?.tools) && - !options?.tools.some( - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (t: any) => !("lc_namespace" in t) - ) - ) { - // Tools are in StructuredToolInterface format. Convert to GenAI format - genaiTools = convertToGenerativeAITools( - options?.tools as StructuredToolInterface[] - ); - } else { - genaiTools = options?.tools as GenerativeAITool[]; - } - - let toolConfig: ToolConfig | undefined; - if (genaiTools?.length && options?.tool_choice) { - if (["any", "auto", "none"].some((c) => c === options.tool_choice)) { - const modeMap: Record = { - any: FunctionCallingMode.ANY, - auto: FunctionCallingMode.AUTO, - none: FunctionCallingMode.NONE, - }; - - toolConfig = { - functionCallingConfig: { - mode: - modeMap[options.tool_choice as keyof typeof modeMap] ?? - "MODE_UNSPECIFIED", - allowedFunctionNames: options.allowedFunctionNames, - }, - }; - } else if (typeof options.tool_choice === "string") { - toolConfig = { - functionCallingConfig: { - mode: FunctionCallingMode.ANY, - allowedFunctionNames: [ - ...(options.allowedFunctionNames ?? []), - options.tool_choice, - ], - }, - }; - } - - if (!options.tool_choice && options.allowedFunctionNames) { - toolConfig = { - functionCallingConfig: { - mode: FunctionCallingMode.ANY, - allowedFunctionNames: options.allowedFunctionNames, - }, - }; - } - } + const toolsAndConfig = options?.tools?.length + ? convertToolsToGenAI(options.tools, { + toolChoice: options.tool_choice, + allowedFunctionNames: options.allowedFunctionNames, + }) + : undefined; return { - tools: genaiTools, - toolConfig, + ...(toolsAndConfig?.tools ? { tools: toolsAndConfig.tools } : {}), + ...(toolsAndConfig?.toolConfig + ? 
{ toolConfig: toolsAndConfig.toolConfig } + : {}), }; } diff --git a/libs/langchain-google-genai/src/tests/chat_models.int.test.ts b/libs/langchain-google-genai/src/tests/chat_models.int.test.ts index 44ab24819786..f9108801ffc3 100644 --- a/libs/langchain-google-genai/src/tests/chat_models.int.test.ts +++ b/libs/langchain-google-genai/src/tests/chat_models.int.test.ts @@ -17,7 +17,13 @@ import { } from "@langchain/core/prompts"; import { StructuredTool } from "@langchain/core/tools"; import { z } from "zod"; -import { FunctionDeclarationSchemaType } from "@google/generative-ai"; +import { + CodeExecutionTool, + DynamicRetrievalMode, + SchemaType as FunctionDeclarationSchemaType, + GoogleSearchRetrievalTool, +} from "@google/generative-ai"; +import { concat } from "@langchain/core/utils/stream"; import { ChatGoogleGenerativeAI } from "../chat_models.js"; // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable @@ -567,3 +573,155 @@ test("Supports tool_choice", async () => { ); expect(response.tool_calls?.length).toBe(1); }); + +describe("GoogleSearchRetrievalTool", () => { + test("Supports GoogleSearchRetrievalTool", async () => { + const searchRetrievalTool: GoogleSearchRetrievalTool = { + googleSearchRetrieval: { + dynamicRetrievalConfig: { + mode: DynamicRetrievalMode.MODE_DYNAMIC, + dynamicThreshold: 0.7, // default is 0.7 + }, + }, + }; + const model = new ChatGoogleGenerativeAI({ + model: "gemini-1.5-pro", + temperature: 0, + maxRetries: 0, + }).bindTools([searchRetrievalTool]); + + const result = await model.invoke("Who won the 2024 MLB World Series?"); + + expect(result.response_metadata?.groundingMetadata).toBeDefined(); + expect(result.content as string).toContain("Dodgers"); + }); + + test("Can stream GoogleSearchRetrievalTool", async () => { + const searchRetrievalTool: GoogleSearchRetrievalTool = { + googleSearchRetrieval: { + dynamicRetrievalConfig: { + mode: DynamicRetrievalMode.MODE_DYNAMIC, + dynamicThreshold: 0.7, // default is 0.7 + }, + }, + }; + const model = new ChatGoogleGenerativeAI({ + model: "gemini-1.5-pro", + temperature: 0, + maxRetries: 0, + }).bindTools([searchRetrievalTool]); + + const stream = await model.stream("Who won the 2024 MLB World Series?"); + let finalMsg: AIMessageChunk | undefined; + for await (const msg of stream) { + finalMsg = finalMsg ? concat(finalMsg, msg) : msg; + } + if (!finalMsg) { + throw new Error("finalMsg is undefined"); + } + expect(finalMsg.response_metadata?.groundingMetadata).toBeDefined(); + expect(finalMsg.content as string).toContain("Dodgers"); + }); +}); + +describe("CodeExecutionTool", () => { + test("Supports CodeExecutionTool", async () => { + const codeExecutionTool: CodeExecutionTool = { + codeExecution: {}, // Simply pass an empty object to enable it. + }; + const model = new ChatGoogleGenerativeAI({ + model: "gemini-1.5-pro", + temperature: 0, + maxRetries: 0, + }).bindTools([codeExecutionTool]); + + const result = await model.invoke( + "Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]" + ); + + expect(Array.isArray(result.content)).toBeTruthy(); + if (!Array.isArray(result.content)) { + throw new Error("Content is not an array"); + } + const texts = result.content + .flatMap((item) => ("text" in item ? 
[item.text] : [])) + .join("\n"); + expect(texts).toContain("21"); + + const executableCode = result.content.find( + (item) => item.type === "executableCode" + ); + expect(executableCode).toBeDefined(); + const codeResult = result.content.find( + (item) => item.type === "codeExecutionResult" + ); + expect(codeResult).toBeDefined(); + }); + + test("CodeExecutionTool contents can be passed in chat history", async () => { + const codeExecutionTool: CodeExecutionTool = { + codeExecution: {}, // Simply pass an empty object to enable it. + }; + const model = new ChatGoogleGenerativeAI({ + model: "gemini-1.5-pro", + temperature: 0, + maxRetries: 0, + }).bindTools([codeExecutionTool]); + + const codeResult = await model.invoke( + "Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]" + ); + + const explanation = await model.invoke([ + codeResult, + { + role: "user", + content: + "Please explain the question I asked, the code you wrote, and the answer you got.", + }, + ]); + + expect(typeof explanation.content).toBe("string"); + expect(explanation.content.length).toBeGreaterThan(10); + }); + + test("Can stream CodeExecutionTool", async () => { + const codeExecutionTool: CodeExecutionTool = { + codeExecution: {}, // Simply pass an empty object to enable it. + }; + const model = new ChatGoogleGenerativeAI({ + model: "gemini-1.5-pro", + temperature: 0, + maxRetries: 0, + }).bindTools([codeExecutionTool]); + + const stream = await model.stream( + "Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]" + ); + let finalMsg: AIMessageChunk | undefined; + for await (const msg of stream) { + finalMsg = finalMsg ? concat(finalMsg, msg) : msg; + } + + if (!finalMsg) { + throw new Error("finalMsg is undefined"); + } + expect(Array.isArray(finalMsg.content)).toBeTruthy(); + if (!Array.isArray(finalMsg.content)) { + throw new Error("Content is not an array"); + } + const texts = finalMsg.content + .flatMap((item) => ("text" in item ? 
[item.text] : [])) + .join("\n"); + expect(texts).toContain("21"); + + const executableCode = finalMsg.content.find( + (item) => item.type === "executableCode" + ); + expect(executableCode).toBeDefined(); + const codeResult = finalMsg.content.find( + (item) => item.type === "codeExecutionResult" + ); + expect(codeResult).toBeDefined(); + }); +}); diff --git a/libs/langchain-google-genai/src/types.ts b/libs/langchain-google-genai/src/types.ts index 8cc4e3eb3c50..d1b248f13898 100644 --- a/libs/langchain-google-genai/src/types.ts +++ b/libs/langchain-google-genai/src/types.ts @@ -1,6 +1,12 @@ -import { FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool } from "@google/generative-ai"; +import { + CodeExecutionTool, + FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, + GoogleSearchRetrievalTool, +} from "@google/generative-ai"; import { BindToolsInput } from "@langchain/core/language_models/chat_models"; export type GoogleGenerativeAIToolType = | BindToolsInput - | GoogleGenerativeAIFunctionDeclarationsTool; + | GoogleGenerativeAIFunctionDeclarationsTool + | CodeExecutionTool + | GoogleSearchRetrievalTool; diff --git a/libs/langchain-google-genai/src/utils/common.ts b/libs/langchain-google-genai/src/utils/common.ts index 738bc62f479c..2670760c7115 100644 --- a/libs/langchain-google-genai/src/utils/common.ts +++ b/libs/langchain-google-genai/src/utils/common.ts @@ -13,6 +13,7 @@ import { AIMessageChunk, BaseMessage, ChatMessage, + MessageContent, MessageContentComplex, UsageMetadata, isBaseMessage, @@ -106,7 +107,7 @@ export function convertMessageContentToParts( args: tc.args, }, })); - } else if (message._getType() === "tool" && message.name && message.content) { + } else if (message.getType() === "tool" && message.name && message.content) { functionResponses = [ { functionResponse: { @@ -121,6 +122,14 @@ export function convertMessageContentToParts( return { text: c.text, }; + } else if (c.type === "executableCode") { + return { + executableCode: c.executableCode, + }; + } else if (c.type === "codeExecutionResult") { + return { + codeExecutionResult: c.codeExecutionResult, + }; } if (c.type === "image_url") { @@ -253,13 +262,43 @@ export function mapGenerateContentResultToChatResult( const functionCalls = response.functionCalls(); const [candidate] = response.candidates; - const { content, ...generationInfo } = candidate; - const text = content?.parts[0]?.text ?? 
""; + const { content: candidateContent, ...generationInfo } = candidate; + let content: MessageContent; + if (candidateContent?.parts.length === 1 && candidateContent.parts[0].text) { + content = candidateContent.parts[0].text; + } else { + content = candidateContent.parts.map((p) => { + if ("text" in p) { + return { + type: "text", + text: p.text, + }; + } else if ("executableCode" in p) { + return { + type: "executableCode", + executableCode: p.executableCode, + }; + } else if ("codeExecutionResult" in p) { + return { + type: "codeExecutionResult", + codeExecutionResult: p.codeExecutionResult, + }; + } + return p; + }); + } + + let text = ""; + if (typeof content === "string") { + text = content; + } else if ("text" in content[0]) { + text = content[0].text; + } const generation: ChatGeneration = { text, message: new AIMessage({ - content: text, + content, tool_calls: functionCalls?.map((fc) => ({ ...fc, type: "tool_call", @@ -289,8 +328,42 @@ export function convertResponseContentToChatGenerationChunk( } const functionCalls = response.functionCalls(); const [candidate] = response.candidates; - const { content, ...generationInfo } = candidate; - const text = content?.parts?.[0]?.text ?? ""; + const { content: candidateContent, ...generationInfo } = candidate; + let content: MessageContent | undefined; + // Checks if some parts do not have text. If false, it means that the content is a string. + if ( + candidateContent?.parts && + candidateContent.parts.every((p) => "text" in p) + ) { + content = candidateContent.parts.map((p) => p.text).join(""); + } else if (candidateContent.parts) { + content = candidateContent.parts.map((p) => { + if ("text" in p) { + return { + type: "text", + text: p.text, + }; + } else if ("executableCode" in p) { + return { + type: "executableCode", + executableCode: p.executableCode, + }; + } else if ("codeExecutionResult" in p) { + return { + type: "codeExecutionResult", + codeExecutionResult: p.codeExecutionResult, + }; + } + return p; + }); + } + + let text = ""; + if (content && typeof content === "string") { + text = content; + } else if (content && typeof content === "object" && "text" in content[0]) { + text = content[0].text; + } const toolCallChunks: ToolCallChunk[] = []; if (functionCalls) { @@ -303,11 +376,12 @@ export function convertResponseContentToChatGenerationChunk( })) ); } + return new ChatGenerationChunk({ text, message: new AIMessageChunk({ - content: text, - name: !content ? undefined : content.role, + content: content || "", + name: !candidateContent ? undefined : candidateContent.role, tool_call_chunks: toolCallChunks, // Each chunk can have unique "generationInfo", and merging strategy is unclear, // so leave blank for now. 
diff --git a/libs/langchain-google-genai/src/utils/tools.ts b/libs/langchain-google-genai/src/utils/tools.ts new file mode 100644 index 000000000000..8e362d2a7877 --- /dev/null +++ b/libs/langchain-google-genai/src/utils/tools.ts @@ -0,0 +1,123 @@ +import { + Tool as GenerativeAITool, + ToolConfig, + FunctionCallingMode, + FunctionDeclaration, +} from "@google/generative-ai"; +import { ToolChoice } from "@langchain/core/language_models/chat_models"; +import { StructuredToolInterface } from "@langchain/core/tools"; +import { isLangChainTool } from "@langchain/core/utils/function_calling"; +import { convertToGenerativeAITools } from "./common.js"; +import { GoogleGenerativeAIToolType } from "../types.js"; + +export function convertToolsToGenAI( + tools: GoogleGenerativeAIToolType[], + extra?: { + toolChoice?: ToolChoice; + allowedFunctionNames?: string[]; + } +): { + tools: GenerativeAITool[]; + toolConfig?: ToolConfig; +} { + // Extract function declaration processing to a separate function + const genAITools = processTools(tools); + + // Simplify tool config creation + const toolConfig = createToolConfig(genAITools, extra); + + return { tools: genAITools, toolConfig }; +} + +function processTools(tools: GoogleGenerativeAIToolType[]): GenerativeAITool[] { + let functionDeclarationTools: FunctionDeclaration[] = []; + const genAITools: GenerativeAITool[] = []; + + tools.forEach((tool) => { + if (isLangChainTool(tool)) { + const [convertedTool] = convertToGenerativeAITools([ + tool as StructuredToolInterface, + ]); + if (convertedTool.functionDeclarations) { + functionDeclarationTools.push(...convertedTool.functionDeclarations); + } + } else { + genAITools.push(tool as GenerativeAITool); + } + }); + + const genAIFunctionDeclaration = genAITools.find( + (t) => "functionDeclarations" in t + ); + if (genAIFunctionDeclaration) { + return genAITools.map((tool) => { + if ( + functionDeclarationTools?.length > 0 && + "functionDeclarations" in tool + ) { + const newTool = { + functionDeclarations: [ + ...(tool.functionDeclarations || []), + ...functionDeclarationTools, + ], + }; + // Clear the functionDeclarationTools array so it is not passed again + functionDeclarationTools = []; + return newTool; + } + return tool; + }); + } + + return [ + ...genAITools, + ...(functionDeclarationTools.length > 0 + ? [ + { + functionDeclarations: functionDeclarationTools, + }, + ] + : []), + ]; +} + +function createToolConfig( + genAITools: GenerativeAITool[], + extra?: { + toolChoice?: ToolChoice; + allowedFunctionNames?: string[]; + } +): ToolConfig | undefined { + if (!genAITools.length || !extra) return undefined; + + const { toolChoice, allowedFunctionNames } = extra; + + const modeMap: Record = { + any: FunctionCallingMode.ANY, + auto: FunctionCallingMode.AUTO, + none: FunctionCallingMode.NONE, + }; + + if (toolChoice && ["any", "auto", "none"].includes(toolChoice as string)) { + return { + functionCallingConfig: { + mode: modeMap[toolChoice as keyof typeof modeMap] ?? "MODE_UNSPECIFIED", + allowedFunctionNames, + }, + }; + } + + if (typeof toolChoice === "string" || allowedFunctionNames) { + return { + functionCallingConfig: { + mode: FunctionCallingMode.ANY, + allowedFunctionNames: [ + ...(allowedFunctionNames ?? []), + ...(toolChoice && typeof toolChoice === "string" ? 
[toolChoice] : []), + ], + }, + }; + } + + return undefined; +} diff --git a/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts b/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts index dd59665164c8..7323612504a9 100644 --- a/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts +++ b/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts @@ -4,7 +4,7 @@ import type { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema, - FunctionDeclarationSchemaType, + type SchemaType as FunctionDeclarationSchemaType, } from "@google/generative-ai"; export interface GenerativeAIJsonSchema extends Record { diff --git a/yarn.lock b/yarn.lock index 8c11df3598a7..7b1a9e7c6717 100644 --- a/yarn.lock +++ b/yarn.lock @@ -10306,6 +10306,13 @@ __metadata: languageName: node linkType: hard +"@google/generative-ai@npm:^0.21.0": + version: 0.21.0 + resolution: "@google/generative-ai@npm:0.21.0" + checksum: 91345a8399b5e71382193d0eac47a4b264613a9d7e48a431290b523e3fbb44a207a33bdead304f181987e5a0127a84168c4e21cf461c1087cd3b0ebc5125d13d + languageName: node + linkType: hard + "@google/generative-ai@npm:^0.7.0": version: 0.7.1 resolution: "@google/generative-ai@npm:0.7.1" @@ -12140,7 +12147,7 @@ __metadata: version: 0.0.0-use.local resolution: "@langchain/google-genai@workspace:libs/langchain-google-genai" dependencies: - "@google/generative-ai": ^0.7.0 + "@google/generative-ai": ^0.21.0 "@jest/globals": ^29.5.0 "@langchain/core": "workspace:*" "@langchain/scripts": ">=0.1.0 <0.2.0" From 73f3176ebfd414591636b611ce787a0e61518324 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Fri, 1 Nov 2024 17:28:46 -0700 Subject: [PATCH 045/100] feat(core): Release 0.3.17 (#7139) --- langchain-core/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain-core/package.json b/langchain-core/package.json index c77599697a18..ec40b3e6419d 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/core", - "version": "0.3.16", + "version": "0.3.17", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { From 2e192776e1190ce56fd5b70cd7698ffea088fd2c Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Fri, 1 Nov 2024 17:50:08 -0700 Subject: [PATCH 046/100] feat(google-genai): Release 0.1.3-rc.0 (#7140) --- libs/langchain-google-genai/package.json | 4 ++-- yarn.lock | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/libs/langchain-google-genai/package.json b/libs/langchain-google-genai/package.json index a93fc11e6095..f1d23e56578b 100644 --- a/libs/langchain-google-genai/package.json +++ b/libs/langchain-google-genai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-genai", - "version": "0.1.2", + "version": "0.1.3-rc.0", "description": "Google Generative AI integration for LangChain.js", "type": "module", "engines": { @@ -39,7 +39,7 @@ "zod-to-json-schema": "^3.22.4" }, "peerDependencies": { - "@langchain/core": ">=0.2.21 <0.4.0" + "@langchain/core": ">=0.3.17 <0.4.0" }, "devDependencies": { "@jest/globals": "^29.5.0", diff --git a/yarn.lock b/yarn.lock index 7b1a9e7c6717..640a4363d665 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12143,7 +12143,19 @@ __metadata: languageName: unknown linkType: soft -"@langchain/google-genai@*, @langchain/google-genai@workspace:*, @langchain/google-genai@workspace:libs/langchain-google-genai": 
+"@langchain/google-genai@npm:*": + version: 0.1.2 + resolution: "@langchain/google-genai@npm:0.1.2" + dependencies: + "@google/generative-ai": ^0.7.0 + zod-to-json-schema: ^3.22.4 + peerDependencies: + "@langchain/core": ">=0.2.21 <0.4.0" + checksum: 69113d66898e7e7fe2c852a844edbd440f72caf7445b555ce4dc1bdd6cd530b404dc3631662d70458604bf9ee395bba8c2c1d4c5a024b29498d7bd977573051a + languageName: node + linkType: hard + +"@langchain/google-genai@workspace:*, @langchain/google-genai@workspace:libs/langchain-google-genai": version: 0.0.0-use.local resolution: "@langchain/google-genai@workspace:libs/langchain-google-genai" dependencies: @@ -12176,7 +12188,7 @@ __metadata: zod: ^3.22.4 zod-to-json-schema: ^3.22.4 peerDependencies: - "@langchain/core": ">=0.2.21 <0.4.0" + "@langchain/core": ">=0.3.17 <0.4.0" languageName: unknown linkType: soft From fcef78476e470729cf76b1a046e5195fcbbd1d84 Mon Sep 17 00:00:00 2001 From: Kaiwei Zhang <134007383+KevinZJN@users.noreply.github.com> Date: Mon, 4 Nov 2024 23:29:51 -0500 Subject: [PATCH 047/100] fix(community): Add id in chroma search result (#7120) Co-authored-by: jacoblee93 --- .../src/vectorstores/chroma.ts | 1 + .../src/vectorstores/tests/chroma.test.ts | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/libs/langchain-community/src/vectorstores/chroma.ts b/libs/langchain-community/src/vectorstores/chroma.ts index 3f94808a25ae..f7dd250872b2 100644 --- a/libs/langchain-community/src/vectorstores/chroma.ts +++ b/libs/langchain-community/src/vectorstores/chroma.ts @@ -398,6 +398,7 @@ export class Chroma extends VectorStore { new Document({ pageContent: firstDocuments?.[i] ?? "", metadata, + id: firstIds[i], }), firstDistances[i], ]); diff --git a/libs/langchain-community/src/vectorstores/tests/chroma.test.ts b/libs/langchain-community/src/vectorstores/tests/chroma.test.ts index 8fdab40d095e..023c36560df1 100644 --- a/libs/langchain-community/src/vectorstores/tests/chroma.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/chroma.test.ts @@ -132,4 +132,23 @@ describe("Chroma", () => { }); expect(results).toHaveLength(5); }); + + test("should return id properly when adding documents", async () => { + const document1 = { + pageContent: "Document 1", + metadata: { source: "https://example.com" }, + }; + + const documents = [document1]; + + const chroma = new Chroma(new FakeEmbeddings(), { + collectionName: "new-test-collection", + index: mockClient, + }); + + await chroma.addDocuments(documents, { ids: ["0"] }); + const result = await chroma.similaritySearch(document1.pageContent, 1); + + expect(result[0]).toHaveProperty("id", "0"); + }); }); From 338a668bba56d2d08f9a13c6e1be27152ace781e Mon Sep 17 00:00:00 2001 From: Apurav Chauhan <1810431+apuravchauhan@users.noreply.github.com> Date: Tue, 5 Nov 2024 12:01:16 -0500 Subject: [PATCH 048/100] docs: Update pdf_qa.ipynb (#7137) Co-authored-by: Jacob Lee --- docs/core_docs/docs/tutorials/pdf_qa.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/core_docs/docs/tutorials/pdf_qa.ipynb b/docs/core_docs/docs/tutorials/pdf_qa.ipynb index 6408a3bb669e..1c4efbb868b1 100644 --- a/docs/core_docs/docs/tutorials/pdf_qa.ipynb +++ b/docs/core_docs/docs/tutorials/pdf_qa.ipynb @@ -287,7 +287,7 @@ " [\"human\", \"{input}\"],\n", "]);\n", "\n", - "const questionAnswerChain = await createStuffDocumentsChain({ llm, prompt });\n", + "const questionAnswerChain = await createStuffDocumentsChain({ llm: model, prompt });\n", "const ragChain = await createRetrievalChain({ 
retriever, combineDocsChain: questionAnswerChain });\n", "\n", "const results = await ragChain.invoke({\n", @@ -417,4 +417,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} From 3b8cc03df1b60ab90531d81779f084536b6cd1c5 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Tue, 5 Nov 2024 09:16:40 -0800 Subject: [PATCH 049/100] feat(google-genai): Release 0.1.3 (#7152) --- libs/langchain-google-genai/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-google-genai/package.json b/libs/langchain-google-genai/package.json index f1d23e56578b..2e4243fa9c34 100644 --- a/libs/langchain-google-genai/package.json +++ b/libs/langchain-google-genai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-genai", - "version": "0.1.3-rc.0", + "version": "0.1.3", "description": "Google Generative AI integration for LangChain.js", "type": "module", "engines": { From a71dbc3a7e75fb18679ed3750a01aed97e376672 Mon Sep 17 00:00:00 2001 From: oleg Date: Tue, 5 Nov 2024 18:29:47 +0100 Subject: [PATCH 050/100] fix(langchain): Correct typo in format instructions for conversational agent (#7149) --- langchain/src/agents/chat_convo/prompt.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain/src/agents/chat_convo/prompt.ts b/langchain/src/agents/chat_convo/prompt.ts index 6778be151099..1a14ae013434 100644 --- a/langchain/src/agents/chat_convo/prompt.ts +++ b/langchain/src/agents/chat_convo/prompt.ts @@ -30,7 +30,7 @@ Use this if you want to respond directly and conversationally to the human. Mark \`\`\`json {{{{ "action": "Final Answer", - "action_input": string // You should put what you want to return to use here and make sure to use valid json newline characters. + "action_input": string // You should put what you want to return to user here and make sure to use valid json newline characters. 
}}}} \`\`\` From e13925fea6677ed503d9bbc08184d4fa42807279 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Tue, 5 Nov 2024 09:32:56 -0800 Subject: [PATCH 051/100] fix: Update yarn.lock (#7153) --- yarn.lock | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/yarn.lock b/yarn.lock index 640a4363d665..367f4903bf51 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12143,19 +12143,7 @@ __metadata: languageName: unknown linkType: soft -"@langchain/google-genai@npm:*": - version: 0.1.2 - resolution: "@langchain/google-genai@npm:0.1.2" - dependencies: - "@google/generative-ai": ^0.7.0 - zod-to-json-schema: ^3.22.4 - peerDependencies: - "@langchain/core": ">=0.2.21 <0.4.0" - checksum: 69113d66898e7e7fe2c852a844edbd440f72caf7445b555ce4dc1bdd6cd530b404dc3631662d70458604bf9ee395bba8c2c1d4c5a024b29498d7bd977573051a - languageName: node - linkType: hard - -"@langchain/google-genai@workspace:*, @langchain/google-genai@workspace:libs/langchain-google-genai": +"@langchain/google-genai@*, @langchain/google-genai@workspace:*, @langchain/google-genai@workspace:libs/langchain-google-genai": version: 0.0.0-use.local resolution: "@langchain/google-genai@workspace:libs/langchain-google-genai" dependencies: From c6440b67c79647324e220edac849919a95583e26 Mon Sep 17 00:00:00 2001 From: aditishree1 <141712869+aditishree1@users.noreply.github.com> Date: Tue, 5 Nov 2024 23:20:01 +0530 Subject: [PATCH 052/100] feat(cosmosdbnosql): Add Semantic Cache Integration (#7033) Co-authored-by: Yohan Lasorsa Co-authored-by: jacoblee93 --- .../llm_caching/azure_cosmosdb_nosql.mdx | 40 +++ .../docs/integrations/llm_caching/index.mdx | 14 + .../docs/integrations/platforms/microsoft.mdx | 18 ++ docs/core_docs/sidebars.js | 16 ++ .../azure_cosmosdb_nosql.ts | 49 ++++ .../src/azure_cosmosdb_nosql.ts | 6 +- libs/langchain-azure-cosmosdb/src/caches.ts | 191 ++++++++++++++ libs/langchain-azure-cosmosdb/src/index.ts | 1 + .../src/tests/caches.int.test.ts | 244 ++++++++++++++++++ .../src/tests/caches.test.ts | 67 +++++ 10 files changed, 643 insertions(+), 3 deletions(-) create mode 100644 docs/core_docs/docs/integrations/llm_caching/azure_cosmosdb_nosql.mdx create mode 100644 docs/core_docs/docs/integrations/llm_caching/index.mdx create mode 100644 examples/src/caches/azure_cosmosdb_nosql/azure_cosmosdb_nosql.ts create mode 100644 libs/langchain-azure-cosmosdb/src/caches.ts create mode 100644 libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts create mode 100644 libs/langchain-azure-cosmosdb/src/tests/caches.test.ts diff --git a/docs/core_docs/docs/integrations/llm_caching/azure_cosmosdb_nosql.mdx b/docs/core_docs/docs/integrations/llm_caching/azure_cosmosdb_nosql.mdx new file mode 100644 index 000000000000..ecf82513a7ae --- /dev/null +++ b/docs/core_docs/docs/integrations/llm_caching/azure_cosmosdb_nosql.mdx @@ -0,0 +1,40 @@ +# Azure Cosmos DB NoSQL Semantic Cache + +> The Semantic Cache feature is supported with Azure Cosmos DB for NoSQL integration, enabling users to retrieve cached responses based on semantic similarity between the user input and previously cached results. It leverages [AzureCosmosDBNoSQLVectorStore](/docs/integrations/vectorstores/azure_cosmosdb_nosql), which stores vector embeddings of cached prompts. These embeddings enable similarity-based searches, allowing the system to retrieve relevant cached results. + +If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started. 
+ +## Setup + +You'll first need to install the [`@langchain/azure-cosmosdb`](https://www.npmjs.com/package/@langchain/azure-cosmosdb) package: + +import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; + + + +```bash npm2yarn +npm install @langchain/azure-cosmosdb @langchain/core +``` + +You'll also need to have an Azure Cosmos DB for NoSQL instance running. You can deploy a free version on Azure Portal without any cost, following [this guide](https://learn.microsoft.com/azure/cosmos-db/nosql/quickstart-portal). + +Once you have your instance running, make sure you have the connection string. If you are using Managed Identity, you need to have the endpoint. You can find them in the Azure Portal, under the "Settings / Keys" section of your instance. + +import CodeBlock from "@theme/CodeBlock"; + +:::info + +When using Azure Managed Identity and role-based access control, you must ensure that the database and container have been created beforehand. RBAC does not provide permissions to create databases and containers. You can get more information about the permission model in the [Azure Cosmos DB documentation](https://learn.microsoft.com/azure/cosmos-db/how-to-setup-rbac#permission-model). + +::: + +## Usage example + +import Example from "@examples/caches/azure_cosmosdb_nosql/azure_cosmosdb_nosql.ts"; + +{Example} + +## Related + +- Vector store [conceptual guide](/docs/concepts/#vectorstores) +- Vector store [how-to guides](/docs/how_to/#vectorstores) diff --git a/docs/core_docs/docs/integrations/llm_caching/index.mdx b/docs/core_docs/docs/integrations/llm_caching/index.mdx new file mode 100644 index 000000000000..f1f5f6702ad8 --- /dev/null +++ b/docs/core_docs/docs/integrations/llm_caching/index.mdx @@ -0,0 +1,14 @@ +--- +sidebar_class_name: hidden +hide_table_of_contents: true +--- + +# Model caches + +[Caching LLM calls](/docs/how_to/chat_model_caching) can be useful for testing, cost savings, and speed. + +Below are some integrations that allow you to cache results of individual LLM calls using different caches with different strategies. + +import { IndexTable } from "@theme/FeatureTables"; + + diff --git a/docs/core_docs/docs/integrations/platforms/microsoft.mdx b/docs/core_docs/docs/integrations/platforms/microsoft.mdx index b048323e04a4..e9f3d7fd4922 100644 --- a/docs/core_docs/docs/integrations/platforms/microsoft.mdx +++ b/docs/core_docs/docs/integrations/platforms/microsoft.mdx @@ -132,6 +132,24 @@ See a [usage example](/docs/integrations/vectorstores/azure_cosmosdb_mongodb). import { AzureCosmosDBMongoDBVectorStore } from "@langchain/azure-cosmosdb"; ``` +## Semantic Cache + +### Azure Cosmos DB NoSQL Semantic Cache + +> The Semantic Cache feature is supported with Azure Cosmos DB for NoSQL integration, enabling users to retrieve cached responses based on semantic similarity between the user input and previously cached results. It leverages [AzureCosmosDBNoSQLVectorStore](/docs/integrations/vectorstores/azure_cosmosdb_nosql), which stores vector embeddings of cached prompts. These embeddings enable similarity-based searches, allowing the system to retrieve relevant cached results. + + + +```bash npm2yarn +npm install @langchain/azure-cosmosdb @langchain/core +``` + +See a [usage example](/docs/integrations/llm_caching/azure_cosmosdb_nosql). 
+ +```typescript +import { AzureCosmosDBNoSQLSemanticCache } from "@langchain/azure-cosmosdb"; +``` + ## Document loaders ### Azure Blob Storage diff --git a/docs/core_docs/sidebars.js b/docs/core_docs/sidebars.js index 851912174a54..95bf57ec5859 100644 --- a/docs/core_docs/sidebars.js +++ b/docs/core_docs/sidebars.js @@ -347,6 +347,22 @@ module.exports = { slug: "integrations/document_transformers", }, }, + { + type: "category", + label: "Model caches", + collapsible: false, + items: [ + { + type: "autogenerated", + dirName: "integrations/llm_caching", + className: "hidden", + }, + ], + link: { + type: "doc", + id: "integrations/llm_caching/index", + }, + }, { type: "category", label: "Graphs", diff --git a/examples/src/caches/azure_cosmosdb_nosql/azure_cosmosdb_nosql.ts b/examples/src/caches/azure_cosmosdb_nosql/azure_cosmosdb_nosql.ts new file mode 100644 index 000000000000..3797b11b1144 --- /dev/null +++ b/examples/src/caches/azure_cosmosdb_nosql/azure_cosmosdb_nosql.ts @@ -0,0 +1,49 @@ +import { + AzureCosmosDBNoSQLConfig, + AzureCosmosDBNoSQLSemanticCache, +} from "@langchain/azure-cosmosdb"; +import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; + +const embeddings = new OpenAIEmbeddings(); +const config: AzureCosmosDBNoSQLConfig = { + databaseName: "", + containerName: "", + // use endpoint to initiate client with managed identity + connectionString: "", +}; + +/** + * Sets the threshold similarity score for returning cached results based on vector distance. + * Cached output is returned only if the similarity score meets or exceeds this threshold; + * otherwise, a new result is generated. Default is 0.6, adjustable via the constructor + * to suit various distance functions and use cases. + * (see: https://learn.microsoft.com/azure/cosmos-db/nosql/query/vectordistance). + */ + +const similarityScoreThreshold = 0.5; +const cache = new AzureCosmosDBNoSQLSemanticCache( + embeddings, + config, + similarityScoreThreshold +); + +const model = new ChatOpenAI({ cache }); + +// Invoke the model to perform an action +const response1 = await model.invoke("Do something random!"); +console.log(response1); +/* + AIMessage { + content: "Sure! I'll generate a random number for you: 37", + additional_kwargs: {} + } +*/ + +const response2 = await model.invoke("Do something random!"); +console.log(response2); +/* + AIMessage { + content: "Sure! I'll generate a random number for you: 37", + additional_kwargs: {} + } +*/ diff --git a/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts b/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts index fdd287047278..618d43ab64c9 100644 --- a/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts +++ b/libs/langchain-azure-cosmosdb/src/azure_cosmosdb_nosql.ts @@ -78,7 +78,7 @@ export interface AzureCosmosDBNoSQLConfig readonly metadataKey?: string; } -const USER_AGENT_PREFIX = "langchainjs-azure-cosmosdb-nosql"; +const USER_AGENT_SUFFIX = "langchainjs-cdbnosql-vectorstore-javascript"; /** * Azure Cosmos DB for NoSQL vCore vector store. @@ -151,14 +151,14 @@ export class AzureCosmosDBNoSQLVectorStore extends VectorStore { this.client = new CosmosClient({ endpoint, key, - userAgentSuffix: USER_AGENT_PREFIX, + userAgentSuffix: USER_AGENT_SUFFIX, }); } else { // Use managed identity this.client = new CosmosClient({ endpoint, aadCredentials: dbConfig.credentials ?? 
new DefaultAzureCredential(), - userAgentSuffix: USER_AGENT_PREFIX, + userAgentSuffix: USER_AGENT_SUFFIX, } as CosmosClientOptions); } } diff --git a/libs/langchain-azure-cosmosdb/src/caches.ts b/libs/langchain-azure-cosmosdb/src/caches.ts new file mode 100644 index 000000000000..da7619c5ff96 --- /dev/null +++ b/libs/langchain-azure-cosmosdb/src/caches.ts @@ -0,0 +1,191 @@ +import { + BaseCache, + deserializeStoredGeneration, + getCacheKey, + serializeGeneration, +} from "@langchain/core/caches"; +import { Generation } from "@langchain/core/outputs"; +import { Document } from "@langchain/core/documents"; +import { EmbeddingsInterface } from "@langchain/core/embeddings"; +import { CosmosClient, CosmosClientOptions } from "@azure/cosmos"; +import { DefaultAzureCredential } from "@azure/identity"; +import { getEnvironmentVariable } from "@langchain/core/utils/env"; +import { + AzureCosmosDBNoSQLConfig, + AzureCosmosDBNoSQLVectorStore, +} from "./azure_cosmosdb_nosql.js"; + +const USER_AGENT_SUFFIX = "langchainjs-cdbnosql-semanticcache-javascript"; +const DEFAULT_CONTAINER_NAME = "semanticCacheContainer"; + +/** + * Represents a Semantic Cache that uses CosmosDB NoSQL backend as the underlying + * storage system. + * + * @example + * ```typescript + * const embeddings = new OpenAIEmbeddings(); + * const cache = new AzureCosmosDBNoSQLSemanticCache(embeddings, { + * databaseName: DATABASE_NAME, + * containerName: CONTAINER_NAME + * }); + * const model = new ChatOpenAI({cache}); + * + * // Invoke the model to perform an action + * const response = await model.invoke("Do something random!"); + * console.log(response); + * ``` + */ +export class AzureCosmosDBNoSQLSemanticCache extends BaseCache { + private embeddings: EmbeddingsInterface; + + private config: AzureCosmosDBNoSQLConfig; + + private similarityScoreThreshold: number; + + private cacheDict: { [key: string]: AzureCosmosDBNoSQLVectorStore } = {}; + + private vectorDistanceFunction: string; + + constructor( + embeddings: EmbeddingsInterface, + dbConfig: AzureCosmosDBNoSQLConfig, + similarityScoreThreshold: number = 0.6 + ) { + super(); + let client: CosmosClient; + + const connectionString = + dbConfig.connectionString ?? + getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_CONNECTION_STRING"); + + const endpoint = + dbConfig.endpoint ?? + getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_ENDPOINT"); + + if (!dbConfig.client && !connectionString && !endpoint) { + throw new Error( + "AzureCosmosDBNoSQLSemanticCache client, connection string or endpoint must be set." + ); + } + + if (!dbConfig.client) { + if (connectionString) { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + let [endpoint, key] = connectionString!.split(";"); + [, endpoint] = endpoint.split("="); + [, key] = key.split("="); + + client = new CosmosClient({ + endpoint, + key, + userAgentSuffix: USER_AGENT_SUFFIX, + }); + } else { + // Use managed identity + client = new CosmosClient({ + endpoint, + aadCredentials: dbConfig.credentials ?? new DefaultAzureCredential(), + userAgentSuffix: USER_AGENT_SUFFIX, + } as CosmosClientOptions); + } + } else { + client = dbConfig.client; + } + + this.vectorDistanceFunction = + dbConfig.vectorEmbeddingPolicy?.vectorEmbeddings[0].distanceFunction ?? + "cosine"; + + this.config = { + ...dbConfig, + client, + databaseName: dbConfig.databaseName, + containerName: dbConfig.containerName ?? 
DEFAULT_CONTAINER_NAME, + }; + this.embeddings = embeddings; + this.similarityScoreThreshold = similarityScoreThreshold; + } + + private getLlmCache(llmKey: string) { + const key = getCacheKey(llmKey); + if (!this.cacheDict[key]) { + this.cacheDict[key] = new AzureCosmosDBNoSQLVectorStore( + this.embeddings, + this.config + ); + } + return this.cacheDict[key]; + } + + /** + * Retrieves data from the cache. + * + * @param prompt The prompt for lookup. + * @param llmKey The LLM key used to construct the cache key. + * @returns An array of Generations if found, null otherwise. + */ + public async lookup(prompt: string, llmKey: string) { + const llmCache = this.getLlmCache(llmKey); + + const results = await llmCache.similaritySearchWithScore(prompt, 1); + if (!results.length) return null; + + const generations = results + .flatMap(([document, score]) => { + const isSimilar = + (this.vectorDistanceFunction === "euclidean" && + score <= this.similarityScoreThreshold) || + (this.vectorDistanceFunction !== "euclidean" && + score >= this.similarityScoreThreshold); + + if (!isSimilar) return undefined; + + return document.metadata.return_value.map((gen: string) => + deserializeStoredGeneration(JSON.parse(gen)) + ); + }) + .filter((gen) => gen !== undefined); + + return generations.length > 0 ? generations : null; + } + + /** + * Updates the cache with new data. + * + * @param prompt The prompt for update. + * @param llmKey The LLM key used to construct the cache key. + * @param value The value to be stored in the cache. + */ + public async update( + prompt: string, + llmKey: string, + returnValue: Generation[] + ) { + const serializedGenerations = returnValue.map((generation) => + JSON.stringify(serializeGeneration(generation)) + ); + const llmCache = this.getLlmCache(llmKey); + const metadata = { + llm_string: llmKey, + prompt, + return_value: serializedGenerations, + }; + const doc = new Document({ + pageContent: prompt, + metadata, + }); + await llmCache.addDocuments([doc]); + } + + /** + * deletes the semantic cache for a given llmKey + * @param llmKey + */ + public async clear(llmKey: string) { + const key = getCacheKey(llmKey); + if (this.cacheDict[key]) { + await this.cacheDict[key].delete(); + } + } +} diff --git a/libs/langchain-azure-cosmosdb/src/index.ts b/libs/langchain-azure-cosmosdb/src/index.ts index 04a6453c00c3..e1160c548ef9 100644 --- a/libs/langchain-azure-cosmosdb/src/index.ts +++ b/libs/langchain-azure-cosmosdb/src/index.ts @@ -1,2 +1,3 @@ export * from "./azure_cosmosdb_mongodb.js"; export * from "./azure_cosmosdb_nosql.js"; +export * from "./caches.js"; diff --git a/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts b/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts new file mode 100644 index 000000000000..d6b66ddaac05 --- /dev/null +++ b/libs/langchain-azure-cosmosdb/src/tests/caches.int.test.ts @@ -0,0 +1,244 @@ +/* eslint-disable no-process-env */ +/* eslint-disable @typescript-eslint/no-explicit-any */ + +import { + CosmosClient, + IndexingMode, + VectorEmbeddingPolicy, +} from "@azure/cosmos"; +import { DefaultAzureCredential } from "@azure/identity"; +import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; +import { AzureCosmosDBNoSQLSemanticCache } from "../caches.js"; + +const DATABASE_NAME = "langchainTestCacheDB"; +const CONTAINER_NAME = "testContainer"; + +function indexingPolicy(indexType: any) { + return { + indexingMode: IndexingMode.consistent, + includedPaths: [{ path: "/*" }], + excludedPaths: [{ path: '/"_etag"/?' 
}], + vectorIndexes: [{ path: "/embedding", type: indexType }], + }; +} + +function vectorEmbeddingPolicy( + distanceFunction: "euclidean" | "cosine" | "dotproduct", + dimension: number +): VectorEmbeddingPolicy { + return { + vectorEmbeddings: [ + { + path: "/embedding", + dataType: "float32", + distanceFunction, + dimensions: dimension, + }, + ], + }; +} + +async function initializeCache( + indexType: any, + distanceFunction: any, + similarityThreshold?: number +): Promise { + let cache: AzureCosmosDBNoSQLSemanticCache; + const embeddingModel = new OpenAIEmbeddings(); + const testEmbedding = await embeddingModel.embedDocuments(["sample text"]); + const dimension = Math.min( + testEmbedding[0].length, + indexType === "flat" ? 505 : 4096 + ); + if (process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING) { + cache = new AzureCosmosDBNoSQLSemanticCache( + new OpenAIEmbeddings(), + { + databaseName: DATABASE_NAME, + containerName: CONTAINER_NAME, + connectionString: process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING, + indexingPolicy: indexingPolicy(indexType), + vectorEmbeddingPolicy: vectorEmbeddingPolicy( + distanceFunction, + dimension + ), + }, + similarityThreshold + ); + } else if (process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT) { + cache = new AzureCosmosDBNoSQLSemanticCache( + new OpenAIEmbeddings(), + { + databaseName: DATABASE_NAME, + containerName: CONTAINER_NAME, + endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT, + indexingPolicy: indexingPolicy(indexType), + vectorEmbeddingPolicy: vectorEmbeddingPolicy( + distanceFunction, + dimension + ), + }, + similarityThreshold + ); + } else { + throw new Error( + "Please set the environment variable AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT" + ); + } + return cache; +} + +/* + * To run this test, you need have an Azure Cosmos DB for NoSQL instance + * running. You can deploy a free version on Azure Portal without any cost, + * following this guide: + * https://learn.microsoft.com/azure/cosmos-db/nosql/vector-search + * + * You do not need to create a database or collection, it will be created + * automatically by the test. 
+ * + * Once you have the instance running, you need to set the following environment + * variables before running the test: + * - AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT + * - AZURE_OPENAI_API_KEY + * - AZURE_OPENAI_API_INSTANCE_NAME + * - AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME + * - AZURE_OPENAI_API_VERSION + */ +describe("Azure CosmosDB NoSQL Semantic Cache", () => { + beforeEach(async () => { + let client: CosmosClient; + + if (process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING) { + client = new CosmosClient( + process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING + ); + } else if (process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT) { + client = new CosmosClient({ + endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT, + aadCredentials: new DefaultAzureCredential(), + }); + } else { + throw new Error( + "Please set the environment variable AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT" + ); + } + + // Make sure the database does not exists + try { + await client.database(DATABASE_NAME).delete(); + } catch { + // Ignore error if the database does not exist + } + }); + + it("test AzureCosmosDBNoSqlSemanticCache with cosine quantizedFlat", async () => { + const cache = await initializeCache("quantizedFlat", "cosine"); + const model = new ChatOpenAI({ cache }); + const llmString = JSON.stringify(model._identifyingParams); + await cache.update("foo", llmString, [{ text: "fizz" }]); + + let cacheOutput = await cache.lookup("foo", llmString); + expect(cacheOutput).toEqual([{ text: "fizz" }]); + + cacheOutput = await cache.lookup("bar", llmString); + expect(cacheOutput).toEqual(null); + + await cache.clear(llmString); + }); + + it("test AzureCosmosDBNoSqlSemanticCache with cosine flat", async () => { + const cache = await initializeCache("flat", "cosine"); + const model = new ChatOpenAI({ cache }); + const llmString = JSON.stringify(model._identifyingParams); + await cache.update("foo", llmString, [{ text: "fizz" }]); + + let cacheOutput = await cache.lookup("foo", llmString); + expect(cacheOutput).toEqual([{ text: "fizz" }]); + + cacheOutput = await cache.lookup("bar", llmString); + expect(cacheOutput).toEqual(null); + + await cache.clear(llmString); + }); + + it("test AzureCosmosDBNoSqlSemanticCache with dotProduct quantizedFlat", async () => { + const cache = await initializeCache("quantizedFlat", "dotproduct"); + const model = new ChatOpenAI({ cache }); + const llmString = JSON.stringify(model._identifyingParams); + await cache.update("foo", llmString, [{ text: "fizz" }]); + + let cacheOutput = await cache.lookup("foo", llmString); + expect(cacheOutput).toEqual([{ text: "fizz" }]); + + cacheOutput = await cache.lookup("bar", llmString); + expect(cacheOutput).toEqual(null); + + await cache.clear(llmString); + }); + + it("test AzureCosmosDBNoSqlSemanticCache with dotProduct flat", async () => { + const cache = await initializeCache("flat", "cosine"); + const model = new ChatOpenAI({ cache }); + const llmString = JSON.stringify(model._identifyingParams); + await cache.update("foo", llmString, [{ text: "fizz" }]); + + let cacheOutput = await cache.lookup("foo", llmString); + expect(cacheOutput).toEqual([{ text: "fizz" }]); + + cacheOutput = await cache.lookup("bar", llmString); + expect(cacheOutput).toEqual(null); + + await cache.clear(llmString); + }); + + it("test AzureCosmosDBNoSqlSemanticCache with euclidean quantizedFlat", async () => { + const cache = await initializeCache("quantizedFlat", "euclidean"); + const model = new ChatOpenAI({ 
cache }); + const llmString = JSON.stringify(model._identifyingParams); + await cache.update("foo", llmString, [{ text: "fizz" }]); + + let cacheOutput = await cache.lookup("foo", llmString); + expect(cacheOutput).toEqual([{ text: "fizz" }]); + + cacheOutput = await cache.lookup("bar", llmString); + expect(cacheOutput).toEqual(null); + + await cache.clear(llmString); + }); + + it("test AzureCosmosDBNoSqlSemanticCache with euclidean flat", async () => { + const cache = await initializeCache("flat", "euclidean"); + const model = new ChatOpenAI({ cache }); + const llmString = JSON.stringify(model._identifyingParams); + await cache.update("foo", llmString, [{ text: "fizz" }]); + + let cacheOutput = await cache.lookup("foo", llmString); + expect(cacheOutput).toEqual([{ text: "fizz" }]); + + cacheOutput = await cache.lookup("bar", llmString); + expect(cacheOutput).toEqual(null); + + await cache.clear(llmString); + }); + + it("test AzureCosmosDBNoSqlSemanticCache response according to similarity score", async () => { + const cache = await initializeCache("quantizedFlat", "cosine"); + const model = new ChatOpenAI({ cache }); + const response1 = await model.invoke( + "Where is the headquarter of Microsoft?" + ); + console.log(response1.content); + // gives similarity score of 0.56 which is less than the threshold of 0.6. The cache + // will retun null which will allow the model to generate result. + const response2 = await model.invoke( + "List all Microsoft offices in India." + ); + expect(response2.content).not.toEqual(response1.content); + console.log(response2.content); + // gives similarity score of .63 > 0.6 + const response3 = await model.invoke("Tell me something about Microsoft"); + expect(response3.content).toEqual(response1.content); + console.log(response3.content); + }); +}); diff --git a/libs/langchain-azure-cosmosdb/src/tests/caches.test.ts b/libs/langchain-azure-cosmosdb/src/tests/caches.test.ts new file mode 100644 index 000000000000..9de3f507acc0 --- /dev/null +++ b/libs/langchain-azure-cosmosdb/src/tests/caches.test.ts @@ -0,0 +1,67 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import { jest } from "@jest/globals"; +import { FakeEmbeddings, FakeLLM } from "@langchain/core/utils/testing"; +import { AzureCosmosDBNoSQLSemanticCache } from "../index.js"; + +// Create the mock Cosmos DB client +const createMockClient = () => { + let id = 0; + const client = { + databases: { + createIfNotExists: jest.fn().mockReturnThis(), + get database() { + return this; + }, + containers: { + createIfNotExists: jest.fn().mockReturnThis(), + get container() { + return this; + }, + items: { + create: jest.fn().mockImplementation((doc: any) => ({ + // eslint-disable-next-line no-plusplus + resource: { id: doc.id ?? 
`${id++}` }, + })), + query: jest.fn().mockReturnThis(), + fetchAll: jest.fn().mockImplementation(() => ({ + resources: [ + { + metadata: { + return_value: ['{"text": "fizz"}'], // Simulate stored serialized generation + }, + similarityScore: 0.8, + }, + ], + })), + }, + item: jest.fn().mockReturnThis(), + delete: jest.fn(), + }, + }, + }; + return client; +}; + +describe("AzureCosmosDBNoSQLSemanticCache", () => { + it("should store, retrieve, and clear cache", async () => { + const client = createMockClient(); + const embeddings = new FakeEmbeddings(); + const cache = new AzureCosmosDBNoSQLSemanticCache(embeddings, { + client: client as any, + }); + expect(cache).toBeDefined(); + + const llm = new FakeLLM({}); + const llmString = JSON.stringify(llm._identifyingParams()); + + await cache.update("foo", llmString, [{ text: "fizz" }]); + expect(client.databases.containers.items.create).toHaveBeenCalled(); + + const result = await cache.lookup("foo", llmString); + expect(result).toEqual([{ text: "fizz" }]); + expect(client.databases.containers.items.query).toHaveBeenCalled(); + + await cache.clear(llmString); + expect(client.databases.containers.delete).toHaveBeenCalled(); + }); +}); From bcaa1aa1e8a08f909f09007df9ba39b84ee01748 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 10:02:55 -0800 Subject: [PATCH 053/100] chore(azure-cosmosdb): Release 0.2.1 (#7154) --- libs/langchain-azure-cosmosdb/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-azure-cosmosdb/package.json b/libs/langchain-azure-cosmosdb/package.json index 8b3e38f30e4c..025ae69ed90b 100644 --- a/libs/langchain-azure-cosmosdb/package.json +++ b/libs/langchain-azure-cosmosdb/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/azure-cosmosdb", - "version": "0.2.0", + "version": "0.2.1", "description": "Azure CosmosDB integration for LangChain.js", "type": "module", "engines": { From c8aa29669750c726f3e60a6816a6f5e3dd8cc174 Mon Sep 17 00:00:00 2001 From: Ala Date: Tue, 5 Nov 2024 19:03:11 +0100 Subject: [PATCH 054/100] feat(aws): Ability to override client on chat_models class for more granular options (#7136) Co-authored-by: jacoblee93 --- libs/langchain-aws/src/chat_models.ts | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/libs/langchain-aws/src/chat_models.ts b/libs/langchain-aws/src/chat_models.ts index 10e6f35d2afa..56be9b097b4d 100644 --- a/libs/langchain-aws/src/chat_models.ts +++ b/libs/langchain-aws/src/chat_models.ts @@ -59,6 +59,13 @@ import { export interface ChatBedrockConverseInput extends BaseChatModelParams, Partial { + /** + * The BedrockRuntimeClient to use. + * It gives ability to override the default client with a custom one, allowing you to pass requestHandler {NodeHttpHandler} parameter + * in case it is not provided here. + */ + client?: BedrockRuntimeClient; + /** * Whether or not to stream responses */ @@ -687,10 +694,13 @@ export class ChatBedrockConverse ); } - this.client = new BedrockRuntimeClient({ - region, - credentials, - }); + this.client = + fields?.client ?? + new BedrockRuntimeClient({ + region, + credentials, + }); + this.region = region; this.model = rest?.model ?? this.model; this.streaming = rest?.streaming ?? 
this.streaming; From 8553113c6bf91a436556043d728c2a3fe3b9f97f Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 10:05:13 -0800 Subject: [PATCH 055/100] chore(aws): Release 0.1.2 (#7155) --- libs/langchain-aws/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-aws/package.json b/libs/langchain-aws/package.json index ce8979edcc2d..dfa7745f56bc 100644 --- a/libs/langchain-aws/package.json +++ b/libs/langchain-aws/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/aws", - "version": "0.1.1", + "version": "0.1.2", "description": "LangChain AWS integration", "type": "module", "engines": { From a59a4c8cb078af54eed0557ab1a0958844235e21 Mon Sep 17 00:00:00 2001 From: HyphenHook <113998959+HyphenHook@users.noreply.github.com> Date: Tue, 5 Nov 2024 13:09:14 -0500 Subject: [PATCH 056/100] feat(community) Add support for Tool Calling and Stop Token to ChatDeepInfra (#7126) --- .../src/chat_models/deepinfra.ts | 181 +++++++++++++++--- .../tests/chatdeepinfra.int.test.ts | 32 ++++ 2 files changed, 184 insertions(+), 29 deletions(-) diff --git a/libs/langchain-community/src/chat_models/deepinfra.ts b/libs/langchain-community/src/chat_models/deepinfra.ts index 82626ecc0a9f..20e8835ee922 100644 --- a/libs/langchain-community/src/chat_models/deepinfra.ts +++ b/libs/langchain-community/src/chat_models/deepinfra.ts @@ -1,23 +1,53 @@ import { BaseChatModel, type BaseChatModelParams, + BindToolsInput, + type BaseChatModelCallOptions, } from "@langchain/core/language_models/chat_models"; -import { AIMessage, type BaseMessage } from "@langchain/core/messages"; -import { type ChatResult } from "@langchain/core/outputs"; +import { + AIMessage, + type BaseMessage, + type ToolMessage, + isAIMessage, + type UsageMetadata, + ChatMessage, + type AIMessageChunk, +} from "@langchain/core/messages"; +import { + convertLangChainToolCallToOpenAI, + makeInvalidToolCall, + parseToolCall, +} from "@langchain/core/output_parsers/openai_tools"; +import { type ChatResult, type ChatGeneration } from "@langchain/core/outputs"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; +import { Runnable } from "@langchain/core/runnables"; +import { convertToOpenAITool } from "@langchain/core/utils/function_calling"; +import { BaseLanguageModelInput } from "@langchain/core/language_models/base"; export const DEFAULT_MODEL = "meta-llama/Meta-Llama-3-70B-Instruct"; -export type DeepInfraMessageRole = "system" | "assistant" | "user"; +export type DeepInfraMessageRole = "system" | "assistant" | "user" | "tool"; export const API_BASE_URL = "https://api.deepinfra.com/v1/openai/chat/completions"; export const ENV_VARIABLE_API_KEY = "DEEPINFRA_API_TOKEN"; +type DeepInfraFinishReason = "stop" | "length" | "tool_calls" | "null" | null; + +interface DeepInfraToolCall { + id: string; + type: "function"; + function: { + name: string; + arguments: string; + }; +} + interface DeepInfraMessage { role: DeepInfraMessageRole; content: string; + tool_calls?: DeepInfraToolCall[]; } interface ChatCompletionRequest { @@ -26,6 +56,8 @@ interface ChatCompletionRequest { stream?: boolean; max_tokens?: number | null; temperature?: number | null; + tools?: BindToolsInput[]; + stop?: string[]; } interface BaseResponse { @@ -36,11 +68,12 @@ interface BaseResponse { interface ChoiceMessage { role: string; content: string; + tool_calls?: DeepInfraToolCall[]; } interface ResponseChoice { index: number; - finish_reason: "stop" | "length" | "null" | null; + finish_reason: DeepInfraFinishReason; delta: 
ChoiceMessage; message: ChoiceMessage; } @@ -54,10 +87,15 @@ interface ChatCompletionResponse extends BaseResponse { }; output: { text: string; - finish_reason: "stop" | "length" | "null" | null; + finish_reason: DeepInfraFinishReason; }; } +export interface DeepInfraCallOptions extends BaseChatModelCallOptions { + stop?: string[]; + tools?: BindToolsInput[]; +} + export interface ChatDeepInfraParams { model: string; apiKey?: string; @@ -74,13 +112,76 @@ function messageToRole(message: BaseMessage): DeepInfraMessageRole { return "user"; case "system": return "system"; + case "tool": + return "tool"; default: throw new Error(`Unknown message type: ${type}`); } } +function convertMessagesToDeepInfraParams( + messages: BaseMessage[] +): DeepInfraMessage[] { + return messages.map((message): DeepInfraMessage => { + if (typeof message.content !== "string") { + throw new Error("Non string message content not supported"); + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const completionParam: Record = { + role: messageToRole(message), + content: message.content, + }; + if (message.name != null) { + completionParam.name = message.name; + } + if (isAIMessage(message) && !!message.tool_calls?.length) { + completionParam.tool_calls = message.tool_calls.map( + convertLangChainToolCallToOpenAI + ); + completionParam.content = null; + } else { + if (message.additional_kwargs.tool_calls != null) { + completionParam.tool_calls = message.additional_kwargs.tool_calls; + } + if ((message as ToolMessage).tool_call_id != null) { + completionParam.tool_call_id = (message as ToolMessage).tool_call_id; + } + } + return completionParam as DeepInfraMessage; + }); +} + +function deepInfraResponseToChatMessage( + message: ChoiceMessage, + usageMetadata?: UsageMetadata +): BaseMessage { + switch (message.role) { + case "assistant": { + const toolCalls = []; + const invalidToolCalls = []; + for (const rawToolCall of message.tool_calls ?? []) { + try { + toolCalls.push(parseToolCall(rawToolCall, { returnId: true })); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } catch (e: any) { + invalidToolCalls.push(makeInvalidToolCall(rawToolCall, e.message)); + } + } + return new AIMessage({ + content: message.content || "", + additional_kwargs: { tool_calls: message.tool_calls ?? [] }, + tool_calls: toolCalls, + invalid_tool_calls: invalidToolCalls, + usage_metadata: usageMetadata, + }); + } + default: + return new ChatMessage(message.content || "", message.role ?? "unknown"); + } +} + export class ChatDeepInfra - extends BaseChatModel + extends BaseChatModel implements ChatDeepInfraParams { static lc_name() { @@ -88,7 +189,7 @@ export class ChatDeepInfra } get callKeys() { - return ["stop", "signal", "options"]; + return ["stop", "signal", "options", "tools"]; } apiKey?: string; @@ -118,12 +219,21 @@ export class ChatDeepInfra this.maxTokens = fields.maxTokens; } - invocationParams(): Omit { + invocationParams( + options?: this["ParsedCallOptions"] + ): Omit { + if (options?.tool_choice) { + throw new Error( + "Tool choice is not supported for ChatDeepInfra currently." 
+ ); + } return { model: this.model, stream: false, temperature: this.temperature, max_tokens: this.maxTokens, + tools: options?.tools, + stop: options?.stop, }; } @@ -135,28 +245,14 @@ export class ChatDeepInfra messages: BaseMessage[], options?: this["ParsedCallOptions"] ): Promise { - const parameters = this.invocationParams(); - - const messagesMapped: DeepInfraMessage[] = messages.map((message) => ({ - role: messageToRole(message), - content: message.content as string, - })); + const parameters = this.invocationParams(options); + const messagesMapped = convertMessagesToDeepInfraParams(messages); - const data = await this.completionWithRetry( + const data: ChatCompletionResponse = await this.completionWithRetry( { ...parameters, messages: messagesMapped }, false, options?.signal - ).then((data) => { - if (data?.code) { - throw new Error(data?.message); - } - const { finish_reason, message } = data.choices[0]; - const text = message.content; - return { - ...data, - output: { text, finish_reason }, - }; - }); + ); const { prompt_tokens = 0, @@ -164,10 +260,27 @@ export class ChatDeepInfra total_tokens = 0, } = data.usage ?? {}; - const { text } = data.output; + const usageMetadata: UsageMetadata = { + input_tokens: prompt_tokens, + output_tokens: completion_tokens, + total_tokens, + }; + const generations: ChatGeneration[] = []; + + for (const part of data?.choices ?? []) { + const text = part.message?.content ?? ""; + const generation: ChatGeneration = { + text, + message: deepInfraResponseToChatMessage(part.message, usageMetadata), + }; + if (part.finish_reason) { + generation.generationInfo = { finish_reason: part.finish_reason }; + } + generations.push(generation); + } return { - generations: [{ text, message: new AIMessage(text) }], + generations, llmOutput: { tokenUsage: { promptTokens: prompt_tokens, @@ -182,7 +295,7 @@ export class ChatDeepInfra request: ChatCompletionRequest, stream: boolean, signal?: AbortSignal - ) { + ): Promise { const body = { temperature: this.temperature, max_tokens: this.maxTokens, @@ -209,6 +322,16 @@ export class ChatDeepInfra return this.caller.call(makeCompletionRequest); } + override bindTools( + tools: BindToolsInput[], + kwargs?: Partial + ): Runnable { + return this.bind({ + tools: tools.map((tool) => convertToOpenAITool(tool)), + ...kwargs, + } as DeepInfraCallOptions); + } + _llmType(): string { return "DeepInfra"; } diff --git a/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts index b2b324e6744e..0db5184c2419 100644 --- a/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatdeepinfra.int.test.ts @@ -1,4 +1,6 @@ import { test } from "@jest/globals"; +import { z } from "zod"; +import { zodToJsonSchema } from "zod-to-json-schema"; import { HumanMessage } from "@langchain/core/messages"; import { ChatDeepInfra } from "../deepinfra.js"; @@ -20,4 +22,34 @@ describe("ChatDeepInfra", () => { const res = await deepInfraChat.generate([[message]]); // console.log(JSON.stringify(res, null, 2)); }); + + test("Tool calling", async () => { + const zodSchema = z + .object({ + location: z + .string() + .describe("The name of city to get the weather for."), + }) + .describe( + "Get the weather of a specific location and return the temperature in Celsius." 
+ ); + const deepInfraChat = new ChatDeepInfra().bind({ + tools: [ + { + type: "function", + function: { + name: "get_current_weather", + description: "Get the current weather in a given location", + parameters: zodToJsonSchema(zodSchema), + }, + }, + ], + }); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var + const res = await deepInfraChat.invoke( + "What is the current weather in SF?" + ); + // console.log({ res }); + }); }); From e54c10124d793614031664367622271da8e35114 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 10:33:39 -0800 Subject: [PATCH 057/100] fix(core): Use a single shared client instance for all runs (#7110) --- langchain-core/src/callbacks/manager.ts | 32 ++----------------- langchain-core/src/singletons/tracer.ts | 18 +++++++++++ .../src/tracers/tracer_langchain.ts | 10 ++---- 3 files changed, 22 insertions(+), 38 deletions(-) create mode 100644 langchain-core/src/singletons/tracer.ts diff --git a/langchain-core/src/callbacks/manager.ts b/langchain-core/src/callbacks/manager.ts index 3437bfd7307a..6e517b83aac3 100644 --- a/langchain-core/src/callbacks/manager.ts +++ b/langchain-core/src/callbacks/manager.ts @@ -1267,36 +1267,8 @@ export function ensureHandler( } /** - * @example - * ```typescript - * const prompt = PromptTemplate.fromTemplate(`What is the answer to {question}?`); - * - * // Example of using LLMChain to process a series of questions - * const chain = new LLMChain({ - * llm: new ChatOpenAI({ temperature: 0.9 }), - * prompt, - * }); - * - * // Process questions using the chain - * const processQuestions = async (questions) => { - * for (const question of questions) { - * const result = await chain.call({ question }); - * console.log(result); - * } - * }; - * - * // Example questions - * const questions = [ - * "What is your name?", - * "What is your quest?", - * "What is your favorite color?", - * ]; - * - * // Run the example -const logFunction = handler.raiseError ? console.error : console.warn; -* processQuestions(questions).catch(consolelogFunction; - * - * ``` + * @deprecated Use [`traceable`](https://docs.smith.langchain.com/observability/how_to_guides/tracing/annotate_code) + * from "langsmith" instead. */ export class TraceGroup { private runManager?: CallbackManagerForChainRun; diff --git a/langchain-core/src/singletons/tracer.ts b/langchain-core/src/singletons/tracer.ts new file mode 100644 index 000000000000..89696240705d --- /dev/null +++ b/langchain-core/src/singletons/tracer.ts @@ -0,0 +1,18 @@ +import { Client } from "langsmith"; +import { getEnvironmentVariable } from "../utils/env.js"; + +let client: Client; + +export const getDefaultLangChainClientSingleton = () => { + if (client === undefined) { + const clientParams = + getEnvironmentVariable("LANGCHAIN_CALLBACKS_BACKGROUND") === "false" + ? 
{ + // LangSmith has its own backgrounding system + blockOnRootRunFinalization: true, + } + : {}; + client = new Client(clientParams); + } + return client; +}; diff --git a/langchain-core/src/tracers/tracer_langchain.ts b/langchain-core/src/tracers/tracer_langchain.ts index ad31c309d562..e5719125df51 100644 --- a/langchain-core/src/tracers/tracer_langchain.ts +++ b/langchain-core/src/tracers/tracer_langchain.ts @@ -11,6 +11,7 @@ import { import { getEnvironmentVariable, getRuntimeEnvironment } from "../utils/env.js"; import { BaseTracer } from "./base.js"; import { BaseCallbackHandlerInput } from "../callbacks/base.js"; +import { getDefaultLangChainClientSingleton } from "../singletons/tracer.js"; export interface Run extends BaseRun { id: string; @@ -59,14 +60,7 @@ export class LangChainTracer getEnvironmentVariable("LANGCHAIN_PROJECT") ?? getEnvironmentVariable("LANGCHAIN_SESSION"); this.exampleId = exampleId; - const clientParams = - getEnvironmentVariable("LANGCHAIN_CALLBACKS_BACKGROUND") === "false" - ? { - // LangSmith has its own backgrounding system - blockOnRootRunFinalization: true, - } - : {}; - this.client = client ?? new Client(clientParams); + this.client = client ?? getDefaultLangChainClientSingleton(); const traceableTree = LangChainTracer.getTraceableRunTree(); if (traceableTree) { From 7a9eac7bf760f8cd12690ad40e51e5d8dd8a2322 Mon Sep 17 00:00:00 2001 From: Pavlo Sobchuk Date: Tue, 5 Nov 2024 19:17:55 +0000 Subject: [PATCH 058/100] docs(core): VectorStore and Retriever: types, interfaces, classes (#7141) --- docs/core_docs/.gitignore | 34 +- langchain-core/src/retrievers/index.ts | 94 ++++- langchain-core/src/vectorstores.ts | 530 ++++++++++++++++++++++++- 3 files changed, 626 insertions(+), 32 deletions(-) diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore index 55864ace8b3e..099af2f2d642 100644 --- a/docs/core_docs/.gitignore +++ b/docs/core_docs/.gitignore @@ -252,12 +252,6 @@ docs/integrations/vectorstores/elasticsearch.md docs/integrations/vectorstores/elasticsearch.mdx docs/integrations/vectorstores/chroma.md docs/integrations/vectorstores/chroma.mdx -docs/integrations/toolkits/vectorstore.md -docs/integrations/toolkits/vectorstore.mdx -docs/integrations/toolkits/sql.md -docs/integrations/toolkits/sql.mdx -docs/integrations/toolkits/openapi.md -docs/integrations/toolkits/openapi.mdx docs/integrations/tools/tavily_search.md docs/integrations/tools/tavily_search.mdx docs/integrations/tools/serpapi.md @@ -266,6 +260,12 @@ docs/integrations/tools/exa_search.md docs/integrations/tools/exa_search.mdx docs/integrations/tools/duckduckgo_search.md docs/integrations/tools/duckduckgo_search.mdx +docs/integrations/toolkits/vectorstore.md +docs/integrations/toolkits/vectorstore.mdx +docs/integrations/toolkits/sql.md +docs/integrations/toolkits/sql.mdx +docs/integrations/toolkits/openapi.md +docs/integrations/toolkits/openapi.mdx docs/integrations/text_embedding/togetherai.md docs/integrations/text_embedding/togetherai.mdx docs/integrations/text_embedding/openai.md @@ -376,16 +376,6 @@ docs/integrations/retrievers/self_query/hnswlib.md docs/integrations/retrievers/self_query/hnswlib.mdx docs/integrations/retrievers/self_query/chroma.md docs/integrations/retrievers/self_query/chroma.mdx -docs/integrations/document_loaders/file_loaders/unstructured.md -docs/integrations/document_loaders/file_loaders/unstructured.mdx -docs/integrations/document_loaders/file_loaders/text.md -docs/integrations/document_loaders/file_loaders/text.mdx 
-docs/integrations/document_loaders/file_loaders/pdf.md -docs/integrations/document_loaders/file_loaders/pdf.mdx -docs/integrations/document_loaders/file_loaders/directory.md -docs/integrations/document_loaders/file_loaders/directory.mdx -docs/integrations/document_loaders/file_loaders/csv.md -docs/integrations/document_loaders/file_loaders/csv.mdx docs/integrations/document_loaders/web_loaders/web_puppeteer.md docs/integrations/document_loaders/web_loaders/web_puppeteer.mdx docs/integrations/document_loaders/web_loaders/web_cheerio.md @@ -397,4 +387,14 @@ docs/integrations/document_loaders/web_loaders/pdf.mdx docs/integrations/document_loaders/web_loaders/langsmith.md docs/integrations/document_loaders/web_loaders/langsmith.mdx docs/integrations/document_loaders/web_loaders/firecrawl.md -docs/integrations/document_loaders/web_loaders/firecrawl.mdx \ No newline at end of file +docs/integrations/document_loaders/web_loaders/firecrawl.mdx +docs/integrations/document_loaders/file_loaders/unstructured.md +docs/integrations/document_loaders/file_loaders/unstructured.mdx +docs/integrations/document_loaders/file_loaders/text.md +docs/integrations/document_loaders/file_loaders/text.mdx +docs/integrations/document_loaders/file_loaders/pdf.md +docs/integrations/document_loaders/file_loaders/pdf.mdx +docs/integrations/document_loaders/file_loaders/directory.md +docs/integrations/document_loaders/file_loaders/directory.mdx +docs/integrations/document_loaders/file_loaders/csv.md +docs/integrations/document_loaders/file_loaders/csv.mdx \ No newline at end of file diff --git a/langchain-core/src/retrievers/index.ts b/langchain-core/src/retrievers/index.ts index a544dedce7b9..81798e364039 100644 --- a/langchain-core/src/retrievers/index.ts +++ b/langchain-core/src/retrievers/index.ts @@ -10,7 +10,24 @@ import { Runnable, type RunnableInterface } from "../runnables/base.js"; import { RunnableConfig, ensureConfig } from "../runnables/config.js"; /** - * Base Retriever class. All indexes should extend this class. + * Input configuration options for initializing a retriever that extends + * the `BaseRetriever` class. This interface provides base properties + * common to all retrievers, allowing customization of callback functions, + * tagging, metadata, and logging verbosity. + * + * Fields: + * - `callbacks` (optional): An array of callback functions that handle various + * events during retrieval, such as logging, error handling, or progress updates. + * + * - `tags` (optional): An array of strings used to add contextual tags to + * retrieval operations, allowing for easier categorization and tracking. + * + * - `metadata` (optional): A record of key-value pairs to store additional + * contextual information for retrieval operations, which can be useful + * for logging or auditing purposes. + * + * - `verbose` (optional): A boolean flag that, if set to `true`, enables + * detailed logging and output during the retrieval process. Defaults to `false`. */ export interface BaseRetrieverInput { callbacks?: Callbacks; @@ -19,10 +36,30 @@ export interface BaseRetrieverInput { verbose?: boolean; } +/** + * Interface for a base retriever that defines core functionality for + * retrieving relevant documents from a source based on a query. + * + * The `BaseRetrieverInterface` standardizes the `getRelevantDocuments` method, + * enabling retrieval of documents that match the query criteria. + * + * @template Metadata - The type of metadata associated with each document, + * defaulting to `Record`. 
+ */ export interface BaseRetrieverInterface< // eslint-disable-next-line @typescript-eslint/no-explicit-any Metadata extends Record = Record > extends RunnableInterface[]> { + /** + * Retrieves documents relevant to a given query, allowing optional + * configurations for customization. + * + * @param query - A string representing the query to search for relevant documents. + * @param config - (optional) Configuration options for the retrieval process, + * which may include callbacks and additional context settings. + * @returns A promise that resolves to an array of `DocumentInterface` instances, + * each containing metadata specified by the `Metadata` type parameter. + */ getRelevantDocuments( query: string, config?: Callbacks | BaseCallbackConfig @@ -30,9 +67,16 @@ export interface BaseRetrieverInterface< } /** - * Abstract base class for a Document retrieval system. A retrieval system - * is defined as something that can take string queries and return the - * most 'relevant' Documents from some source. + * Abstract base class for a document retrieval system, designed to + * process string queries and return the most relevant documents from a source. + * + * `BaseRetriever` provides common properties and methods for derived retrievers, + * such as callbacks, tagging, and verbose logging. Custom retrieval systems + * should extend this class and implement `_getRelevantDocuments` to define + * the specific retrieval logic. + * + * @template Metadata - The type of metadata associated with each document, + * defaulting to `Record`. */ export abstract class BaseRetriever< // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -41,14 +85,33 @@ export abstract class BaseRetriever< extends Runnable[]> implements BaseRetrieverInterface { + /** + * Optional callbacks to handle various events in the retrieval process. + */ callbacks?: Callbacks; + /** + * Tags to label or categorize the retrieval operation. + */ tags?: string[]; + /** + * Metadata to provide additional context or information about the retrieval + * operation. + */ metadata?: Record; + /** + * If set to `true`, enables verbose logging for the retrieval process. + */ verbose?: boolean; + /** + * Constructs a new `BaseRetriever` instance with optional configuration fields. + * + * @param fields - Optional input configuration that can include `callbacks`, + * `tags`, `metadata`, and `verbose` settings for custom retriever behavior. + */ constructor(fields?: BaseRetrieverInput) { super(fields); this.callbacks = fields?.callbacks; @@ -62,6 +125,20 @@ export abstract class BaseRetriever< * changes to people currently using subclassed custom retrievers. * Change it on next major release. */ + /** + * Placeholder method for retrieving relevant documents based on a query. + * + * This method is intended to be implemented by subclasses and will be + * converted to an abstract method in the next major release. Currently, it + * throws an error if not implemented, ensuring that custom retrievers define + * the specific retrieval logic. + * + * @param _query - The query string used to search for relevant documents. + * @param _callbacks - (optional) Callback manager for managing callbacks + * during retrieval. + * @returns A promise resolving to an array of `DocumentInterface` instances relevant to the query. + * @throws {Error} Throws an error indicating the method is not implemented. 
+ */ _getRelevantDocuments( _query: string, _callbacks?: CallbackManagerForRetrieverRun @@ -69,6 +146,15 @@ export abstract class BaseRetriever< throw new Error("Not implemented!"); } + /** + * Executes a retrieval operation. + * + * @param input - The query string used to search for relevant documents. + * @param options - (optional) Configuration options for the retrieval run, + * which may include callbacks, tags, and metadata. + * @returns A promise that resolves to an array of `DocumentInterface` instances + * representing the most relevant documents to the query. + */ async invoke( input: string, options?: RunnableConfig diff --git a/langchain-core/src/vectorstores.ts b/langchain-core/src/vectorstores.ts index da5ef1ce2e72..5bfa07464779 100644 --- a/langchain-core/src/vectorstores.ts +++ b/langchain-core/src/vectorstores.ts @@ -18,7 +18,34 @@ import { type AddDocumentOptions = Record; /** - * Type for options when performing a maximal marginal relevance search. + * Options for configuring a maximal marginal relevance (MMR) search. + * + * MMR search optimizes for both similarity to the query and diversity + * among the results, balancing the retrieval of relevant documents + * with variation in the content returned. + * + * Fields: + * + * - `fetchK` (optional): The initial number of documents to retrieve from the + * vector store before applying the MMR algorithm. This larger set provides a + * pool of documents from which the algorithm can select the most diverse + * results based on relevance to the query. + * + * - `filter` (optional): A filter of type `FilterType` to refine the search + * results, allowing additional conditions to target specific subsets + * of documents. + * + * - `k`: The number of documents to return in the final results. This is the + * primary count of documents that are most relevant to the query. + * + * - `lambda` (optional): A value between 0 and 1 that determines the balance + * between relevance and diversity: + * - A `lambda` of 0 emphasizes diversity, maximizing content variation. + * - A `lambda` of 1 emphasizes similarity to the query, focusing on relevance. + * Values between 0 and 1 provide a mix of relevance and diversity. + * + * @template FilterType - The type used for filtering results, as defined + * by the vector store. */ export type MaxMarginalRelevanceSearchOptions = { k: number; @@ -28,8 +55,23 @@ export type MaxMarginalRelevanceSearchOptions = { }; /** - * Type for options when performing a maximal marginal relevance search - * with the VectorStoreRetriever. + * Options for configuring a maximal marginal relevance (MMR) search + * when using the `VectorStoreRetriever`. + * + * These parameters control how the MMR algorithm balances relevance to the + * query and diversity among the retrieved documents. + * + * Fields: + * - `fetchK` (optional): Specifies the initial number of documents to fetch + * before applying the MMR algorithm. This larger set provides a pool of + * documents from which the algorithm can select the most diverse results + * based on relevance to the query. + * + * - `lambda` (optional): A value between 0 and 1 that determines the balance + * between relevance and diversity: + * - A `lambda` of 0 maximizes diversity among the results, prioritizing varied content. + * - A `lambda` of 1 maximizes similarity to the query, prioritizing relevance. + * Values between 0 and 1 provide a mix of relevance and diversity. 
*/ export type VectorStoreRetrieverMMRSearchKwargs = { fetchK?: number; @@ -37,7 +79,50 @@ export type VectorStoreRetrieverMMRSearchKwargs = { }; /** - * Type for input when creating a VectorStoreRetriever instance. + * Input configuration options for creating a `VectorStoreRetriever` instance. + * + * This type combines properties from `BaseRetrieverInput` with specific settings + * for the `VectorStoreRetriever`, including options for similarity or maximal + * marginal relevance (MMR) search types. + * + * Fields: + * + * - `callbacks` (optional): An array of callback functions that handle various + * events during retrieval, such as logging, error handling, or progress updates. + * + * - `tags` (optional): An array of strings used to add contextual tags to + * retrieval operations, allowing for easier categorization and tracking. + * + * - `metadata` (optional): A record of key-value pairs to store additional + * contextual information for retrieval operations, which can be useful + * for logging or auditing purposes. + * + * - `verbose` (optional): A boolean flag that, if set to `true`, enables + * detailed logging and output during the retrieval process. Defaults to `false`. + * + * - `vectorStore`: The `VectorStore` instance implementing `VectorStoreInterface` + * that will be used for document storage and retrieval. + * + * - `k` (optional): Specifies the number of documents to retrieve per search + * query. Defaults to 4 if not specified. + * + * - `filter` (optional): A filter of type `FilterType` (defined by the vector store) + * to refine the set of documents returned, allowing for targeted search results. + * + * - `searchType`: Determines the type of search to perform: + * - `"similarity"`: Executes a similarity search, retrieving documents based purely + * on vector similarity to the query. + * - `"mmr"`: Executes a maximal marginal relevance (MMR) search, balancing similarity + * and diversity in the search results. + * + * - `searchKwargs` (optional): Used only if `searchType` is `"mmr"`, this object + * provides additional options for MMR search, including: + * - `fetchK`: Specifies the number of documents to initially fetch before applying + * the MMR algorithm, providing a pool from which the most diverse results are selected. + * - `lambda`: A diversity parameter, where 0 emphasizes diversity and 1 emphasizes + * relevance to the query. Values between 0 and 1 provide a balance of relevance and diversity. + * + * @template V - The type of vector store implementing `VectorStoreInterface`. */ export type VectorStoreRetrieverInput = BaseRetrieverInput & @@ -57,11 +142,34 @@ export type VectorStoreRetrieverInput = } ); +/** + * Interface for a retriever that uses a vector store to store and retrieve + * document embeddings. This retriever interface allows for adding documents + * to the underlying vector store and conducting retrieval operations. + * + * `VectorStoreRetrieverInterface` extends `BaseRetrieverInterface` to provide + * document retrieval capabilities based on vector similarity. + * + * @interface VectorStoreRetrieverInterface + * @extends BaseRetrieverInterface + */ export interface VectorStoreRetrieverInterface< V extends VectorStoreInterface = VectorStoreInterface > extends BaseRetrieverInterface { vectorStore: V; + /** + * Adds an array of documents to the vector store. + * + * This method embeds the provided documents and stores them within the + * vector store. Additional options can be specified for custom behavior + * during the addition process. 
+ * + * @param documents - An array of documents to embed and add to the vector store. + * @param options - Optional settings to customize document addition. + * @returns A promise that resolves to an array of document IDs or `void`, + * depending on the implementation. + */ addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions @@ -69,8 +177,17 @@ export interface VectorStoreRetrieverInterface< } /** - * Class for performing document retrieval from a VectorStore. Can perform - * similarity search or maximal marginal relevance search. + * Class for retrieving documents from a `VectorStore` based on vector similarity + * or maximal marginal relevance (MMR). + * + * `VectorStoreRetriever` extends `BaseRetriever`, implementing methods for + * adding documents to the underlying vector store and performing document + * retrieval with optional configurations. + * + * @class VectorStoreRetriever + * @extends BaseRetriever + * @implements VectorStoreRetrieverInterface + * @template V - Type of vector store implementing `VectorStoreInterface`. */ export class VectorStoreRetriever< V extends VectorStoreInterface = VectorStoreInterface @@ -86,20 +203,99 @@ export class VectorStoreRetriever< return ["langchain_core", "vectorstores"]; } + /** + * The instance of `VectorStore` used for storing and retrieving document embeddings. + * This vector store must implement the `VectorStoreInterface` to be compatible + * with the retriever’s operations. + */ vectorStore: V; + /** + * Specifies the number of documents to retrieve for each search query. + * Defaults to 4 if not specified, providing a basic result count for similarity or MMR searches. + */ k = 4; + /** + * Determines the type of search operation to perform on the vector store. + * + * - `"similarity"` (default): Conducts a similarity search based purely on vector similarity + * to the query. + * - `"mmr"`: Executes a maximal marginal relevance (MMR) search, balancing relevance and + * diversity in the retrieved results. + */ searchType = "similarity"; + /** + * Additional options specific to maximal marginal relevance (MMR) search, applicable + * only if `searchType` is set to `"mmr"`. + * + * Includes: + * - `fetchK`: The initial number of documents fetched before applying the MMR algorithm, + * allowing for a larger selection from which to choose the most diverse results. + * - `lambda`: A parameter between 0 and 1 to adjust the relevance-diversity balance, + * where 0 prioritizes diversity and 1 prioritizes relevance. + */ searchKwargs?: VectorStoreRetrieverMMRSearchKwargs; + /** + * Optional filter applied to search results, defined by the `FilterType` of the vector store. + * Allows for refined, targeted results by restricting the returned documents based + * on specified filter criteria. + */ filter?: V["FilterType"]; + /** + * Returns the type of vector store, as defined by the `vectorStore` instance. + * + * @returns {string} The vector store type. + */ _vectorstoreType(): string { return this.vectorStore._vectorstoreType(); } + /** + * Initializes a new instance of `VectorStoreRetriever` with the specified configuration. + * + * This constructor configures the retriever to interact with a given `VectorStore` + * and supports different retrieval strategies, including similarity search and maximal + * marginal relevance (MMR) search. Various options allow customization of the number + * of documents retrieved per query, filtering based on conditions, and fine-tuning + * MMR-specific parameters. 
+ * + * @param fields - Configuration options for setting up the retriever: + * + * - `vectorStore` (required): The `VectorStore` instance implementing `VectorStoreInterface` + * that will be used to store and retrieve document embeddings. This is the core component + * of the retriever, enabling vector-based similarity and MMR searches. + * + * - `k` (optional): Specifies the number of documents to retrieve per search query. If not + * provided, defaults to 4. This count determines the number of most relevant documents returned + * for each search operation, balancing performance with comprehensiveness. + * + * - `searchType` (optional): Defines the search approach used by the retriever, allowing for + * flexibility between two methods: + * - `"similarity"` (default): A similarity-based search, retrieving documents with high vector + * similarity to the query. This type prioritizes relevance and is often used when diversity + * among results is less critical. + * - `"mmr"`: Maximal Marginal Relevance search, which combines relevance with diversity. MMR + * is useful for scenarios where varied content is essential, as it selects results that + * both match the query and introduce content diversity. + * + * - `filter` (optional): A filter of type `FilterType`, defined by the vector store, that allows + * for refined and targeted search results. This filter applies specified conditions to limit + * which documents are eligible for retrieval, offering control over the scope of results. + * + * - `searchKwargs` (optional, applicable only if `searchType` is `"mmr"`): Additional settings + * for configuring MMR-specific behavior. These parameters allow further tuning of the MMR + * search process: + * - `fetchK`: The initial number of documents fetched from the vector store before the MMR + * algorithm is applied. Fetching a larger set enables the algorithm to select a more + * diverse subset of documents. + * - `lambda`: A parameter controlling the relevance-diversity balance, where 0 emphasizes + * diversity and 1 prioritizes relevance. Intermediate values provide a blend of the two, + * allowing customization based on the importance of content variety relative to query relevance. + */ constructor(fields: VectorStoreRetrieverInput) { super(fields); this.vectorStore = fields.vectorStore; @@ -111,6 +307,22 @@ export class VectorStoreRetriever< } } + /** + * Retrieves relevant documents based on the specified query, using either + * similarity or maximal marginal relevance (MMR) search. + * + * If `searchType` is set to `"mmr"`, performs an MMR search to balance + * similarity and diversity among results. If `searchType` is `"similarity"`, + * retrieves results purely based on similarity to the query. + * + * @param query - The query string used to find relevant documents. + * @param runManager - Optional callback manager for tracking retrieval progress. + * @returns A promise that resolves to an array of `DocumentInterface` instances + * representing the most relevant documents to the query. + * @throws {Error} Throws an error if MMR search is requested but not supported + * by the vector store. + * @protected + */ async _getRelevantDocuments( query: string, runManager?: CallbackManagerForRetrieverRun @@ -139,6 +351,18 @@ export class VectorStoreRetriever< ); } + /** + * Adds an array of documents to the vector store, embedding them as part of + * the storage process. + * + * This method delegates document embedding and storage to the `addDocuments` + * method of the underlying vector store. 
+ * + * @param documents - An array of documents to embed and add to the vector store. + * @param options - Optional settings to customize document addition. + * @returns A promise that resolves to an array of document IDs or `void`, + * depending on the vector store's implementation. + */ async addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions @@ -147,33 +371,101 @@ export class VectorStoreRetriever< } } +/** + * Interface defining the structure and operations of a vector store, which + * facilitates the storage, retrieval, and similarity search of document vectors. + * + * `VectorStoreInterface` provides methods for adding, deleting, and searching + * documents based on vector embeddings, including support for similarity + * search with optional filtering and relevance-based retrieval. + * + * @extends Serializable + */ export interface VectorStoreInterface extends Serializable { + /** + * Defines the filter type used in search and delete operations. Can be an + * object for structured conditions or a string for simpler filtering. + */ FilterType: object | string; + /** + * Instance of `EmbeddingsInterface` used to generate vector embeddings for + * documents, enabling vector-based search operations. + */ embeddings: EmbeddingsInterface; + /** + * Returns a string identifying the type of vector store implementation, + * useful for distinguishing between different vector storage backends. + * + * @returns {string} A string indicating the vector store type. + */ _vectorstoreType(): string; + /** + * Adds precomputed vectors and their corresponding documents to the vector store. + * + * @param vectors - An array of vectors, with each vector representing a document. + * @param documents - An array of `DocumentInterface` instances corresponding to each vector. + * @param options - Optional configurations for adding documents, potentially covering indexing or metadata handling. + * @returns A promise that resolves to an array of document IDs or void, depending on implementation. + */ addVectors( vectors: number[][], documents: DocumentInterface[], options?: AddDocumentOptions ): Promise; + /** + * Adds an array of documents to the vector store. + * + * @param documents - An array of documents to be embedded and stored in the vector store. + * @param options - Optional configurations for embedding and storage operations. + * @returns A promise that resolves to an array of document IDs or void, depending on implementation. + */ addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions ): Promise; + /** + * Deletes documents from the vector store based on the specified parameters. + * + * @param _params - A flexible object containing key-value pairs that define + * the conditions for selecting documents to delete. + * @returns A promise that resolves once the deletion operation is complete. + */ // eslint-disable-next-line @typescript-eslint/no-explicit-any delete(_params?: Record): Promise; + /** + * Searches for documents similar to a given vector query and returns them + * with similarity scores. + * + * @param query - A vector representing the query for similarity search. + * @param k - The number of similar documents to return. + * @param filter - Optional filter based on `FilterType` to restrict results. + * @returns A promise that resolves to an array of tuples, each containing a + * `DocumentInterface` and its corresponding similarity score. 
+ */ similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[DocumentInterface, number][]>; + /** + * Searches for documents similar to a text query, embedding the query + * and retrieving documents based on vector similarity. + * + * @param query - The text query to search for. + * @param k - Optional number of similar documents to return. + * @param filter - Optional filter based on `FilterType` to restrict results. + * @param callbacks - Optional callbacks for tracking progress or events + * during the search process. + * @returns A promise that resolves to an array of `DocumentInterface` + * instances representing similar documents. + */ similaritySearch( query: string, k?: number, @@ -181,6 +473,18 @@ export interface VectorStoreInterface extends Serializable { callbacks?: Callbacks ): Promise; + /** + * Searches for documents similar to a text query and includes similarity + * scores in the result. + * + * @param query - The text query to search for. + * @param k - Optional number of similar documents to return. + * @param filter - Optional filter based on `FilterType` to restrict results. + * @param callbacks - Optional callbacks for tracking progress or events + * during the search process. + * @returns A promise that resolves to an array of tuples, each containing + * a `DocumentInterface` and its similarity score. + */ similaritySearchWithScore( query: string, k?: number, @@ -209,6 +513,20 @@ export interface VectorStoreInterface extends Serializable { callbacks: Callbacks | undefined ): Promise; + /** + * Converts the vector store into a retriever, making it suitable for use in + * retrieval-based workflows and allowing additional configuration. + * + * @param kOrFields - Optional parameter for specifying either the number of + * documents to retrieve or partial retriever configurations. + * @param filter - Optional filter based on `FilterType` for retrieval restriction. + * @param callbacks - Optional callbacks for tracking retrieval events or progress. + * @param tags - General-purpose tags to add contextual information to the retriever. + * @param metadata - General-purpose metadata providing additional context + * for retrieval. + * @param verbose - If `true`, enables detailed logging during retrieval. + * @returns An instance of `VectorStoreRetriever` configured with the specified options. + */ asRetriever( kOrFields?: number | Partial>, filter?: this["FilterType"], @@ -220,9 +538,17 @@ export interface VectorStoreInterface extends Serializable { } /** - * Abstract class representing a store of vectors. Provides methods for - * adding vectors and documents, deleting from the store, and searching - * the store. + * Abstract class representing a vector storage system for performing + * similarity searches on embedded documents. + * + * `VectorStore` provides methods for adding precomputed vectors or documents, + * removing documents based on criteria, and performing similarity searches + * with optional scoring. Subclasses are responsible for implementing specific + * storage mechanisms and the exact behavior of certain abstract methods. + * + * @abstract + * @extends Serializable + * @implements VectorStoreInterface */ export abstract class VectorStore extends Serializable @@ -230,41 +556,108 @@ export abstract class VectorStore { declare FilterType: object | string; + /** + * Namespace within LangChain to uniquely identify this vector store's + * location, based on the vector store type. 
+ * + * @internal + */ // Only ever instantiated in main LangChain lc_namespace = ["langchain", "vectorstores", this._vectorstoreType()]; + /** + * Embeddings interface for generating vector embeddings from text queries, + * enabling vector-based similarity searches. + */ embeddings: EmbeddingsInterface; + /** + * Initializes a new vector store with embeddings and database configuration. + * + * @param embeddings - Instance of `EmbeddingsInterface` used to embed queries. + * @param dbConfig - Configuration settings for the database or storage system. + */ // eslint-disable-next-line @typescript-eslint/no-explicit-any constructor(embeddings: EmbeddingsInterface, dbConfig: Record) { super(dbConfig); this.embeddings = embeddings; } + /** + * Returns a string representing the type of vector store, which subclasses + * must implement to identify their specific vector storage type. + * + * @returns {string} A string indicating the vector store type. + * @abstract + */ abstract _vectorstoreType(): string; + /** + * Adds precomputed vectors and corresponding documents to the vector store. + * + * @param vectors - An array of vectors representing each document. + * @param documents - Array of documents associated with each vector. + * @param options - Optional configuration for adding vectors, such as indexing. + * @returns A promise resolving to an array of document IDs or void, based on implementation. + * @abstract + */ abstract addVectors( vectors: number[][], documents: DocumentInterface[], options?: AddDocumentOptions ): Promise; + /** + * Adds documents to the vector store, embedding them first through the + * `embeddings` instance. + * + * @param documents - Array of documents to embed and add. + * @param options - Optional configuration for embedding and storing documents. + * @returns A promise resolving to an array of document IDs or void, based on implementation. + * @abstract + */ abstract addDocuments( documents: DocumentInterface[], options?: AddDocumentOptions ): Promise; + /** + * Deletes documents from the vector store based on the specified parameters. + * + * @param _params - Flexible key-value pairs defining conditions for document deletion. + * @returns A promise that resolves once the deletion is complete. + */ // eslint-disable-next-line @typescript-eslint/no-explicit-any async delete(_params?: Record): Promise { throw new Error("Not implemented."); } + /** + * Performs a similarity search using a vector query and returns results + * along with their similarity scores. + * + * @param query - Vector representing the search query. + * @param k - Number of similar results to return. + * @param filter - Optional filter based on `FilterType` to restrict results. + * @returns A promise resolving to an array of tuples containing documents and their similarity scores. + * @abstract + */ abstract similaritySearchVectorWithScore( query: number[], k: number, filter?: this["FilterType"] ): Promise<[DocumentInterface, number][]>; + /** + * Searches for documents similar to a text query by embedding the query and + * performing a similarity search on the resulting vector. + * + * @param query - Text query for finding similar documents. + * @param k - Number of similar results to return. Defaults to 4. + * @param filter - Optional filter based on `FilterType`. + * @param _callbacks - Optional callbacks for monitoring search progress + * @returns A promise resolving to an array of `DocumentInterface` instances representing similar documents. 
+ */ async similaritySearch( query: string, k = 4, @@ -280,6 +673,17 @@ export abstract class VectorStore return results.map((result) => result[0]); } + /** + * Searches for documents similar to a text query by embedding the query, + * and returns results with similarity scores. + * + * @param query - Text query for finding similar documents. + * @param k - Number of similar results to return. Defaults to 4. + * @param filter - Optional filter based on `FilterType`. + * @param _callbacks - Optional callbacks for monitoring search progress + * @returns A promise resolving to an array of tuples, each containing a + * document and its similarity score. + */ async similaritySearchWithScore( query: string, k = 4, @@ -314,6 +718,21 @@ export abstract class VectorStore _callbacks: Callbacks | undefined // implement passing to embedQuery later ): Promise; + /** + * Creates a `VectorStore` instance from an array of text strings and optional + * metadata, using the specified embeddings and database configuration. + * + * Subclasses must implement this method to define how text and metadata + * are embedded and stored in the vector store. Throws an error if not overridden. + * + * @param _texts - Array of strings representing the text documents to be stored. + * @param _metadatas - Metadata for the texts, either as an array (one for each text) + * or a single object (applied to all texts). + * @param _embeddings - Instance of `EmbeddingsInterface` to embed the texts. + * @param _dbConfig - Database configuration settings. + * @returns A promise that resolves to a new `VectorStore` instance. + * @throws {Error} Throws an error if this method is not overridden by a subclass. + */ static fromTexts( _texts: string[], _metadatas: object[] | object, @@ -326,6 +745,19 @@ export abstract class VectorStore ); } + /** + * Creates a `VectorStore` instance from an array of documents, using the specified + * embeddings and database configuration. + * + * Subclasses must implement this method to define how documents are embedded + * and stored. Throws an error if not overridden. + * + * @param _docs - Array of `DocumentInterface` instances representing the documents to be stored. + * @param _embeddings - Instance of `EmbeddingsInterface` to embed the documents. + * @param _dbConfig - Database configuration settings. + * @returns A promise that resolves to a new `VectorStore` instance. + * @throws {Error} Throws an error if this method is not overridden by a subclass. + */ static fromDocuments( _docs: DocumentInterface[], _embeddings: EmbeddingsInterface, @@ -337,6 +769,44 @@ export abstract class VectorStore ); } + /** + * Creates a `VectorStoreRetriever` instance with flexible configuration options. + * + * @param kOrFields + * - If a number is provided, it sets the `k` parameter (number of items to retrieve). + * - If an object is provided, it should contain various configuration options. + * @param filter + * - Optional filter criteria to limit the items retrieved based on the specified filter type. + * @param callbacks + * - Optional callbacks that may be triggered at specific stages of the retrieval process. + * @param tags + * - Tags to categorize or label the `VectorStoreRetriever`. Defaults to an empty array if not provided. + * @param metadata + * - Additional metadata as key-value pairs to add contextual information for the retrieval process. + * @param verbose + * - If `true`, enables detailed logging for the retrieval process. Defaults to `false`. 
+ * + * @returns + * - A configured `VectorStoreRetriever` instance based on the provided parameters. + * + * @example + * Basic usage with a `k` value: + * ```typescript + * const retriever = myVectorStore.asRetriever(5); + * ``` + * + * Usage with a configuration object: + * ```typescript + * const retriever = myVectorStore.asRetriever({ + * k: 10, + * filter: myFilter, + * tags: ['example', 'test'], + * verbose: true, + * searchType: 'mmr', + * searchKwargs: { alpha: 0.5 }, + * }); + * ``` + */ asRetriever( kOrFields?: number | Partial>, filter?: this["FilterType"], @@ -378,12 +848,50 @@ export abstract class VectorStore } /** - * Abstract class extending VectorStore with functionality for saving and - * loading the vector store. + * Abstract class extending `VectorStore` that defines a contract for saving + * and loading vector store instances. + * + * The `SaveableVectorStore` class allows vector store implementations to + * persist their data and retrieve it when needed.The format for saving and + * loading data is left to the implementing subclass. + * + * Subclasses must implement the `save` method to handle their custom + * serialization logic, while the `load` method enables reconstruction of a + * vector store from saved data, requiring compatible embeddings through the + * `EmbeddingsInterface`. + * + * @abstract + * @extends VectorStore */ export abstract class SaveableVectorStore extends VectorStore { + /** + * Saves the current state of the vector store to the specified directory. + * + * This method must be implemented by subclasses to define their own + * serialization process for persisting vector data. The implementation + * determines the structure and format of the saved data. + * + * @param directory - The directory path where the vector store data + * will be saved. + * @abstract + */ abstract save(directory: string): Promise; + /** + * Loads a vector store instance from the specified directory, using the + * provided embeddings to ensure compatibility. + * + * This static method reconstructs a `SaveableVectorStore` from previously + * saved data. Implementations should interpret the saved data format to + * recreate the vector store instance. + * + * @param _directory - The directory path from which the vector store + * data will be loaded. + * @param _embeddings - An instance of `EmbeddingsInterface` to align + * the embeddings with the loaded vector data. + * @returns A promise that resolves to a `SaveableVectorStore` instance + * constructed from the saved data. 
+ */ static load( _directory: string, _embeddings: EmbeddingsInterface From 6ffa5f9c95cfedbe15da367e919c5ace4a5a6a94 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 12:06:01 -0800 Subject: [PATCH 059/100] fix(ci): Fix CI (#7157) --- environment_tests/test-exports-tsc/main.ts | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/environment_tests/test-exports-tsc/main.ts b/environment_tests/test-exports-tsc/main.ts index 2303896a168f..c2638104dff5 100644 --- a/environment_tests/test-exports-tsc/main.ts +++ b/environment_tests/test-exports-tsc/main.ts @@ -1,20 +1,22 @@ import { ChatOpenAI } from "@langchain/openai"; import { createOpenAIToolsAgent, AgentExecutor } from "langchain/agents"; -import { pull } from "langchain/hub"; -import type { ChatPromptTemplate } from "@langchain/core/prompts"; +import { ChatPromptTemplate } from "@langchain/core/prompts"; const model = new ChatOpenAI({ openAIApiKey: "sk-XXXX", }); -const prompt = await pull( - "hwchase17/openai-functions-agent" -); +const prompt = ChatPromptTemplate.fromMessages([ + ["system", "You are a helpful assistant"], + ["placeholder", "{chat_history}"], + ["human", "{input}"], + ["placeholder", "{agent_scratchpad}"], +]); const agent = await createOpenAIToolsAgent({ llm: model, prompt, - tools: [] + tools: [], }); const agentExecutor = new AgentExecutor({ From 7102edde46c2d239e465423de15187ea6879bf7e Mon Sep 17 00:00:00 2001 From: Clemens Peters <13015002+clemenspeters@users.noreply.github.com> Date: Tue, 5 Nov 2024 21:06:31 +0100 Subject: [PATCH 060/100] fix(community): Fix logic issue (#7151) --- CONTRIBUTING.md | 2 +- .../langchain-community/src/vectorstores/pgvector.ts | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66afe82b8794..58a721a4d940 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -170,7 +170,7 @@ yarn Then, you will need to switch directories into `langchain-core` and build core by running: ```bash -cd ../langchain-core +cd ../../langchain-core yarn yarn build ``` diff --git a/libs/langchain-community/src/vectorstores/pgvector.ts b/libs/langchain-community/src/vectorstores/pgvector.ts index 4b5e39b95827..b8c0b924c8cb 100644 --- a/libs/langchain-community/src/vectorstores/pgvector.ts +++ b/libs/langchain-community/src/vectorstores/pgvector.ts @@ -261,9 +261,15 @@ export class PGVectorStore extends VectorStore { this.chunkSize = config.chunkSize ?? 500; this.distanceStrategy = config.distanceStrategy ?? this.distanceStrategy; - this._verbose = - getEnvironmentVariable("LANGCHAIN_VERBOSE") === "true" ?? 
- !!config.verbose; + const langchainVerbose = getEnvironmentVariable("LANGCHAIN_VERBOSE"); + + if (langchainVerbose === "true") { + this._verbose = true; + } else if (langchainVerbose === "false") { + this._verbose = false; + } else { + this._verbose = config.verbose; + } } get computedTableName() { From 50eb6ec6b6391db26067d9d2be8f0970a8429268 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 12:18:12 -0800 Subject: [PATCH 061/100] chore(community): Release 0.3.12 (#7158) --- libs/langchain-community/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index e530bac5b532..a1f60050f981 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.11", + "version": "0.3.12", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { From c75373ac6267c598c335a79c081f1be8c6c2fee7 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 12:46:35 -0800 Subject: [PATCH 062/100] feat(ollama): Allow passthrough of other options (#7159) --- libs/langchain-community/src/embeddings/ollama.ts | 3 +++ libs/langchain-ollama/src/embeddings.ts | 5 ++++- .../langchain-ollama/src/tests/embeddings.test.ts | 15 +++++++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 libs/langchain-ollama/src/tests/embeddings.test.ts diff --git a/libs/langchain-community/src/embeddings/ollama.ts b/libs/langchain-community/src/embeddings/ollama.ts index c1e1f36ec9a9..845af4669f8a 100644 --- a/libs/langchain-community/src/embeddings/ollama.ts +++ b/libs/langchain-community/src/embeddings/ollama.ts @@ -113,6 +113,9 @@ export class OllamaEmbeddings extends Embeddings { const snakeCasedOption = mapping[key as keyof CamelCasedRequestOptions]; if (snakeCasedOption) { snakeCasedOptions[snakeCasedOption] = value; + } else { + // Just pass unknown options through + snakeCasedOptions[key] = value; } } return snakeCasedOptions; diff --git a/libs/langchain-ollama/src/embeddings.ts b/libs/langchain-ollama/src/embeddings.ts index 5e878322a6d9..f330bb8c9580 100644 --- a/libs/langchain-ollama/src/embeddings.ts +++ b/libs/langchain-ollama/src/embeddings.ts @@ -42,7 +42,7 @@ interface OllamaEmbeddingsParams extends EmbeddingsParams { * https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values * for details of the available parameters. 
*/ - requestOptions?: OllamaCamelCaseOptions; + requestOptions?: OllamaCamelCaseOptions & Partial; } export class OllamaEmbeddings extends Embeddings { @@ -121,6 +121,9 @@ export class OllamaEmbeddings extends Embeddings { const snakeCasedOption = mapping[key as keyof OllamaCamelCaseOptions]; if (snakeCasedOption) { snakeCasedOptions[snakeCasedOption as keyof OllamaOptions] = value; + } else { + // Just pass unknown options through + snakeCasedOptions[key as keyof OllamaOptions] = value; } } return snakeCasedOptions; diff --git a/libs/langchain-ollama/src/tests/embeddings.test.ts b/libs/langchain-ollama/src/tests/embeddings.test.ts new file mode 100644 index 000000000000..68c872090424 --- /dev/null +++ b/libs/langchain-ollama/src/tests/embeddings.test.ts @@ -0,0 +1,15 @@ +import { test, expect } from "@jest/globals"; +import { OllamaEmbeddings } from "../embeddings.js"; + +test("Test OllamaEmbeddings allows passthrough of request options", async () => { + const embeddings = new OllamaEmbeddings({ + requestOptions: { + num_ctx: 1234, + numPredict: 4321, + }, + }); + expect(embeddings.requestOptions).toEqual({ + num_ctx: 1234, + num_predict: 4321, + }); +}); From 1ba488f2e3edc884606ce19fc2a2306d7144ab92 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 13:24:10 -0800 Subject: [PATCH 063/100] chore(ollama): Release 0.1.2 (#7163) --- libs/langchain-ollama/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-ollama/package.json b/libs/langchain-ollama/package.json index 2b5fc852e605..b40194cdbf47 100644 --- a/libs/langchain-ollama/package.json +++ b/libs/langchain-ollama/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/ollama", - "version": "0.1.1", + "version": "0.1.2", "description": "Ollama integration for LangChain.js", "type": "module", "engines": { From 3f8d6fcc72bc0c9c8290273745381659003d52c9 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 14:34:17 -0800 Subject: [PATCH 064/100] feat(openai): Adds prediction support to OpenAI (#7162) --- .../docs/integrations/chat/openai.ipynb | 128 +++++++++++++++++- libs/langchain-openai/package.json | 2 +- libs/langchain-openai/src/chat_models.ts | 8 ++ .../tests/chat_models-extended.int.test.ts | 55 ++++++++ yarn.lock | 10 +- 5 files changed, 196 insertions(+), 7 deletions(-) diff --git a/docs/core_docs/docs/integrations/chat/openai.ipynb b/docs/core_docs/docs/integrations/chat/openai.ipynb index e70ca09d5c43..441580334fc1 100644 --- a/docs/core_docs/docs/integrations/chat/openai.ipynb +++ b/docs/core_docs/docs/integrations/chat/openai.ipynb @@ -1028,6 +1028,132 @@ "console.log(\"USAGE:\", resWitCaching.response_metadata.usage);" ] }, + { + "cell_type": "markdown", + "id": "f755a0b3", + "metadata": {}, + "source": [ + "## Predicted output\n", + "\n", + "Some OpenAI models (such as their `gpt-4o` and `gpt-4o-mini` series) support [Predicted Outputs](https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs), which allow you to pass in a known portion of the LLM's expected output ahead of time to reduce latency. 
This is useful for cases such as editing text or code, where only a small part of the model's output will change.\n", + "\n", + "Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4d5a5582", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-AQLyQKnazr7lEV7ejLTo1UqhzHDBl\",\n", + " \"content\": \"/// \\n/// Represents a user with a first name, last name, and email.\\n/// \\npublic class User\\n{\\n/// \\n/// Gets or sets the user's first name.\\n/// \\npublic string FirstName { get; set; }\\n\\n/// \\n/// Gets or sets the user's last name.\\n/// \\npublic string LastName { get; set; }\\n\\n/// \\n/// Gets or sets the user's email.\\n/// \\npublic string Email { get; set; }\\n}\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"promptTokens\": 148,\n", + " \"completionTokens\": 217,\n", + " \"totalTokens\": 365\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"usage\": {\n", + " \"prompt_tokens\": 148,\n", + " \"completion_tokens\": 217,\n", + " \"total_tokens\": 365,\n", + " \"prompt_tokens_details\": {\n", + " \"cached_tokens\": 0\n", + " },\n", + " \"completion_tokens_details\": {\n", + " \"reasoning_tokens\": 0,\n", + " \"accepted_prediction_tokens\": 36,\n", + " \"rejected_prediction_tokens\": 116\n", + " }\n", + " },\n", + " \"system_fingerprint\": \"fp_0ba0d124f1\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"output_tokens\": 217,\n", + " \"input_tokens\": 148,\n", + " \"total_tokens\": 365,\n", + " \"input_token_details\": {\n", + " \"cache_read\": 0\n", + " },\n", + " \"output_token_details\": {\n", + " \"reasoning\": 0\n", + " }\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const modelWithPredictions = new ChatOpenAI({\n", + " model: \"gpt-4o-mini\",\n", + "});\n", + "\n", + "const codeSample = `\n", + "/// \n", + "/// Represents a user with a first name, last name, and username.\n", + "/// \n", + "public class User\n", + "{\n", + "/// \n", + "/// Gets or sets the user's first name.\n", + "/// \n", + "public string FirstName { get; set; }\n", + "\n", + "/// \n", + "/// Gets or sets the user's last name.\n", + "/// \n", + "public string LastName { get; set; }\n", + "\n", + "/// \n", + "/// Gets or sets the user's username.\n", + "/// \n", + "public string Username { get; set; }\n", + "}\n", + "`;\n", + "\n", + "// Can also be attached ahead of time\n", + "// using `model.bind({ prediction: {...} })`;\n", + "await modelWithPredictions.invoke(\n", + " [\n", + " {\n", + " role: \"user\",\n", + " content:\n", + " \"Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.\",\n", + " },\n", + " {\n", + " role: \"user\",\n", + " content: codeSample,\n", + " },\n", + " ],\n", + " {\n", + " prediction: {\n", + " type: \"content\",\n", + " content: codeSample,\n", + " },\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "81f901e4", + "metadata": {}, + "source": [ + "Note that currently predictions are billed as additional tokens and will increase your usage and costs in exchange for this reduced latency." 
+ ] + }, { "cell_type": "markdown", "id": "cc8b3c94", @@ -1212,4 +1338,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/libs/langchain-openai/package.json b/libs/langchain-openai/package.json index 82012d7d9e4f..c1b11c05f082 100644 --- a/libs/langchain-openai/package.json +++ b/libs/langchain-openai/package.json @@ -36,7 +36,7 @@ "license": "MIT", "dependencies": { "js-tiktoken": "^1.0.12", - "openai": "^4.68.0", + "openai": "^4.71.0", "zod": "^3.22.4", "zod-to-json-schema": "^3.22.3" }, diff --git a/libs/langchain-openai/src/chat_models.ts b/libs/langchain-openai/src/chat_models.ts index 4a0d438f0ba5..803dd13ca1d7 100644 --- a/libs/langchain-openai/src/chat_models.ts +++ b/libs/langchain-openai/src/chat_models.ts @@ -423,6 +423,11 @@ export interface ChatOpenAICallOptions * [Learn more](https://platform.openai.com/docs/guides/audio). */ audio?: OpenAIClient.Chat.ChatCompletionAudioParam; + /** + * Static predicted output content, such as the content of a text file that is being regenerated. + * [Learn more](https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs). + */ + prediction?: OpenAIClient.ChatCompletionPredictionContent; } export interface ChatOpenAIFields @@ -1329,6 +1334,9 @@ export class ChatOpenAI< : {}), ...this.modelKwargs, }; + if (options?.prediction !== undefined) { + params.prediction = options.prediction; + } return params; } diff --git a/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts b/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts index 970400634d4f..42fde850b896 100644 --- a/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts +++ b/libs/langchain-openai/src/tests/chat_models-extended.int.test.ts @@ -636,3 +636,58 @@ test.skip("system prompt caching", async () => { aggregate?.response_metadata?.usage.prompt_tokens_details.cached_tokens ).toBeGreaterThan(0); }); + +test("predicted output", async () => { + const model = new ChatOpenAI({ + model: "gpt-4o-mini", + }); + const code = ` +/// +/// Represents a user with a first name, last name, and username. +/// +public class User +{ + /// + /// Gets or sets the user's first name. + /// + public string FirstName { get; set; } + + /// + /// Gets or sets the user's last name. + /// + public string LastName { get; set; } + + /// + /// Gets or sets the user's username. + /// + public string Username { get; set; } +} +`; + const res = await model.invoke( + [ + { + role: "user", + content: + "Replace the Username property with an Email property. 
Respond only with code, and with no markdown formatting.", + }, + { + role: "user", + content: code, + }, + ], + { + prediction: { + type: "content", + content: code, + }, + } + ); + expect( + typeof res.response_metadata?.usage?.completion_tokens_details + .accepted_prediction_tokens + ).toBe("number"); + expect( + typeof res.response_metadata?.usage?.completion_tokens_details + .rejected_prediction_tokens + ).toBe("number"); +}); diff --git a/yarn.lock b/yarn.lock index 367f4903bf51..2fd5980a162b 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12552,7 +12552,7 @@ __metadata: jest: ^29.5.0 jest-environment-node: ^29.6.4 js-tiktoken: ^1.0.12 - openai: ^4.68.0 + openai: ^4.71.0 prettier: ^2.8.3 release-it: ^17.6.0 rimraf: ^5.0.1 @@ -35695,9 +35695,9 @@ __metadata: languageName: node linkType: hard -"openai@npm:^4.68.0": - version: 4.68.0 - resolution: "openai@npm:4.68.0" +"openai@npm:^4.71.0": + version: 4.71.0 + resolution: "openai@npm:4.71.0" dependencies: "@types/node": ^18.11.18 "@types/node-fetch": ^2.6.4 @@ -35713,7 +35713,7 @@ __metadata: optional: true bin: openai: bin/cli - checksum: 2866e54ac1b34e074055dde7cc809bcc33d1172f0ab289dacd54ced04a62ab3c2b9f584fdb84ece981edc5c30939497af4e91fe33646f71d5c6ced5d7106a797 + checksum: ba4b3772e806c59b1ea1235a40486392c797906e45dd97914f2cd819b4be2996e207c7b7c67d43236692300354f4e9ffa8ebfca6e97d3555655ebf0f3f01e3f2 languageName: node linkType: hard From c8e4c61f4f08fb72f9481be78e1bf89019d34b34 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 14:44:39 -0800 Subject: [PATCH 065/100] chore(openai): Release 0.3.12 (#7165) --- libs/langchain-openai/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-openai/package.json b/libs/langchain-openai/package.json index c1b11c05f082..94244ad02932 100644 --- a/libs/langchain-openai/package.json +++ b/libs/langchain-openai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/openai", - "version": "0.3.11", + "version": "0.3.12", "description": "OpenAI integrations for LangChain.js", "type": "module", "engines": { From 5d1ea91dadd69157d6e8675f49b8ede74ff41693 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Tue, 5 Nov 2024 14:59:46 -0800 Subject: [PATCH 066/100] docs: Fix import (#7166) --- docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx b/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx index 6778824dbb3b..d55f46dd11e6 100644 --- a/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/tensorflow.mdx @@ -8,7 +8,7 @@ npm install @langchain/community @langchain/core @tensorflow/tfjs-core@3.6.0 @te ```typescript import "@tensorflow/tfjs-backend-cpu"; -import { TensorFlowEmbeddings } from "langchain/embeddings/tensorflow"; +import { TensorFlowEmbeddings } from "@langchain/community/embeddings/tensorflow"; const embeddings = new TensorFlowEmbeddings(); ``` From 7ccc8d5fe8db9b2d294c8ecafcf3293d03d47630 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Tue, 5 Nov 2024 17:24:14 -0800 Subject: [PATCH 067/100] feat(xai): Add xAI integration package (#7156) --- .../docs/integrations/chat/index.mdx | 1 + .../docs/integrations/chat/xai.ipynb | 301 +++++++++++ examples/package.json | 1 + libs/langchain-xai/.eslintrc.cjs | 74 +++ libs/langchain-xai/.gitignore | 7 + libs/langchain-xai/.prettierrc | 19 + libs/langchain-xai/.release-it.json | 12 + 
libs/langchain-xai/LICENSE | 21 + libs/langchain-xai/README.md | 76 +++ libs/langchain-xai/jest.config.cjs | 20 + libs/langchain-xai/jest.env.cjs | 12 + libs/langchain-xai/langchain.config.js | 21 + libs/langchain-xai/package.json | 94 ++++ .../scripts/jest-setup-after-env.js | 9 + libs/langchain-xai/src/chat_models.ts | 497 ++++++++++++++++++ libs/langchain-xai/src/index.ts | 1 + .../src/tests/chat_models.int.test.ts | 233 ++++++++ .../tests/chat_models.standard.int.test.ts | 37 ++ .../src/tests/chat_models.standard.test.ts | 39 ++ .../src/tests/chat_models.test.ts | 20 + .../chat_models_structured_output.int.test.ts | 269 ++++++++++ libs/langchain-xai/tsconfig.cjs.json | 8 + libs/langchain-xai/tsconfig.json | 23 + libs/langchain-xai/turbo.json | 11 + yarn.lock | 37 ++ 25 files changed, 1843 insertions(+) create mode 100644 docs/core_docs/docs/integrations/chat/xai.ipynb create mode 100644 libs/langchain-xai/.eslintrc.cjs create mode 100644 libs/langchain-xai/.gitignore create mode 100644 libs/langchain-xai/.prettierrc create mode 100644 libs/langchain-xai/.release-it.json create mode 100644 libs/langchain-xai/LICENSE create mode 100644 libs/langchain-xai/README.md create mode 100644 libs/langchain-xai/jest.config.cjs create mode 100644 libs/langchain-xai/jest.env.cjs create mode 100644 libs/langchain-xai/langchain.config.js create mode 100644 libs/langchain-xai/package.json create mode 100644 libs/langchain-xai/scripts/jest-setup-after-env.js create mode 100644 libs/langchain-xai/src/chat_models.ts create mode 100644 libs/langchain-xai/src/index.ts create mode 100644 libs/langchain-xai/src/tests/chat_models.int.test.ts create mode 100644 libs/langchain-xai/src/tests/chat_models.standard.int.test.ts create mode 100644 libs/langchain-xai/src/tests/chat_models.standard.test.ts create mode 100644 libs/langchain-xai/src/tests/chat_models.test.ts create mode 100644 libs/langchain-xai/src/tests/chat_models_structured_output.int.test.ts create mode 100644 libs/langchain-xai/tsconfig.cjs.json create mode 100644 libs/langchain-xai/tsconfig.json create mode 100644 libs/langchain-xai/turbo.json diff --git a/docs/core_docs/docs/integrations/chat/index.mdx b/docs/core_docs/docs/integrations/chat/index.mdx index a94f1ccd6076..3432ab9aa4bf 100644 --- a/docs/core_docs/docs/integrations/chat/index.mdx +++ b/docs/core_docs/docs/integrations/chat/index.mdx @@ -29,6 +29,7 @@ If you'd like to write your own chat model, see [this how-to](/docs/how_to/custo | [ChatOllama](/docs/integrations/chat/ollama/) | ✅ | ✅ | ✅ | ✅ | ✅ | | [ChatOpenAI](/docs/integrations/chat/openai/) | ✅ | ✅ | ✅ | ✅ | ✅ | | [ChatTogetherAI](/docs/integrations/chat/togetherai/) | ✅ | ✅ | ✅ | ✅ | ✅ | +| [ChatXAI](/docs/integrations/chat/xai/) | ✅ | ✅ | ✅ | ✅ | ❌ | ## All chat models diff --git a/docs/core_docs/docs/integrations/chat/xai.ipynb b/docs/core_docs/docs/integrations/chat/xai.ipynb new file mode 100644 index 000000000000..e07a8079dd0c --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/xai.ipynb @@ -0,0 +1,301 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: xAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatXAI\n", + "\n", + "[xAI](https://x.ai/) is an artificial intelligence company that develops large language models (LLMs). 
Their flagship model, Grok, is trained on real-time X (formerly Twitter) data and aims to provide witty, personality-rich responses while maintaining high capability on technical tasks.\n", + "\n", + "This guide will help you get started with `ChatXAI` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatXAI` features and configurations, head to the [API reference](https://api.js.langchain.com/classes/_langchain_xai.ChatXAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | PY support | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatXAI](https://api.js.langchain.com/classes/_langchain_xai.ChatXAI.html) | [`@langchain/xai`](https://www.npmjs.com/package/@langchain/xai) | ❌ | ✅ | ❌ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/xai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/xai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "\n", + "See the links in the table headers below for guides on how to use specific features.\n", + "\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `ChatXAI` models, you'll need to create an xAI account, [get an API key](https://console.x.ai/), and install the `@langchain/xai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [the xAI website](https://x.ai) to sign up to xAI and generate an API key.
Once you've done this set the `XAI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export XAI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatXAI` integration lives in the `@langchain/xai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/xai @langchain/core\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatXAI } from \"@langchain/xai\" \n", + "\n", + "const llm = new ChatXAI({\n", + " model: \"grok-beta\", // default\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"71d7e3d8-30dd-472c-8038-b6b283dcee63\",\n", + " \"content\": \"J'adore programmer.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"promptTokens\": 30,\n", + " \"completionTokens\": 6,\n", + " \"totalTokens\": 36\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"usage\": {\n", + " \"prompt_tokens\": 30,\n", + " \"completion_tokens\": 6,\n", + " \"total_tokens\": 36\n", + " },\n", + " \"system_fingerprint\": \"fp_3e3898d4ce\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"output_tokens\": 6,\n", + " \"input_tokens\": 30,\n", + " \"total_tokens\": 36,\n", + " \"input_token_details\": {},\n", + " \"output_token_details\": {}\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "console.log(aiMsg)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore programmer.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"b2738008-8247-40e1-81dc-d9bf437a1a0c\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"promptTokens\": 25,\n", + " \"completionTokens\": 7,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"usage\": {\n", + " \"prompt_tokens\": 25,\n", + " \"completion_tokens\": 7,\n", + " \"total_tokens\": 32\n", + " },\n", + " \"system_fingerprint\": \"fp_3e3898d4ce\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"output_tokens\": 7,\n", + " \"input_tokens\": 25,\n", + " \"total_tokens\": 32,\n", + " \"input_token_details\": {},\n", + " \"output_token_details\": {}\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "Behind the scenes, xAI uses the OpenAI SDK and OpenAI compatible API." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatXAI features and configurations head to the API reference: https://api.js.langchain.com/classes/_langchain_xai.ChatXAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/package.json b/examples/package.json index 48ad17ca960b..7ace75f8b9d2 100644 --- a/examples/package.json +++ b/examples/package.json @@ -61,6 +61,7 @@ "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/textsplitters": "workspace:*", "@langchain/weaviate": "workspace:*", + "@langchain/xai": "workspace:*", "@langchain/yandex": "workspace:*", "@layerup/layerup-security": "^1.5.12", "@opensearch-project/opensearch": "^2.2.0", diff --git a/libs/langchain-xai/.eslintrc.cjs b/libs/langchain-xai/.eslintrc.cjs new file mode 100644 index 000000000000..6503be533320 --- /dev/null +++ b/libs/langchain-xai/.eslintrc.cjs @@ -0,0 +1,74 @@ +module.exports = { + extends: [ + "airbnb-base", + "eslint:recommended", + "prettier", + "plugin:@typescript-eslint/recommended", + ], + parserOptions: { + ecmaVersion: 12, + parser: "@typescript-eslint/parser", + project: "./tsconfig.json", + sourceType: "module", + }, + plugins: ["@typescript-eslint", "no-instanceof"], + ignorePatterns: [ + ".eslintrc.cjs", + "scripts", + "node_modules", + "dist", + "dist-cjs", + "*.js", + "*.cjs", + "*.d.ts", + ], + rules: { + "no-process-env": 2, + "no-instanceof/no-instanceof": 2, + "@typescript-eslint/explicit-module-boundary-types": 0, + "@typescript-eslint/no-empty-function": 0, + "@typescript-eslint/no-shadow": 0, + "@typescript-eslint/no-empty-interface": 0, + "@typescript-eslint/no-use-before-define": ["error", "nofunc"], + "@typescript-eslint/no-unused-vars": ["warn", { args: "none" }], + "@typescript-eslint/no-floating-promises": "error", + "@typescript-eslint/no-misused-promises": "error", + camelcase: 0, + "class-methods-use-this": 0, + "import/extensions": [2, "ignorePackages"], + "import/no-extraneous-dependencies": [ + "error", + { devDependencies: ["**/*.test.ts"] }, + ], + "import/no-unresolved": 0, + "import/prefer-default-export": 0, + "keyword-spacing": "error", + "max-classes-per-file": 0, + "max-len": 0, + "no-await-in-loop": 0, + "no-bitwise": 0, + "no-console": 0, + "no-restricted-syntax": 0, + "no-shadow": 0, + "no-continue": 0, + "no-void": 0, + "no-underscore-dangle": 0, + "no-use-before-define": 0, + "no-useless-constructor": 0, + "no-return-await": 0, + "consistent-return": 0, + "no-else-return": 0, + "func-names": 0, + "no-lonely-if": 0, + "prefer-rest-params": 0, + "new-cap": ["error", { properties: false, capIsNew: false }], + }, + overrides: [ + { + files: ['**/*.test.ts'], + rules: { + '@typescript-eslint/no-unused-vars': 'off' + } + } + ] +}; diff --git a/libs/langchain-xai/.gitignore b/libs/langchain-xai/.gitignore new file mode 100644 index 000000000000..c10034e2f1be --- /dev/null +++ b/libs/langchain-xai/.gitignore @@ -0,0 +1,7 @@ +index.cjs +index.js +index.d.ts +index.d.cts +node_modules +dist +.yarn diff --git a/libs/langchain-xai/.prettierrc 
b/libs/langchain-xai/.prettierrc new file mode 100644 index 000000000000..ba08ff04f677 --- /dev/null +++ b/libs/langchain-xai/.prettierrc @@ -0,0 +1,19 @@ +{ + "$schema": "https://json.schemastore.org/prettierrc", + "printWidth": 80, + "tabWidth": 2, + "useTabs": false, + "semi": true, + "singleQuote": false, + "quoteProps": "as-needed", + "jsxSingleQuote": false, + "trailingComma": "es5", + "bracketSpacing": true, + "arrowParens": "always", + "requirePragma": false, + "insertPragma": false, + "proseWrap": "preserve", + "htmlWhitespaceSensitivity": "css", + "vueIndentScriptAndStyle": false, + "endOfLine": "lf" +} diff --git a/libs/langchain-xai/.release-it.json b/libs/langchain-xai/.release-it.json new file mode 100644 index 000000000000..06850ca85be1 --- /dev/null +++ b/libs/langchain-xai/.release-it.json @@ -0,0 +1,12 @@ +{ + "github": { + "release": true, + "autoGenerate": true, + "tokenRef": "GITHUB_TOKEN_RELEASE" + }, + "npm": { + "versionArgs": [ + "--workspaces-update=false" + ] + } +} diff --git a/libs/langchain-xai/LICENSE b/libs/langchain-xai/LICENSE new file mode 100644 index 000000000000..e7530f5e9e10 --- /dev/null +++ b/libs/langchain-xai/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2024 LangChain + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/libs/langchain-xai/README.md b/libs/langchain-xai/README.md new file mode 100644 index 000000000000..a4db12a6f6d1 --- /dev/null +++ b/libs/langchain-xai/README.md @@ -0,0 +1,76 @@ +# @langchain/xai + +This package contains the LangChain.js integrations for xAI. + +## Installation + +```bash npm2yarn +npm install @langchain/xai @langchain/core +``` + +## Chat models + +This package adds support for xAI chat model inference. + +Set the necessary environment variable (or pass it in via the constructor): + +```bash +export XAI_API_KEY= +``` + +```typescript +import { ChatXAI } from "@langchain/xai"; +import { HumanMessage } from "@langchain/core/messages"; + +const model = new ChatXAI({ + apiKey: process.env.XAI_API_KEY, // Default value. 
+}); + +const message = new HumanMessage("What color is the sky?"); + +const res = await model.invoke([message]); +``` + +## Development + +To develop the `@langchain/xai` package, you'll need to follow these instructions: + +### Install dependencies + +```bash +yarn install +``` + +### Build the package + +```bash +yarn build +``` + +Or from the repo root: + +```bash +yarn build --filter=@langchain/xai +``` + +### Run tests + +Test files should live within a `tests/` file in the `src/` folder. Unit tests should end in `.test.ts` and integration tests should +end in `.int.test.ts`: + +```bash +$ yarn test +$ yarn test:int +``` + +### Lint & Format + +Run the linter & formatter to ensure your code is up to standard: + +```bash +yarn lint && yarn format +``` + +### Adding new entrypoints + +If you add a new file to be exported, either import & re-export from `src/index.ts`, or add it to the `entrypoints` field in the `config` variable located inside `langchain.config.js` and run `yarn build` to generate the new entrypoint. diff --git a/libs/langchain-xai/jest.config.cjs b/libs/langchain-xai/jest.config.cjs new file mode 100644 index 000000000000..a06cb3338861 --- /dev/null +++ b/libs/langchain-xai/jest.config.cjs @@ -0,0 +1,20 @@ +/** @type {import('ts-jest').JestConfigWithTsJest} */ +module.exports = { + preset: "ts-jest/presets/default-esm", + testEnvironment: "./jest.env.cjs", + modulePathIgnorePatterns: ["dist/", "docs/"], + moduleNameMapper: { + "^(\\.{1,2}/.*)\\.js$": "$1", + }, + transform: { + "^.+\\.tsx?$": ["@swc/jest"], + }, + transformIgnorePatterns: [ + "/node_modules/", + "\\.pnp\\.[^\\/]+$", + "./scripts/jest-setup-after-env.js", + ], + setupFiles: ["dotenv/config"], + testTimeout: 20_000, + passWithNoTests: true, +}; diff --git a/libs/langchain-xai/jest.env.cjs b/libs/langchain-xai/jest.env.cjs new file mode 100644 index 000000000000..2ccedccb8672 --- /dev/null +++ b/libs/langchain-xai/jest.env.cjs @@ -0,0 +1,12 @@ +const { TestEnvironment } = require("jest-environment-node"); + +class AdjustedTestEnvironmentToSupportFloat32Array extends TestEnvironment { + constructor(config, context) { + // Make `instanceof Float32Array` return true in tests + // to avoid https://github.com/xenova/transformers.js/issues/57 and https://github.com/jestjs/jest/issues/2549 + super(config, context); + this.global.Float32Array = Float32Array; + } +} + +module.exports = AdjustedTestEnvironmentToSupportFloat32Array; diff --git a/libs/langchain-xai/langchain.config.js b/libs/langchain-xai/langchain.config.js new file mode 100644 index 000000000000..19512b23d29b --- /dev/null +++ b/libs/langchain-xai/langchain.config.js @@ -0,0 +1,21 @@ +import { resolve, dirname } from "node:path"; +import { fileURLToPath } from "node:url"; + +/** + * @param {string} relativePath + * @returns {string} + */ +function abs(relativePath) { + return resolve(dirname(fileURLToPath(import.meta.url)), relativePath); +} + +export const config = { + internals: [/node\:/, /@langchain\/core\//], + entrypoints: { + index: "index", + }, + tsConfigPath: resolve("./tsconfig.json"), + cjsSource: "./dist-cjs", + cjsDestination: "./dist", + abs, +} \ No newline at end of file diff --git a/libs/langchain-xai/package.json b/libs/langchain-xai/package.json new file mode 100644 index 000000000000..5a690400a871 --- /dev/null +++ b/libs/langchain-xai/package.json @@ -0,0 +1,94 @@ +{ + "name": "@langchain/xai", + "version": "0.0.1", + "description": "xAI integration for LangChain.js", + "type": "module", + "engines": { + "node": ">=18" + }, + 
"main": "./index.js", + "types": "./index.d.ts", + "repository": { + "type": "git", + "url": "git@github.com:langchain-ai/langchainjs.git" + }, + "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-xai/", + "scripts": { + "build": "yarn turbo:command build:internal --filter=@langchain/xai", + "build:internal": "yarn lc_build --create-entrypoints --pre --tree-shaking", + "lint:eslint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", + "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts", + "lint": "yarn lint:eslint && yarn lint:dpdm", + "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm", + "clean": "rm -rf .turbo dist/", + "prepack": "yarn build", + "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%", + "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts", + "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", + "test:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", + "test:standard:unit": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.test.ts --testTimeout 100000 --maxWorkers=50%", + "test:standard:int": "NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.standard\\.int\\.test.ts --testTimeout 100000 --maxWorkers=50%", + "test:standard": "yarn test:standard:unit && yarn test:standard:int", + "format": "prettier --config .prettierrc --write \"src\"", + "format:check": "prettier --config .prettierrc --check \"src\"" + }, + "author": "LangChain", + "license": "MIT", + "dependencies": { + "@langchain/openai": "~0.3.0" + }, + "peerDependencies": { + "@langchain/core": ">=0.2.21 <0.4.0" + }, + "devDependencies": { + "@jest/globals": "^29.5.0", + "@langchain/core": "workspace:*", + "@langchain/openai": "workspace:^", + "@langchain/scripts": ">=0.1.0 <0.2.0", + "@langchain/standard-tests": "0.0.0", + "@swc/core": "^1.3.90", + "@swc/jest": "^0.2.29", + "@tsconfig/recommended": "^1.0.3", + "@types/uuid": "^9", + "@typescript-eslint/eslint-plugin": "^6.12.0", + "@typescript-eslint/parser": "^6.12.0", + "dotenv": "^16.3.1", + "dpdm": "^3.12.0", + "eslint": "^8.33.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-prettier": "^8.6.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-no-instanceof": "^1.0.1", + "eslint-plugin-prettier": "^4.2.1", + "jest": "^29.5.0", + "jest-environment-node": "^29.6.4", + "prettier": "^2.8.3", + "release-it": "^17.6.0", + "rollup": "^4.5.2", + "ts-jest": "^29.1.0", + "typescript": "<5.2.0", + "zod": "^3.22.4" + }, + "publishConfig": { + "access": "public" + }, + "exports": { + ".": { + "types": { + "import": "./index.d.ts", + "require": "./index.d.cts", + "default": "./index.d.ts" + }, + "import": "./index.js", + "require": "./index.cjs" + }, + "./package.json": "./package.json" + }, + "files": [ + "dist/", + "index.cjs", + "index.js", + "index.d.ts", + "index.d.cts" + ] +} diff --git a/libs/langchain-xai/scripts/jest-setup-after-env.js b/libs/langchain-xai/scripts/jest-setup-after-env.js new file mode 100644 index 000000000000..7323083d0ea5 --- /dev/null +++ b/libs/langchain-xai/scripts/jest-setup-after-env.js @@ -0,0 +1,9 @@ +import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; +import { afterAll, jest } from 
"@jest/globals"; + +afterAll(awaitAllCallbacks); + +// Allow console.log to be disabled in tests +if (process.env.DISABLE_CONSOLE_LOGS === "true") { + console.log = jest.fn(); +} diff --git a/libs/langchain-xai/src/chat_models.ts b/libs/langchain-xai/src/chat_models.ts new file mode 100644 index 000000000000..5a1f5177c246 --- /dev/null +++ b/libs/langchain-xai/src/chat_models.ts @@ -0,0 +1,497 @@ +import { + BaseChatModelCallOptions, + BindToolsInput, + LangSmithParams, + type BaseChatModelParams, +} from "@langchain/core/language_models/chat_models"; +import { Serialized } from "@langchain/core/load/serializable"; +import { getEnvironmentVariable } from "@langchain/core/utils/env"; +import { + type OpenAICoreRequestOptions, + type OpenAIClient, + ChatOpenAI, + OpenAIToolChoice, +} from "@langchain/openai"; + +type ChatXAIToolType = BindToolsInput | OpenAIClient.ChatCompletionTool; + +export interface ChatXAICallOptions extends BaseChatModelCallOptions { + headers?: Record; + tools?: ChatXAIToolType[]; + tool_choice?: OpenAIToolChoice | string | "auto" | "any"; +} + +export interface ChatXAIInput extends BaseChatModelParams { + /** + * The xAI API key to use for requests. + * @default process.env.XAI_API_KEY + */ + apiKey?: string; + /** + * The name of the model to use. + * @default "grok-beta" + */ + model?: string; + /** + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence. + * Alias for `stopSequences` + */ + stop?: Array; + /** + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence. + */ + stopSequences?: Array; + /** + * Whether or not to stream responses. + */ + streaming?: boolean; + /** + * The temperature to use for sampling. + * @default 0.7 + */ + temperature?: number; + /** + * The maximum number of tokens that the model can process in a single response. + * This limits ensures computational efficiency and resource management. + */ + maxTokens?: number; +} + +/** + * xAI chat model integration. + * + * The xAI API is compatible to the OpenAI API with some limitations. + * + * Setup: + * Install `@langchain/xai` and set an environment variable named `XAI_API_KEY`. + * + * ```bash + * npm install @langchain/xai + * export XAI_API_KEY="your-api-key" + * ``` + * + * ## [Constructor args](https://api.js.langchain.com/classes/langchain_xai.ChatXAI.html#constructor) + * + * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_xai.ChatXAICallOptions.html) + * + * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc. + * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below: + * + * ```typescript + * // When calling `.bind`, call options should be passed via the first argument + * const llmWithArgsBound = llm.bind({ + * stop: ["\n"], + * tools: [...], + * }); + * + * // When calling `.bindTools`, call options should be passed via the second argument + * const llmWithTools = llm.bindTools( + * [...], + * { + * tool_choice: "auto", + * } + * ); + * ``` + * + * ## Examples + * + *
+ * Instantiate + * + * ```typescript + * import { ChatXAI } from '@langchain/xai'; + * + * const llm = new ChatXAI({ + * model: "grok-beta", + * temperature: 0, + * // other params... + * }); + * ``` + *
+ * + *
+ * + *
+ * Invoking + * + * ```typescript + * const input = `Translate "I love programming" into French.`; + * + * // Models also accept a list of chat messages or a formatted prompt + * const result = await llm.invoke(input); + * console.log(result); + * ``` + * + * ```txt + * AIMessage { + * "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.", + * "additional_kwargs": {}, + * "response_metadata": { + * "tokenUsage": { + * "completionTokens": 82, + * "promptTokens": 20, + * "totalTokens": 102 + * }, + * "finish_reason": "stop" + * }, + * "tool_calls": [], + * "invalid_tool_calls": [] + * } + * ``` + *
+ * + *
+ * + *
+ * Streaming Chunks + * + * ```typescript + * for await (const chunk of await llm.stream(input)) { + * console.log(chunk); + * } + * ``` + * + * ```txt + * AIMessageChunk { + * "content": "", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": "The", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": " French", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": " translation", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": " of", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": " \"", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": "I", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": " love", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * ... + * AIMessageChunk { + * "content": ".", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": null + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * AIMessageChunk { + * "content": "", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": "stop" + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * ``` + *
+ * + *
+ * + *
+ * Aggregate Streamed Chunks + * + * ```typescript + * import { AIMessageChunk } from '@langchain/core/messages'; + * import { concat } from '@langchain/core/utils/stream'; + * + * const stream = await llm.stream(input); + * let full: AIMessageChunk | undefined; + * for await (const chunk of stream) { + * full = !full ? chunk : concat(full, chunk); + * } + * console.log(full); + * ``` + * + * ```txt + * AIMessageChunk { + * "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.", + * "additional_kwargs": {}, + * "response_metadata": { + * "finishReason": "stop" + * }, + * "tool_calls": [], + * "tool_call_chunks": [], + * "invalid_tool_calls": [] + * } + * ``` + *
+ * + *
+ * + *
+ * Bind tools + * + * ```typescript + * import { z } from 'zod'; + * + * const llmForToolCalling = new ChatXAI({ + * model: "grok-beta", + * temperature: 0, + * // other params... + * }); + * + * const GetWeather = { + * name: "GetWeather", + * description: "Get the current weather in a given location", + * schema: z.object({ + * location: z.string().describe("The city and state, e.g. San Francisco, CA") + * }), + * } + * + * const GetPopulation = { + * name: "GetPopulation", + * description: "Get the current population in a given location", + * schema: z.object({ + * location: z.string().describe("The city and state, e.g. San Francisco, CA") + * }), + * } + * + * const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]); + * const aiMsg = await llmWithTools.invoke( + * "Which city is hotter today and which is bigger: LA or NY?" + * ); + * console.log(aiMsg.tool_calls); + * ``` + * + * ```txt + * [ + * { + * name: 'GetWeather', + * args: { location: 'Los Angeles, CA' }, + * type: 'tool_call', + * id: 'call_cd34' + * }, + * { + * name: 'GetWeather', + * args: { location: 'New York, NY' }, + * type: 'tool_call', + * id: 'call_68rf' + * }, + * { + * name: 'GetPopulation', + * args: { location: 'Los Angeles, CA' }, + * type: 'tool_call', + * id: 'call_f81z' + * }, + * { + * name: 'GetPopulation', + * args: { location: 'New York, NY' }, + * type: 'tool_call', + * id: 'call_8byt' + * } + * ] + * ``` + *
+ * + *
+ * + *
+ * Structured Output + * + * ```typescript + * import { z } from 'zod'; + * + * const Joke = z.object({ + * setup: z.string().describe("The setup of the joke"), + * punchline: z.string().describe("The punchline to the joke"), + * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") + * }).describe('Joke to tell user.'); + * + * const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: "Joke" }); + * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); + * console.log(jokeResult); + * ``` + * + * ```txt + * { + * setup: "Why don't cats play poker in the wild?", + * punchline: 'Because there are too many cheetahs.' + * } + * ``` + *
+ * + *
+ */ +export class ChatXAI extends ChatOpenAI { + static lc_name() { + return "ChatXAI"; + } + + _llmType() { + return "xAI"; + } + + get lc_secrets(): { [key: string]: string } | undefined { + return { + apiKey: "XAI_API_KEY", + }; + } + + lc_serializable = true; + + lc_namespace = ["langchain", "chat_models", "xai"]; + + constructor(fields?: Partial) { + const apiKey = fields?.apiKey || getEnvironmentVariable("XAI_API_KEY"); + if (!apiKey) { + throw new Error( + `xAI API key not found. Please set the XAI_API_KEY environment variable or provide the key into "apiKey" field.` + ); + } + + super({ + ...fields, + model: fields?.model || "grok-beta", + apiKey, + configuration: { + baseURL: "https://api.x.ai/v1", + }, + }); + } + + toJSON(): Serialized { + const result = super.toJSON(); + + if ( + "kwargs" in result && + typeof result.kwargs === "object" && + result.kwargs != null + ) { + delete result.kwargs.openai_api_key; + delete result.kwargs.configuration; + } + + return result; + } + + getLsParams(options: this["ParsedCallOptions"]): LangSmithParams { + const params = super.getLsParams(options); + params.ls_provider = "xai"; + return params; + } + + async completionWithRetry( + request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, + options?: OpenAICoreRequestOptions + ): Promise>; + + async completionWithRetry( + request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, + options?: OpenAICoreRequestOptions + ): Promise; + + /** + * Calls the xAI API with retry logic in case of failures. + * @param request The request to send to the xAI API. + * @param options Optional configuration for the API call. + * @returns The response from the xAI API. + */ + async completionWithRetry( + request: + | OpenAIClient.Chat.ChatCompletionCreateParamsStreaming + | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, + options?: OpenAICoreRequestOptions + ): Promise< + | AsyncIterable + | OpenAIClient.Chat.Completions.ChatCompletion + > { + delete request.frequency_penalty; + delete request.presence_penalty; + delete request.logit_bias; + delete request.functions; + + const newRequestMessages = request.messages.map((msg) => { + if (!msg.content) { + return { + ...msg, + content: "", + }; + } + return msg; + }); + + const newRequest = { + ...request, + messages: newRequestMessages, + }; + + if (newRequest.stream === true) { + return super.completionWithRetry(newRequest, options); + } + + return super.completionWithRetry(newRequest, options); + } +} diff --git a/libs/langchain-xai/src/index.ts b/libs/langchain-xai/src/index.ts new file mode 100644 index 000000000000..38c7cea7f478 --- /dev/null +++ b/libs/langchain-xai/src/index.ts @@ -0,0 +1 @@ +export * from "./chat_models.js"; diff --git a/libs/langchain-xai/src/tests/chat_models.int.test.ts b/libs/langchain-xai/src/tests/chat_models.int.test.ts new file mode 100644 index 000000000000..efb6b04371fd --- /dev/null +++ b/libs/langchain-xai/src/tests/chat_models.int.test.ts @@ -0,0 +1,233 @@ +import { test } from "@jest/globals"; +import { + AIMessage, + AIMessageChunk, + HumanMessage, + ToolMessage, +} from "@langchain/core/messages"; +import { tool } from "@langchain/core/tools"; +import { z } from "zod"; +import { concat } from "@langchain/core/utils/stream"; +import { ChatXAI } from "../chat_models.js"; + +test("invoke", async () => { + const chat = new ChatXAI({ + maxRetries: 0, + }); + const message = new HumanMessage("What color is the sky?"); + const res = await chat.invoke([message]); + // console.log({ res }); + 
expect(res.content.length).toBeGreaterThan(10); +}); + +test("invoke with stop sequence", async () => { + const chat = new ChatXAI({ + maxRetries: 0, + }); + const message = new HumanMessage("Count to ten."); + const res = await chat.bind({ stop: ["5", "five"] }).invoke([message]); + // console.log({ res }); + expect((res.content as string).toLowerCase()).not.toContain("6"); + expect((res.content as string).toLowerCase()).not.toContain("six"); +}); + +test("stream should respect passed headers", async () => { + const chat = new ChatXAI({ + maxRetries: 0, + }); + const message = new HumanMessage("Count to ten."); + await expect(async () => { + await chat.stream([message], { + headers: { Authorization: "badbadbad" }, + }); + }).rejects.toThrowError(); +}); + +test("generate", async () => { + const chat = new ChatXAI(); + const message = new HumanMessage("Hello!"); + const res = await chat.generate([[message]]); + // console.log(JSON.stringify(res, null, 2)); + expect(res.generations[0][0].text.length).toBeGreaterThan(10); +}); + +test("streaming", async () => { + const chat = new ChatXAI(); + const message = new HumanMessage("What color is the sky?"); + const stream = await chat.stream([message]); + let iters = 0; + let finalRes = ""; + for await (const chunk of stream) { + iters += 1; + finalRes += chunk.content; + } + // console.log({ finalRes, iters }); + expect(iters).toBeGreaterThan(1); +}); + +test("invoke with bound tools", async () => { + const chat = new ChatXAI({ + maxRetries: 0, + model: "grok-beta", + }); + const message = new HumanMessage("What is the current weather in Hawaii?"); + const res = await chat + .bind({ + tools: [ + { + type: "function", + function: { + name: "get_current_weather", + description: "Get the current weather in a given location", + parameters: { + type: "object", + properties: { + location: { + type: "string", + description: "The city and state, e.g. San Francisco, CA", + }, + unit: { type: "string", enum: ["celsius", "fahrenheit"] }, + }, + required: ["location"], + }, + }, + }, + ], + tool_choice: "auto", + }) + .invoke([message]); + // console.log(JSON.stringify(res)); + expect(res.additional_kwargs.tool_calls?.length).toEqual(1); + expect( + JSON.parse( + res.additional_kwargs?.tool_calls?.[0].function.arguments ?? "{}" + ) + ).toEqual(res.tool_calls?.[0].args); +}); + +test("stream with bound tools, yielding a single chunk", async () => { + const chat = new ChatXAI({ + maxRetries: 0, + }); + const message = new HumanMessage("What is the current weather in Hawaii?"); + const stream = await chat + .bind({ + tools: [ + { + type: "function", + function: { + name: "get_current_weather", + description: "Get the current weather in a given location", + parameters: { + type: "object", + properties: { + location: { + type: "string", + description: "The city and state, e.g. 
San Francisco, CA", + }, + unit: { type: "string", enum: ["celsius", "fahrenheit"] }, + }, + required: ["location"], + }, + }, + }, + ], + tool_choice: "auto", + }) + .stream([message]); + // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment + // @ts-expect-error unused var + for await (const chunk of stream) { + // console.log(JSON.stringify(chunk)); + } +}); + +test("Few-shotting with tool calls", async () => { + const chat = new ChatXAI({ + model: "grok-beta", + temperature: 0, + }).bind({ + tools: [ + { + type: "function", + function: { + name: "get_current_weather", + description: "Get the current weather in a given location", + parameters: { + type: "object", + properties: { + location: { + type: "string", + description: "The city and state, e.g. San Francisco, CA", + }, + unit: { type: "string", enum: ["celsius", "fahrenheit"] }, + }, + required: ["location"], + }, + }, + }, + ], + tool_choice: "auto", + }); + const res = await chat.invoke([ + new HumanMessage("What is the weather in SF?"), + new AIMessage({ + content: "", + tool_calls: [ + { + id: "12345", + name: "get_current_weather", + args: { + location: "SF", + }, + }, + ], + }), + new ToolMessage({ + tool_call_id: "12345", + content: "It is currently 24 degrees with hail in SF.", + }), + new AIMessage("It is currently 24 degrees in SF with hail in SF."), + new HumanMessage("What did you say the weather was?"), + ]); + // console.log(res); + expect(res.content).toContain("24"); +}); + +test("ChatXAI can stream tool calls", async () => { + const model = new ChatXAI({ + model: "grok-beta", + temperature: 0, + }); + + const weatherTool = tool((_) => "The temperature is 24 degrees with hail.", { + name: "get_current_weather", + schema: z.object({ + location: z + .string() + .describe("The location to get the current weather for."), + }), + description: "Get the current weather in a given location.", + }); + + const modelWithTools = model.bindTools([weatherTool]); + + const stream = await modelWithTools.stream( + "What is the weather in San Francisco?" + ); + + let finalMessage: AIMessageChunk | undefined; + for await (const chunk of stream) { + finalMessage = !finalMessage ?
chunk : concat(finalMessage, chunk); + } + + expect(finalMessage).toBeDefined(); + if (!finalMessage) return; + + expect(finalMessage.tool_calls?.[0]).toBeDefined(); + if (!finalMessage.tool_calls?.[0]) return; + + expect(finalMessage.tool_calls?.[0].name).toBe("get_current_weather"); + expect(finalMessage.tool_calls?.[0].args).toHaveProperty("location"); + expect(finalMessage.tool_calls?.[0].id).toBeDefined(); +}); diff --git a/libs/langchain-xai/src/tests/chat_models.standard.int.test.ts b/libs/langchain-xai/src/tests/chat_models.standard.int.test.ts new file mode 100644 index 000000000000..0eb03c4f111f --- /dev/null +++ b/libs/langchain-xai/src/tests/chat_models.standard.int.test.ts @@ -0,0 +1,37 @@ +/* eslint-disable no-process-env */ +import { test, expect } from "@jest/globals"; +import { ChatModelIntegrationTests } from "@langchain/standard-tests"; +import { AIMessageChunk } from "@langchain/core/messages"; +import { ChatXAI, ChatXAICallOptions } from "../chat_models.js"; + +class ChatXAIStandardIntegrationTests extends ChatModelIntegrationTests< + ChatXAICallOptions, + AIMessageChunk +> { + constructor() { + if (!process.env.XAI_API_KEY) { + throw new Error( + "Can not run xAI integration tests because XAI_API_KEY is not set" + ); + } + super({ + Cls: ChatXAI, + chatModelHasToolCalling: true, + chatModelHasStructuredOutput: true, + constructorArgs: { + maxRetries: 1, + temperature: 0, + }, + }); + } +} + +const testClass = new ChatXAIStandardIntegrationTests(); + +test("ChatXAIStandardIntegrationTests", async () => { + console.warn = (..._args: unknown[]) => { + // no-op + }; + const testResults = await testClass.runTests(); + expect(testResults).toBe(true); +}); diff --git a/libs/langchain-xai/src/tests/chat_models.standard.test.ts b/libs/langchain-xai/src/tests/chat_models.standard.test.ts new file mode 100644 index 000000000000..99274a58ca9a --- /dev/null +++ b/libs/langchain-xai/src/tests/chat_models.standard.test.ts @@ -0,0 +1,39 @@ +/* eslint-disable no-process-env */ +import { test, expect } from "@jest/globals"; +import { ChatModelUnitTests } from "@langchain/standard-tests"; +import { AIMessageChunk } from "@langchain/core/messages"; +import { ChatXAI, ChatXAICallOptions } from "../chat_models.js"; + +class ChatXAIStandardUnitTests extends ChatModelUnitTests< + ChatXAICallOptions, + AIMessageChunk +> { + constructor() { + super({ + Cls: ChatXAI, + chatModelHasToolCalling: true, + chatModelHasStructuredOutput: true, + constructorArgs: {}, + }); + // This must be set so method like `.bindTools` or `.withStructuredOutput` + // which we call after instantiating the model will work. + // (constructor will throw if API key is not set) + process.env.XAI_API_KEY = "test"; + } + + testChatModelInitApiKey() { + // Unset the API key env var here so this test can properly check + // the API key class arg. + process.env.XAI_API_KEY = ""; + super.testChatModelInitApiKey(); + // Re-set the API key env var here so other tests can run properly. 
+ process.env.XAI_API_KEY = "test"; + } +} + +const testClass = new ChatXAIStandardUnitTests(); + +test("ChatXAIStandardUnitTests", () => { + const testResults = testClass.runTests(); + expect(testResults).toBe(true); +}); diff --git a/libs/langchain-xai/src/tests/chat_models.test.ts b/libs/langchain-xai/src/tests/chat_models.test.ts new file mode 100644 index 000000000000..0412b08853eb --- /dev/null +++ b/libs/langchain-xai/src/tests/chat_models.test.ts @@ -0,0 +1,20 @@ +/* eslint-disable no-process-env */ +import { test, expect } from "@jest/globals"; +import { ChatXAI } from "../chat_models.js"; + +test("Serialization", () => { + const model = new ChatXAI({ + apiKey: "foo", + }); + expect(JSON.stringify(model)).toEqual( + `{"lc":1,"type":"constructor","id":["langchain","chat_models","xai","ChatXAI"],"kwargs":{"api_key":{"lc":1,"type":"secret","id":["XAI_API_KEY"]}}}` + ); +}); + +test("Serialization with no params", () => { + process.env.XAI_API_KEY = "foo"; + const model = new ChatXAI(); + expect(JSON.stringify(model)).toEqual( + `{"lc":1,"type":"constructor","id":["langchain","chat_models","xai","ChatXAI"],"kwargs":{"api_key":{"lc":1,"type":"secret","id":["XAI_API_KEY"]}}}` + ); +}); diff --git a/libs/langchain-xai/src/tests/chat_models_structured_output.int.test.ts b/libs/langchain-xai/src/tests/chat_models_structured_output.int.test.ts new file mode 100644 index 000000000000..61071a5b2f4f --- /dev/null +++ b/libs/langchain-xai/src/tests/chat_models_structured_output.int.test.ts @@ -0,0 +1,269 @@ +import { z } from "zod"; +import { zodToJsonSchema } from "zod-to-json-schema"; +import { ChatPromptTemplate } from "@langchain/core/prompts"; +import { AIMessage } from "@langchain/core/messages"; +import { ChatXAI } from "../chat_models.js"; + +test("withStructuredOutput zod schema function calling", async () => { + const model = new ChatXAI({ + temperature: 0, + model: "grok-beta", + }); + + const calculatorSchema = z.object({ + operation: z.enum(["add", "subtract", "multiply", "divide"]), + number1: z.number(), + number2: z.number(), + }); + const modelWithStructuredOutput = model.withStructuredOutput( + calculatorSchema, + { + name: "calculator", + } + ); + + const prompt = ChatPromptTemplate.fromMessages([ + ["system", "You are VERY bad at math and must always use a calculator."], + ["human", "Please help me!! What is 2 + 2?"], + ]); + const chain = prompt.pipe(modelWithStructuredOutput); + const result = await chain.invoke({}); + // console.log(result); + expect("operation" in result).toBe(true); + expect("number1" in result).toBe(true); + expect("number2" in result).toBe(true); +}); + +test("withStructuredOutput zod schema JSON mode", async () => { + const model = new ChatXAI({ + temperature: 0, + model: "grok-beta", + }); + + const calculatorSchema = z.object({ + operation: z.enum(["add", "subtract", "multiply", "divide"]), + number1: z.number(), + number2: z.number(), + }); + const modelWithStructuredOutput = model.withStructuredOutput( + calculatorSchema, + { + name: "calculator", + method: "jsonMode", + } + ); + + const prompt = ChatPromptTemplate.fromMessages([ + [ + "system", + `You are VERY bad at math and must always use a calculator. +Respond with a JSON object containing three keys: +'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', +'number1': the first number to operate on, +'number2': the second number to operate on. +`, + ], + ["human", "Please help me!!
What is 2 + 2?"], + ]); + const chain = prompt.pipe(modelWithStructuredOutput); + const result = await chain.invoke({}); + // console.log(result); + expect("operation" in result).toBe(true); + expect("number1" in result).toBe(true); + expect("number2" in result).toBe(true); +}); + +test("withStructuredOutput JSON schema function calling", async () => { + const model = new ChatXAI({ + temperature: 0, + model: "grok-beta", + }); + + const calculatorSchema = z.object({ + operation: z.enum(["add", "subtract", "multiply", "divide"]), + number1: z.number(), + number2: z.number(), + }); + const modelWithStructuredOutput = model.withStructuredOutput( + zodToJsonSchema(calculatorSchema), + { + name: "calculator", + } + ); + + const prompt = ChatPromptTemplate.fromMessages([ + ["system", `You are VERY bad at math and must always use a calculator.`], + ["human", "Please help me!! What is 2 + 2?"], + ]); + const chain = prompt.pipe(modelWithStructuredOutput); + const result = await chain.invoke({}); + // console.log(result); + expect("operation" in result).toBe(true); + expect("number1" in result).toBe(true); + expect("number2" in result).toBe(true); +}); + +test("withStructuredOutput OpenAI function definition function calling", async () => { + const model = new ChatXAI({ + temperature: 0, + model: "grok-beta", + }); + + const calculatorSchema = z.object({ + operation: z.enum(["add", "subtract", "multiply", "divide"]), + number1: z.number(), + number2: z.number(), + }); + const modelWithStructuredOutput = model.withStructuredOutput({ + name: "calculator", + parameters: zodToJsonSchema(calculatorSchema), + }); + + const prompt = ChatPromptTemplate.fromMessages([ + "system", + `You are VERY bad at math and must always use a calculator.`, + "human", + "Please help me!! What is 2 + 2?", + ]); + const chain = prompt.pipe(modelWithStructuredOutput); + const result = await chain.invoke({}); + // console.log(result); + expect("operation" in result).toBe(true); + expect("number1" in result).toBe(true); + expect("number2" in result).toBe(true); +}); + +test("withStructuredOutput JSON schema JSON mode", async () => { + const model = new ChatXAI({ + temperature: 0, + model: "grok-beta", + }); + + const calculatorSchema = z.object({ + operation: z.enum(["add", "subtract", "multiply", "divide"]), + number1: z.number(), + number2: z.number(), + }); + const modelWithStructuredOutput = model.withStructuredOutput( + zodToJsonSchema(calculatorSchema), + { + name: "calculator", + method: "jsonMode", + } + ); + + const prompt = ChatPromptTemplate.fromMessages([ + [ + "system", + `You are VERY bad at math and must always use a calculator. +Respond with a JSON object containing three keys: +'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', +'number1': the first number to operate on, +'number2': the second number to operate on. +`, + ], + ["human", "Please help me!! 
What is 2 + 2?"], + ]); + const chain = prompt.pipe(modelWithStructuredOutput); + const result = await chain.invoke({}); + // console.log(result); + expect("operation" in result).toBe(true); + expect("number1" in result).toBe(true); + expect("number2" in result).toBe(true); +}); + +test("withStructuredOutput JSON schema", async () => { + const model = new ChatXAI({ + temperature: 0, + model: "grok-beta", + }); + + const jsonSchema = { + title: "calculator", + description: "A simple calculator", + type: "object", + properties: { + operation: { + type: "string", + enum: ["add", "subtract", "multiply", "divide"], + }, + number1: { type: "number" }, + number2: { type: "number" }, + }, + }; + const modelWithStructuredOutput = model.withStructuredOutput(jsonSchema); + + const prompt = ChatPromptTemplate.fromMessages([ + [ + "system", + `You are VERY bad at math and must always use a calculator. +Respond with a JSON object containing three keys: +'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide', +'number1': the first number to operate on, +'number2': the second number to operate on. +`, + ], + ["human", "Please help me!! What is 2 + 2?"], + ]); + const chain = prompt.pipe(modelWithStructuredOutput); + const result = await chain.invoke({}); + // console.log(result); + expect("operation" in result).toBe(true); + expect("number1" in result).toBe(true); + expect("number2" in result).toBe(true); +}); + +test("withStructuredOutput includeRaw true", async () => { + const model = new ChatXAI({ + temperature: 0, + model: "grok-beta", + }); + + const calculatorSchema = z.object({ + operation: z.enum(["add", "subtract", "multiply", "divide"]), + number1: z.number(), + number2: z.number(), + }); + const modelWithStructuredOutput = model.withStructuredOutput( + calculatorSchema, + { + name: "calculator", + includeRaw: true, + } + ); + + const prompt = ChatPromptTemplate.fromMessages([ + ["system", "You are VERY bad at math and must always use a calculator."], + ["human", "Please help me!! 
What is 2 + 2?"], + ]); + const chain = prompt.pipe(modelWithStructuredOutput); + const result = await chain.invoke({}); + // console.log(result); + + expect("parsed" in result).toBe(true); + // Need to make TS happy :) + if (!("parsed" in result)) { + throw new Error("parsed not in result"); + } + const { parsed } = result; + expect("operation" in parsed).toBe(true); + expect("number1" in parsed).toBe(true); + expect("number2" in parsed).toBe(true); + + expect("raw" in result).toBe(true); + // Need to make TS happy :) + if (!("raw" in result)) { + throw new Error("raw not in result"); + } + const { raw } = result as { raw: AIMessage }; + + expect(raw.tool_calls?.[0].args).toBeDefined(); + if (!raw.tool_calls?.[0].args) { + throw new Error("args not in tool call"); + } + expect(raw.tool_calls?.length).toBeGreaterThan(0); + expect(raw.tool_calls?.[0].name).toBe("calculator"); + expect("operation" in raw.tool_calls[0].args).toBe(true); + expect("number1" in raw.tool_calls[0].args).toBe(true); + expect("number2" in raw.tool_calls[0].args).toBe(true); +}); diff --git a/libs/langchain-xai/tsconfig.cjs.json b/libs/langchain-xai/tsconfig.cjs.json new file mode 100644 index 000000000000..3b7026ea406c --- /dev/null +++ b/libs/langchain-xai/tsconfig.cjs.json @@ -0,0 +1,8 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "module": "commonjs", + "declaration": false + }, + "exclude": ["node_modules", "dist", "docs", "**/tests"] +} diff --git a/libs/langchain-xai/tsconfig.json b/libs/langchain-xai/tsconfig.json new file mode 100644 index 000000000000..bc85d83b6229 --- /dev/null +++ b/libs/langchain-xai/tsconfig.json @@ -0,0 +1,23 @@ +{ + "extends": "@tsconfig/recommended", + "compilerOptions": { + "outDir": "../dist", + "rootDir": "./src", + "target": "ES2021", + "lib": ["ES2021", "ES2022.Object", "DOM"], + "module": "ES2020", + "moduleResolution": "nodenext", + "esModuleInterop": true, + "declaration": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "useDefineForClassFields": true, + "strictPropertyInitialization": false, + "allowJs": true, + "strict": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist", "docs"] +} diff --git a/libs/langchain-xai/turbo.json b/libs/langchain-xai/turbo.json new file mode 100644 index 000000000000..d024cee15c81 --- /dev/null +++ b/libs/langchain-xai/turbo.json @@ -0,0 +1,11 @@ +{ + "extends": ["//"], + "pipeline": { + "build": { + "outputs": ["**/dist/**"] + }, + "build:internal": { + "dependsOn": ["^build:internal"] + } + } +} diff --git a/yarn.lock b/yarn.lock index 2fd5980a162b..17738b1968c9 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12820,6 +12820,42 @@ __metadata: languageName: unknown linkType: soft +"@langchain/xai@workspace:*, @langchain/xai@workspace:libs/langchain-xai": + version: 0.0.0-use.local + resolution: "@langchain/xai@workspace:libs/langchain-xai" + dependencies: + "@jest/globals": ^29.5.0 + "@langchain/core": "workspace:*" + "@langchain/openai": "workspace:^" + "@langchain/scripts": ">=0.1.0 <0.2.0" + "@langchain/standard-tests": 0.0.0 + "@swc/core": ^1.3.90 + "@swc/jest": ^0.2.29 + "@tsconfig/recommended": ^1.0.3 + "@types/uuid": ^9 + "@typescript-eslint/eslint-plugin": ^6.12.0 + "@typescript-eslint/parser": ^6.12.0 + dotenv: ^16.3.1 + dpdm: ^3.12.0 + eslint: ^8.33.0 + eslint-config-airbnb-base: ^15.0.0 + eslint-config-prettier: ^8.6.0 + eslint-plugin-import: ^2.27.5 + eslint-plugin-no-instanceof: ^1.0.1 + eslint-plugin-prettier: 
^4.2.1 + jest: ^29.5.0 + jest-environment-node: ^29.6.4 + prettier: ^2.8.3 + release-it: ^17.6.0 + rollup: ^4.5.2 + ts-jest: ^29.1.0 + typescript: <5.2.0 + zod: ^3.22.4 + peerDependencies: + "@langchain/core": ">=0.2.21 <0.4.0" + languageName: unknown + linkType: soft + "@langchain/yandex@workspace:*, @langchain/yandex@workspace:libs/langchain-yandex": version: 0.0.0-use.local resolution: "@langchain/yandex@workspace:libs/langchain-yandex" @@ -27304,6 +27340,7 @@ __metadata: "@langchain/scripts": ">=0.1.0 <0.2.0" "@langchain/textsplitters": "workspace:*" "@langchain/weaviate": "workspace:*" + "@langchain/xai": "workspace:*" "@langchain/yandex": "workspace:*" "@layerup/layerup-security": ^1.5.12 "@opensearch-project/opensearch": ^2.2.0 From 6d025f5f72163ddac8c0b27e2bdb15676691cc91 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Thu, 7 Nov 2024 10:34:56 -0800 Subject: [PATCH 068/100] fix(docs): Add xAI to platforms page (#7170) --- docs/core_docs/docs/integrations/platforms/index.mdx | 1 + libs/langchain-xai/package.json | 3 ++- yarn.lock | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/core_docs/docs/integrations/platforms/index.mdx b/docs/core_docs/docs/integrations/platforms/index.mdx index d4b237086040..546d883e736d 100644 --- a/docs/core_docs/docs/integrations/platforms/index.mdx +++ b/docs/core_docs/docs/integrations/platforms/index.mdx @@ -29,3 +29,4 @@ These providers have standalone `@langchain/{provider}` packages for improved ve - [Weaviate](https://www.npmjs.com/package/@langchain/weaviate) - [Yandex](https://www.npmjs.com/package/@langchain/yandex) - [Azure CosmosDB](https://www.npmjs.com/package/@langchain/azure-cosmosdb) +- [xAI](https://www.npmjs.com/package/@langchain/xai) diff --git a/libs/langchain-xai/package.json b/libs/langchain-xai/package.json index 5a690400a871..62dd7a368cb8 100644 --- a/libs/langchain-xai/package.json +++ b/libs/langchain-xai/package.json @@ -67,7 +67,8 @@ "rollup": "^4.5.2", "ts-jest": "^29.1.0", "typescript": "<5.2.0", - "zod": "^3.22.4" + "zod": "^3.22.4", + "zod-to-json-schema": "^3.23.1" }, "publishConfig": { "access": "public" diff --git a/yarn.lock b/yarn.lock index 17738b1968c9..c1f78f3cc56c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12851,6 +12851,7 @@ __metadata: ts-jest: ^29.1.0 typescript: <5.2.0 zod: ^3.22.4 + zod-to-json-schema: ^3.23.1 peerDependencies: "@langchain/core": ">=0.2.21 <0.4.0" languageName: unknown From 5d21d5e544a9c1dfb87a5599716c90c63a322e14 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 8 Nov 2024 20:31:18 +0100 Subject: [PATCH 069/100] fix(scripts): add missing shebang in bin scripts --- libs/langchain-scripts/bin/build.js | 1 + libs/langchain-scripts/bin/filter_spam_comment.js | 1 + libs/langchain-scripts/bin/validate_notebook.js | 1 + 3 files changed, 3 insertions(+) diff --git a/libs/langchain-scripts/bin/build.js b/libs/langchain-scripts/bin/build.js index ec93df06484c..866af9d6ad53 100755 --- a/libs/langchain-scripts/bin/build.js +++ b/libs/langchain-scripts/bin/build.js @@ -1 +1,2 @@ +#!/usr/bin/env node import "../dist/build/index.js"; diff --git a/libs/langchain-scripts/bin/filter_spam_comment.js b/libs/langchain-scripts/bin/filter_spam_comment.js index 0da333aee677..bd505557026d 100755 --- a/libs/langchain-scripts/bin/filter_spam_comment.js +++ b/libs/langchain-scripts/bin/filter_spam_comment.js @@ -1 +1,2 @@ +#!/usr/bin/env node import "../dist/filter_spam_comment.js"; diff --git a/libs/langchain-scripts/bin/validate_notebook.js 
b/libs/langchain-scripts/bin/validate_notebook.js index cac5f69d91e8..d0054c5ce957 100755 --- a/libs/langchain-scripts/bin/validate_notebook.js +++ b/libs/langchain-scripts/bin/validate_notebook.js @@ -1 +1,2 @@ +#!/usr/bin/env node import "../dist/notebooks/index.js"; From 45dd3cf5c48c563b5110bc34bee9acdf25a0d5e1 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Sat, 9 Nov 2024 00:28:56 +0100 Subject: [PATCH 070/100] Bump to 0.1.4 --- libs/langchain-scripts/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-scripts/package.json b/libs/langchain-scripts/package.json index c7aca4e17ea6..611c61749b4f 100644 --- a/libs/langchain-scripts/package.json +++ b/libs/langchain-scripts/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/scripts", - "version": "0.1.3", + "version": "0.1.4", "description": "Shared scripts for LangChain.js", "type": "module", "engines": { From 8fc07c99188cc7a2b4286401222a15d7e97279e7 Mon Sep 17 00:00:00 2001 From: nick-w-nick <43578531+nick-w-nick@users.noreply.github.com> Date: Sat, 9 Nov 2024 14:51:19 -0500 Subject: [PATCH 071/100] pinecone[patch]: Update `@pinecone/pinecone-database` version to resolve type errors (#7167) Co-authored-by: jacoblee93 --- examples/package.json | 2 +- libs/langchain-pinecone/package.json | 2 +- yarn.lock | 15 ++++++--------- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/examples/package.json b/examples/package.json index 7ace75f8b9d2..3b3c3340186f 100644 --- a/examples/package.json +++ b/examples/package.json @@ -65,7 +65,7 @@ "@langchain/yandex": "workspace:*", "@layerup/layerup-security": "^1.5.12", "@opensearch-project/opensearch": "^2.2.0", - "@pinecone-database/pinecone": "^3.0.0", + "@pinecone-database/pinecone": "^4.0.0", "@planetscale/database": "^1.8.0", "@prisma/client": "^4.11.0", "@qdrant/js-client-rest": "^1.9.0", diff --git a/libs/langchain-pinecone/package.json b/libs/langchain-pinecone/package.json index 1d06ca43267d..59a7637f00a7 100644 --- a/libs/langchain-pinecone/package.json +++ b/libs/langchain-pinecone/package.json @@ -32,7 +32,7 @@ "author": "Pinecone, Inc", "license": "MIT", "dependencies": { - "@pinecone-database/pinecone": "^3.0.0", + "@pinecone-database/pinecone": "^3.0.0 || ^4.0.0", "flat": "^5.0.2", "uuid": "^10.0.0" }, diff --git a/yarn.lock b/yarn.lock index c1f78f3cc56c..543c3e5940ee 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12574,7 +12574,7 @@ __metadata: "@langchain/core": "workspace:*" "@langchain/openai": "workspace:*" "@langchain/scripts": ">=0.1.0 <0.2.0" - "@pinecone-database/pinecone": ^3.0.0 + "@pinecone-database/pinecone": ^3.0.0 || ^4.0.0 "@swc/core": ^1.3.90 "@swc/jest": ^0.2.29 "@tsconfig/recommended": ^1.0.3 @@ -14121,15 +14121,12 @@ __metadata: languageName: node linkType: hard -"@pinecone-database/pinecone@npm:^3.0.0": - version: 3.0.0 - resolution: "@pinecone-database/pinecone@npm:3.0.0" +"@pinecone-database/pinecone@npm:^3.0.0 || ^4.0.0, @pinecone-database/pinecone@npm:^4.0.0": + version: 4.0.0 + resolution: "@pinecone-database/pinecone@npm:4.0.0" dependencies: - "@sinclair/typebox": ^0.29.0 - ajv: ^8.12.0 - cross-fetch: ^3.1.5 encoding: ^0.1.13 - checksum: 3803c6fead5343e495ccfe177793ed6f58411c2c5efdb1538cdcb0203164ebefb299ed30c8b9c1590d00cafb1dd20e2d06d6b0339159ea237d2939856f377ca9 + checksum: 7523d7b8dc6a5d7b5d5cf37c97f6070112473f26d82aec23ca198554634e6bc0883866fcc9a8b2978e287daad8665085cef1d4f16d86a7ba0f8b623d88bdda54 languageName: node linkType: hard @@ -27345,7 +27342,7 @@ __metadata: "@langchain/yandex": "workspace:*" 
"@layerup/layerup-security": ^1.5.12 "@opensearch-project/opensearch": ^2.2.0 - "@pinecone-database/pinecone": ^3.0.0 + "@pinecone-database/pinecone": ^4.0.0 "@planetscale/database": ^1.8.0 "@prisma/client": ^4.11.0 "@qdrant/js-client-rest": ^1.9.0 From 05e5813715150cd69d9e384924818562e3b7c1fa Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Sat, 9 Nov 2024 11:57:40 -0800 Subject: [PATCH 072/100] chore(pinecone): Release 0.1.2 (#7175) --- libs/langchain-pinecone/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-pinecone/package.json b/libs/langchain-pinecone/package.json index 59a7637f00a7..2f6e4cef205f 100644 --- a/libs/langchain-pinecone/package.json +++ b/libs/langchain-pinecone/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/pinecone", - "version": "0.1.1", + "version": "0.1.2", "description": "LangChain integration for Pinecone's vector database", "type": "module", "engines": { From 831f9de94dc1824e547434ce496169d38a7633ef Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 12:58:33 -0800 Subject: [PATCH 073/100] fix(core): Clear inherited config for called callbacks (#7174) --- langchain-core/src/callbacks/promises.ts | 44 +------ .../src/callbacks/tests/callbacks.test.ts | 38 ++++++ .../singletons/async_local_storage/globals.ts | 20 +++ .../singletons/async_local_storage/index.ts | 98 +++++++++++++++ langchain-core/src/singletons/callbacks.ts | 67 ++++++++++ langchain-core/src/singletons/index.ts | 114 ++---------------- 6 files changed, 238 insertions(+), 143 deletions(-) create mode 100644 langchain-core/src/singletons/async_local_storage/globals.ts create mode 100644 langchain-core/src/singletons/async_local_storage/index.ts create mode 100644 langchain-core/src/singletons/callbacks.ts diff --git a/langchain-core/src/callbacks/promises.ts b/langchain-core/src/callbacks/promises.ts index 3484a0b4a522..fc10f044176e 100644 --- a/langchain-core/src/callbacks/promises.ts +++ b/langchain-core/src/callbacks/promises.ts @@ -1,43 +1,3 @@ -import PQueueMod from "p-queue"; +import { awaitAllCallbacks, consumeCallback } from "../singletons/callbacks.js"; -let queue: typeof import("p-queue")["default"]["prototype"]; - -/** - * Creates a queue using the p-queue library. The queue is configured to - * auto-start and has a concurrency of 1, meaning it will process tasks - * one at a time. - */ -function createQueue() { - const PQueue = "default" in PQueueMod ? PQueueMod.default : PQueueMod; - return new PQueue({ - autoStart: true, - concurrency: 1, - }); -} - -/** - * Consume a promise, either adding it to the queue or waiting for it to resolve - * @param promiseFn Promise to consume - * @param wait Whether to wait for the promise to resolve or resolve immediately - */ -export async function consumeCallback( - promiseFn: () => Promise | T | void, - wait: boolean -): Promise { - if (wait === true) { - await promiseFn(); - } else { - if (typeof queue === "undefined") { - queue = createQueue(); - } - void queue.add(promiseFn); - } -} - -/** - * Waits for all promises in the queue to resolve. If the queue is - * undefined, it immediately resolves a promise. - */ -export function awaitAllCallbacks(): Promise { - return typeof queue !== "undefined" ? 
queue.onIdle() : Promise.resolve(); -} +export { awaitAllCallbacks, consumeCallback }; diff --git a/langchain-core/src/callbacks/tests/callbacks.test.ts b/langchain-core/src/callbacks/tests/callbacks.test.ts index 37e9e5d84410..889c1fefa1be 100644 --- a/langchain-core/src/callbacks/tests/callbacks.test.ts +++ b/langchain-core/src/callbacks/tests/callbacks.test.ts @@ -1,6 +1,7 @@ /* eslint-disable no-promise-executor-return */ import { test, expect } from "@jest/globals"; import * as uuid from "uuid"; +import { AsyncLocalStorage } from "node:async_hooks"; import { CallbackManager } from "../manager.js"; import { BaseCallbackHandler, type BaseCallbackHandlerInput } from "../base.js"; import type { Serialized } from "../../load/serializable.js"; @@ -10,6 +11,8 @@ import type { AgentAction, AgentFinish } from "../../agents.js"; import { BaseMessage, HumanMessage } from "../../messages/index.js"; import type { LLMResult } from "../../outputs.js"; import { RunnableLambda } from "../../runnables/base.js"; +import { AsyncLocalStorageProviderSingleton } from "../../singletons/index.js"; +import { awaitAllCallbacks } from "../promises.js"; class FakeCallbackHandler extends BaseCallbackHandler { name = `fake-${uuid.v4()}`; @@ -536,3 +539,38 @@ test("chain should still run if a normal callback handler throws an error", asyn ); expect(res).toEqual("hello world"); }); + +test("runnables in callbacks should be root runs", async () => { + AsyncLocalStorageProviderSingleton.initializeGlobalInstance( + new AsyncLocalStorage() + ); + const nestedChain = RunnableLambda.from(async () => { + const subRun = RunnableLambda.from(async () => "hello world"); + return await subRun.invoke({ foo: "bar" }); + }); + let error; + let finalInputs; + const res = await nestedChain.invoke( + {}, + { + callbacks: [ + { + handleChainStart: (_chain, inputs) => { + finalInputs = inputs; + try { + expect( + AsyncLocalStorageProviderSingleton.getRunnableConfig() + ).toEqual(undefined); + } catch (e) { + error = e; + } + }, + }, + ], + } + ); + await awaitAllCallbacks(); + expect(res).toEqual("hello world"); + expect(error).toBe(undefined); + expect(finalInputs).toEqual({ foo: "bar" }); +}); diff --git a/langchain-core/src/singletons/async_local_storage/globals.ts b/langchain-core/src/singletons/async_local_storage/globals.ts new file mode 100644 index 000000000000..c3428989b8c4 --- /dev/null +++ b/langchain-core/src/singletons/async_local_storage/globals.ts @@ -0,0 +1,20 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +export interface AsyncLocalStorageInterface { + getStore: () => any | undefined; + + run: (store: any, callback: () => T) => T; + + enterWith: (store: any) => void; +} + +export const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage"); + +export const setGlobalAsyncLocalStorageInstance = ( + instance: AsyncLocalStorageInterface +) => { + (globalThis as any)[TRACING_ALS_KEY] = instance; +}; + +export const getGlobalAsyncLocalStorageInstance = () => { + return (globalThis as any)[TRACING_ALS_KEY]; +}; diff --git a/langchain-core/src/singletons/async_local_storage/index.ts b/langchain-core/src/singletons/async_local_storage/index.ts new file mode 100644 index 000000000000..f89c9fbce50a --- /dev/null +++ b/langchain-core/src/singletons/async_local_storage/index.ts @@ -0,0 +1,98 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import { RunTree } from "langsmith"; +import { + AsyncLocalStorageInterface, + getGlobalAsyncLocalStorageInstance, + setGlobalAsyncLocalStorageInstance, +} from 
"./globals.js"; +import { CallbackManager } from "../../callbacks/manager.js"; +import { LangChainTracer } from "../../tracers/tracer_langchain.js"; + +export class MockAsyncLocalStorage implements AsyncLocalStorageInterface { + getStore(): any { + return undefined; + } + + run(_store: any, callback: () => T): T { + return callback(); + } + + enterWith(_store: any) { + return undefined; + } +} + +const mockAsyncLocalStorage = new MockAsyncLocalStorage(); + +const LC_CHILD_KEY = Symbol.for("lc:child_config"); + +export const _CONTEXT_VARIABLES_KEY = Symbol.for("lc:context_variables"); + +class AsyncLocalStorageProvider { + getInstance(): AsyncLocalStorageInterface { + return getGlobalAsyncLocalStorageInstance() ?? mockAsyncLocalStorage; + } + + getRunnableConfig() { + const storage = this.getInstance(); + // this has the runnable config + // which means that we should also have an instance of a LangChainTracer + // with the run map prepopulated + return storage.getStore()?.extra?.[LC_CHILD_KEY]; + } + + runWithConfig( + config: any, + callback: () => T, + avoidCreatingRootRunTree?: boolean + ): T { + const callbackManager = CallbackManager._configureSync( + config?.callbacks, + undefined, + config?.tags, + undefined, + config?.metadata + ); + const storage = this.getInstance(); + const previousValue = storage.getStore(); + const parentRunId = callbackManager?.getParentRunId(); + + const langChainTracer = callbackManager?.handlers?.find( + (handler) => handler?.name === "langchain_tracer" + ) as LangChainTracer | undefined; + + let runTree; + if (langChainTracer && parentRunId) { + runTree = langChainTracer.convertToRunTree(parentRunId); + } else if (!avoidCreatingRootRunTree) { + runTree = new RunTree({ + name: "", + tracingEnabled: false, + }); + } + + if (runTree) { + runTree.extra = { ...runTree.extra, [LC_CHILD_KEY]: config }; + } + + if ( + previousValue !== undefined && + previousValue[_CONTEXT_VARIABLES_KEY] !== undefined + ) { + (runTree as any)[_CONTEXT_VARIABLES_KEY] = + previousValue[_CONTEXT_VARIABLES_KEY]; + } + + return storage.run(runTree, callback); + } + + initializeGlobalInstance(instance: AsyncLocalStorageInterface) { + if (getGlobalAsyncLocalStorageInstance() === undefined) { + setGlobalAsyncLocalStorageInstance(instance); + } + } +} + +const AsyncLocalStorageProviderSingleton = new AsyncLocalStorageProvider(); + +export { AsyncLocalStorageProviderSingleton, type AsyncLocalStorageInterface }; diff --git a/langchain-core/src/singletons/callbacks.ts b/langchain-core/src/singletons/callbacks.ts new file mode 100644 index 000000000000..681d770ba96d --- /dev/null +++ b/langchain-core/src/singletons/callbacks.ts @@ -0,0 +1,67 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ + +import PQueueMod from "p-queue"; +import { getGlobalAsyncLocalStorageInstance } from "./async_local_storage/globals.js"; + +let queue: typeof import("p-queue")["default"]["prototype"]; + +/** + * Creates a queue using the p-queue library. The queue is configured to + * auto-start and has a concurrency of 1, meaning it will process tasks + * one at a time. + */ +function createQueue() { + const PQueue: any = "default" in PQueueMod ? 
PQueueMod.default : PQueueMod; + return new PQueue({ + autoStart: true, + concurrency: 1, + }); +} + +export function getQueue() { + if (typeof queue === "undefined") { + queue = createQueue(); + } + return queue; +} + +/** + * Consume a promise, either adding it to the queue or waiting for it to resolve + * @param promiseFn Promise to consume + * @param wait Whether to wait for the promise to resolve or resolve immediately + */ +export async function consumeCallback( + promiseFn: () => Promise | T | void, + wait: boolean +): Promise { + if (wait === true) { + // Clear config since callbacks are not part of the root run + // Avoid using global singleton due to circuluar dependency issues + if (getGlobalAsyncLocalStorageInstance() !== undefined) { + await getGlobalAsyncLocalStorageInstance().run(undefined, async () => + promiseFn() + ); + } else { + await promiseFn(); + } + } else { + queue = getQueue(); + void queue.add(async () => { + if (getGlobalAsyncLocalStorageInstance() !== undefined) { + await getGlobalAsyncLocalStorageInstance().run(undefined, async () => + promiseFn() + ); + } else { + await promiseFn(); + } + }); + } +} + +/** + * Waits for all promises in the queue to resolve. If the queue is + * undefined, it immediately resolves a promise. + */ +export function awaitAllCallbacks(): Promise { + return typeof queue !== "undefined" ? queue.onIdle() : Promise.resolve(); +} diff --git a/langchain-core/src/singletons/index.ts b/langchain-core/src/singletons/index.ts index e2c0119db4c3..bee8320abaec 100644 --- a/langchain-core/src/singletons/index.ts +++ b/langchain-core/src/singletons/index.ts @@ -1,102 +1,14 @@ /* eslint-disable @typescript-eslint/no-explicit-any */ -import { RunTree } from "langsmith"; -import { CallbackManager } from "../callbacks/manager.js"; -import { LangChainTracer } from "../tracers/tracer_langchain.js"; - -export interface AsyncLocalStorageInterface { - getStore: () => any | undefined; - - run: (store: any, callback: () => T) => T; - - enterWith: (store: any) => void; -} - -export class MockAsyncLocalStorage implements AsyncLocalStorageInterface { - getStore(): any { - return undefined; - } - - run(_store: any, callback: () => T): T { - return callback(); - } - - enterWith(_store: any) { - return undefined; - } -} - -const mockAsyncLocalStorage = new MockAsyncLocalStorage(); - -const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage"); -const LC_CHILD_KEY = Symbol.for("lc:child_config"); - -export const _CONTEXT_VARIABLES_KEY = Symbol.for("lc:context_variables"); - -class AsyncLocalStorageProvider { - getInstance(): AsyncLocalStorageInterface { - return (globalThis as any)[TRACING_ALS_KEY] ?? 
mockAsyncLocalStorage; - } - - getRunnableConfig() { - const storage = this.getInstance(); - // this has the runnable config - // which means that we should also have an instance of a LangChainTracer - // with the run map prepopulated - return storage.getStore()?.extra?.[LC_CHILD_KEY]; - } - - runWithConfig( - config: any, - callback: () => T, - avoidCreatingRootRunTree?: boolean - ): T { - const callbackManager = CallbackManager._configureSync( - config?.callbacks, - undefined, - config?.tags, - undefined, - config?.metadata - ); - const storage = this.getInstance(); - const previousValue = storage.getStore(); - const parentRunId = callbackManager?.getParentRunId(); - - const langChainTracer = callbackManager?.handlers?.find( - (handler) => handler?.name === "langchain_tracer" - ) as LangChainTracer | undefined; - - let runTree; - if (langChainTracer && parentRunId) { - runTree = langChainTracer.convertToRunTree(parentRunId); - } else if (!avoidCreatingRootRunTree) { - runTree = new RunTree({ - name: "", - tracingEnabled: false, - }); - } - - if (runTree) { - runTree.extra = { ...runTree.extra, [LC_CHILD_KEY]: config }; - } - - if ( - previousValue !== undefined && - previousValue[_CONTEXT_VARIABLES_KEY] !== undefined - ) { - (runTree as any)[_CONTEXT_VARIABLES_KEY] = - previousValue[_CONTEXT_VARIABLES_KEY]; - } - - return storage.run(runTree, callback); - } - - initializeGlobalInstance(instance: AsyncLocalStorageInterface) { - if ((globalThis as any)[TRACING_ALS_KEY] === undefined) { - (globalThis as any)[TRACING_ALS_KEY] = instance; - } - } -} - -const AsyncLocalStorageProviderSingleton = new AsyncLocalStorageProvider(); - -export { AsyncLocalStorageProviderSingleton }; +import { + type AsyncLocalStorageInterface, + AsyncLocalStorageProviderSingleton, + _CONTEXT_VARIABLES_KEY, + MockAsyncLocalStorage, +} from "./async_local_storage/index.js"; + +export { + type AsyncLocalStorageInterface, + AsyncLocalStorageProviderSingleton, + _CONTEXT_VARIABLES_KEY, + MockAsyncLocalStorage, +}; From 6db5fb8cb96d46b1c8a78cea5205071210917c24 Mon Sep 17 00:00:00 2001 From: Fares <46092308+FaresKi@users.noreply.github.com> Date: Mon, 11 Nov 2024 22:42:20 +0100 Subject: [PATCH 074/100] feat(community): added code blocks in markdown into document's page content (#7178) --- .../src/document_loaders/web/confluence.ts | 39 +++++++++++++++---- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/libs/langchain-community/src/document_loaders/web/confluence.ts b/libs/langchain-community/src/document_loaders/web/confluence.ts index 5a821b9628a3..848d36b8edf6 100644 --- a/libs/langchain-community/src/document_loaders/web/confluence.ts +++ b/libs/langchain-community/src/document_loaders/web/confluence.ts @@ -218,19 +218,44 @@ export class ConfluencePagesLoader extends BaseDocumentLoader { * @returns A Document instance. 
*/ private createDocumentFromPage(page: ConfluencePage): Document { + const htmlContent = page.body.storage.value; + + // Handle both self-closing and regular macros for attachments and view-file + const htmlWithoutOtherMacros = htmlContent.replace( + /]*(?:\/?>|>.*?<\/ac:structured-macro>)/gs, + "[ATTACHMENT]" + ); + + // Extract and preserve code blocks with unique placeholders + const codeBlocks: { language: string; code: string }[] = []; + const htmlWithPlaceholders = htmlWithoutOtherMacros.replace( + /(.*?)<\/ac:parameter>.*?<\/ac:plain-text-body><\/ac:structured-macro>/g, + (_, language, code) => { + const placeholder = `CODE_BLOCK_${codeBlocks.length}`; + codeBlocks.push({ language, code: code.trim() }); + return `\n${placeholder}\n`; + } + ); + // Convert the HTML content to plain text - const plainTextContent = htmlToText(page.body.storage.value, { + let plainTextContent = htmlToText(htmlWithPlaceholders, { wordwrap: false, - preserveNewlines: false, + preserveNewlines: true, + }); + + // Reinsert code blocks with proper markdown formatting + codeBlocks.forEach(({ language, code }, index) => { + const placeholder = `CODE_BLOCK_${index}`; + plainTextContent = plainTextContent.replace( + placeholder, + `\`\`\`${language}\n${code}\n\`\`\`` + ); }); // Remove empty lines const textWithoutEmptyLines = plainTextContent.replace(/^\s*[\r\n]/gm, ""); - // Generate the URL - const pageUrl = `${this.baseUrl}/spaces/${this.spaceKey}/pages/${page.id}`; - - // Return a langchain document + // Rest of the method remains the same... return new Document({ pageContent: textWithoutEmptyLines, metadata: { @@ -238,7 +263,7 @@ export class ConfluencePagesLoader extends BaseDocumentLoader { status: page.status, title: page.title, type: page.type, - url: pageUrl, + url: `${this.baseUrl}/spaces/${this.spaceKey}/pages/${page.id}`, version: page.version?.number, updated_by: page.version?.by?.displayName, updated_at: page.version?.when, From 14fa21035841be276b80994c800b08fbb9d9581f Mon Sep 17 00:00:00 2001 From: FilipZmijewski Date: Tue, 12 Nov 2024 00:42:07 +0100 Subject: [PATCH 075/100] fix(community): For IBM implementation rename variables, remove defaults, fix tests and minor docs fixes (#7129) Co-authored-by: Jacob Lee --- .../docs/integrations/chat/ibm.ipynb | 103 ++------------- .../docs/integrations/llms/ibm.ipynb | 16 +-- .../integrations/text_embedding/ibm.ipynb | 93 ++++---------- .../src/chat_models/ibm.ts | 111 +++++++--------- .../src/chat_models/tests/ibm.int.test.ts | 86 ++++++------- .../tests/ibm.standard.int.test.ts | 1 + .../chat_models/tests/ibm.standard.test.ts | 1 + .../src/chat_models/tests/ibm.test.ts | 11 +- .../langchain-community/src/embeddings/ibm.ts | 32 +++-- .../src/embeddings/tests/ibm.int.test.ts | 4 + .../src/embeddings/tests/ibm.test.ts | 12 +- libs/langchain-community/src/llms/ibm.ts | 119 +++++++++--------- .../src/llms/tests/ibm.int.test.ts | 59 +++++++-- .../src/llms/tests/ibm.test.ts | 41 +++--- libs/langchain-community/src/types/ibm.ts | 2 +- 15 files changed, 287 insertions(+), 404 deletions(-) diff --git a/docs/core_docs/docs/integrations/chat/ibm.ipynb b/docs/core_docs/docs/integrations/chat/ibm.ipynb index 46cb7bf92d74..c3f60d925f99 100644 --- a/docs/core_docs/docs/integrations/chat/ibm.ipynb +++ b/docs/core_docs/docs/integrations/chat/ibm.ipynb @@ -21,14 +21,15 @@ "source": [ "# IBM watsonx.ai\n", "\n", - "This will help you getting started with IBM watsonx.ai [chat models](/docs/concepts/chat_models). 
For detailed documentation of all `IBM watsonx.ai` features and configurations head to the [IBM watsonx.ai](https://api.js.langchain.com/classes/_langchain_community.chat_models_ibm.html).\n", + "This will help you getting started with IBM watsonx.ai [chat models](/docs/concepts/chat_models). For detailed documentation of all `IBM watsonx.ai` features and configurations head to the [IBM watsonx.ai](https://api.js.langchain.com/modules/_langchain_community.chat_models_ibm.html).\n", "\n", "## Overview\n", "### Integration details\n", "\n", "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/ibm_watsonx/) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`ChatWatsonx`](https://api.js.langchain.com/classes/_langchain_community.chat_models_ibm.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "| [`ChatWatsonx`](https://api.js.langchain.com/classes/_langchain_community.chat_models_ibm.ChatWatsonx.html) | [@langchain/community](https://www.npmjs.com/package/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", "### Model features\n", "\n", @@ -138,7 +139,7 @@ "\n", "\n", "\n", - " __package_name__ @langchain/core\n", + " @langchain/community @langchain/core\n", "\n", "\n", "```" @@ -340,7 +341,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 1, "id": "cd21e356", "metadata": {}, "outputs": [ @@ -357,97 +358,7 @@ " only\n", " natural\n", " satellite\n", - " and\n", - " the\n", - " fifth\n", - " largest\n", - " satellite\n", - " in\n", - " the\n", - " Solar\n", - " System\n", - ".\n", - " It\n", - " or\n", - "bits\n", - " Earth\n", - " every\n", - " \n", - "2\n", - "7\n", - ".\n", - "3\n", - " days\n", - " and\n", - " rot\n", - "ates\n", - " on\n", - " its\n", - " axis\n", - " in\n", - " the\n", - " same\n", - " amount\n", - " of\n", - " time\n", - ",\n", - " which\n", - " is\n", - " why\n", - " we\n", - " always\n", - " see\n", - " the\n", - " same\n", - " side\n", - " of\n", - " it\n", - ".\n", - " The\n", - " Moon\n", - "'\n", - "s\n", - " phases\n", - " change\n", - " as\n", - " it\n", - " or\n", - "bits\n", - " Earth\n", - ",\n", - " going\n", - " through\n", - " cycles\n", - " of\n", - " new\n", - ",\n", - " c\n", - "res\n", - "cent\n", - ",\n", - " half\n", - ",\n", - " g\n", - "ib\n", - "b\n", - "ous\n", - ",\n", - " and\n", - " full\n", - " phases\n", - ".\n", - " Its\n", - " gravity\n", - " influences\n", - " Earth\n", - "'\n", - "s\n", - " t\n", - "ides\n", - " and\n", - " stabil\n", - "izes\n", - " our\n" + " and\n" ] } ], @@ -589,4 +500,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/core_docs/docs/integrations/llms/ibm.ipynb b/docs/core_docs/docs/integrations/llms/ibm.ipynb index 1644f7401724..7c47f382e59e 100644 --- a/docs/core_docs/docs/integrations/llms/ibm.ipynb +++ b/docs/core_docs/docs/integrations/llms/ibm.ipynb @@ -22,7 +22,7 @@ "# IBM watsonx.ai\n", "\n", "\n", - "This will help you get started with IBM [text completion models 
(LLMs)](/docs/concepts/text_llms) using LangChain. For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [IBM watsonx.ai](https://api.js.langchain.com/classes/_langchain_community.llms_ibm.html).\n", + "This will help you get started with IBM [text completion models (LLMs)](/docs/concepts/text_llms) using LangChain. For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [IBM watsonx.ai](https://api.js.langchain.com/modules/_langchain_community.llms_ibm.html).\n", "\n", "## Overview\n", "### Integration details\n", @@ -30,7 +30,7 @@ "\n", "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/ibm_watsonx/) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [`IBM watsonx.ai`](https://api.js.langchain.com/modules/_langchain_community.llms_ibm.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "| [`WatsonxLLM`](https://api.js.langchain.com/classes/_langchain_community.llms_ibm.WatsonxLLM.html) | [@langchain/community](https://www.npmjs.com/package/@langchain/community) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", "\n", "## Setup\n", "\n", @@ -161,11 +161,11 @@ "\n", "const props = {\n", " decoding_method: \"sample\",\n", - " max_new_tokens: 100,\n", - " min_new_tokens: 1,\n", + " maxNewTokens: 100,\n", + " minNewTokens: 1,\n", " temperature: 0.5,\n", - " top_k: 50,\n", - " top_p: 1,\n", + " topK: 50,\n", + " topP: 1,\n", "};\n", "const instance = new WatsonxLLM({\n", " version: \"YYYY-MM-DD\",\n", @@ -298,7 +298,7 @@ "source": [ "const result2 = await instance.invoke(\"Print hello world.\", {\n", " parameters: {\n", - " max_new_tokens: 20,\n", + " maxNewTokens: 100,\n", " },\n", "});\n", "console.log(result2);" @@ -358,4 +358,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb b/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb index dfe43b07c462..bac03b424f48 100644 --- a/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb +++ b/docs/core_docs/docs/integrations/text_embedding/ibm.ipynb @@ -22,7 +22,7 @@ "# IBM watsonx.ai\n", "\n", "\n", - "This will help you get started with IBM watsonx.ai [embedding models](/docs/concepts/embedding_models) using LangChain. For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/_langchain_community.embeddings_ibm.html).\n", + "This will help you get started with IBM watsonx.ai [embedding models](/docs/concepts/embedding_models) using LangChain. 
For detailed documentation on `IBM watsonx.ai` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/modules/_langchain_community.embeddings_ibm.html).\n", "\n", "## Overview\n", "### Integration details\n", @@ -30,7 +30,7 @@ "\n", "| Class | Package | Local | [Py support](https://python.langchain.com/docs/integrations/text_embedding/ibm_watsonx/) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: |\n", - "| [`IBM watsonx.ai`](https://api.js.langchain.com/classes/_langchain_community.embeddings_ibm.WatsonxEmbeddings.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_ibm.html)| ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "| [`WatsonxEmbeddings`](https://api.js.langchain.com/classes/_langchain_community.embeddings_ibm.WatsonxEmbeddings.html) | [@langchain/community](https://www.npmjs.com/package/@langchain/community)| ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", "\n", "## Setup\n", "\n", @@ -163,7 +163,6 @@ " serviceUrl: process.env.API_URL,\n", " projectId: \"\",\n", " spaceId: \"\",\n", - " idOrName: \"\",\n", " model: \"\",\n", "});" ] @@ -175,7 +174,7 @@ "source": [ "Note:\n", "\n", - "- You must provide `spaceId`, `projectId` or `idOrName`(deployment id) in order to proceed.\n", + "- You must provide `spaceId` or `projectId` in order to proceed.\n", "- Depending on the region of your provisioned service instance, use correct serviceUrl." 
] }, @@ -243,7 +242,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 1, "id": "0d2befcd", "metadata": {}, "outputs": [ @@ -252,33 +251,18 @@ "output_type": "stream", "text": [ "[\n", - " -0.017436018, -0.01469498, -0.015685871, -0.013543149, -0.0011519607,\n", - " -0.008123747, 0.015286108, -0.023845721, -0.02454774, 0.07235078,\n", - " -0.032333843, -0.0035843418, -0.015389036, 0.0455373, -0.021119863,\n", - " -0.022039745, 0.021746712, -0.017774817, -0.008232582, -0.036727764,\n", - " -0.015734928, 0.03606811, -0.005108186, -0.036052454, 0.024462992,\n", - " 0.02359307, 0.03273164, 0.009195497, -0.0077208397, -0.0127943,\n", - " -0.023869334, -0.029473905, -0.0080457395, -0.0021337876, 0.04949132,\n", - " 0.013950589, -0.010046689, 0.021029025, -0.031725302, 0.004251065,\n", - " -0.034171984, -0.03696642, -0.014253629, -0.017757406, -0.007531065,\n", - " 0.07187789, 0.009661725, 0.041889492, -0.04660478, 0.028036641,\n", - " 0.059334517, -0.04561291, 0.056029715, -0.00676024, 0.026493236,\n", - " 0.0116374, 0.050126843, -0.018036349, -0.013711887, 0.042252757,\n", - " -0.04453391, 0.04705777, -0.00044598224, -0.030227259, 0.029286578,\n", - " 0.0252211, 0.011694125, -0.031404093, 0.02951232, 0.08812359,\n", - " 0.023539362, -0.011082862, 0.008024676, 0.00084492035, -0.007984158,\n", - " -0.0005008702, -0.025189219, 0.021000557, -0.0065513053, 0.036524914,\n", - " 0.0015150858, -0.0042383806, 0.049065087, 0.000941666, 0.04447001,\n", - " 0.012942205, -0.078316726, -0.03004237, -0.025807172, -0.03446275,\n", - " -0.00932942, -0.044925686, 0.03190307, 0.010136769, -0.048854534,\n", - " 0.025738232, -0.017840309, 0.023738133, 0.014214792, 0.030452395\n", + " -0.017436018, -0.01469498,\n", + " -0.015685871, -0.013543149,\n", + " -0.0011519607, -0.008123747,\n", + " 0.015286108, -0.023845721,\n", + " -0.02454774, 0.07235078\n", "]\n" ] } ], "source": [ " const singleVector = await embeddings.embedQuery(text);\n", - " singleVector.slice(0, 100);" + " singleVector.slice(0, 10);" ] }, { @@ -302,48 +286,18 @@ "output_type": "stream", "text": [ "[\n", - " -0.017436024, -0.014695002, -0.01568589, -0.013543164, -0.001151976,\n", - " -0.008123703, 0.015286064, -0.023845702, -0.024547677, 0.07235076,\n", - " -0.032333862, -0.0035843418, -0.015389038, 0.045537304, -0.021119865,\n", - " -0.02203975, 0.021746716, -0.01777481, -0.008232588, -0.03672781,\n", - " -0.015734889, 0.036068108, -0.0051082, -0.036052432, 0.024462998,\n", - " 0.023593083, 0.03273162, 0.009195521, -0.007720828, -0.012794304,\n", - " -0.023869323, -0.029473891, -0.008045726, -0.002133793, 0.049491342,\n", - " 0.013950573, -0.010046691, 0.02102898, -0.03172528, 0.0042510596,\n", - " -0.034171965, -0.036966413, -0.014253668, -0.017757434, -0.007531062,\n", - " 0.07187787, 0.009661732, 0.041889492, -0.04660476, 0.028036654,\n", - " 0.059334517, -0.045612894, 0.056029722, -0.00676024, 0.026493296,\n", - " 0.0116374055, 0.050126873, -0.018036384, -0.013711868, 0.0422528,\n", - " -0.044533912, 0.047057763, -0.00044596897, -0.030227251, 0.029286569,\n", - " 0.025221113, 0.011694138, -0.03140413, 0.029512335, 0.08812357,\n", - " 0.023539348, -0.011082865, 0.008024677, 0.00084490055, -0.007984145,\n", - " -0.0005008745, -0.025189226, 0.021000564, -0.0065513197, 0.036524955,\n", - " 0.0015150585, -0.0042383634, 0.049065102, 0.000941638, 0.044469994,\n", - " 0.012942193, -0.078316696, -0.0300424, -0.025807157, -0.0344627,\n", - " -0.009329439, -0.04492573, 0.031903077, 0.010136808, -0.048854522,\n", - " 0.025738247, 
-0.01784033, 0.023738142, 0.014214801, 0.030452369\n", + " -0.017436024, -0.014695002,\n", + " -0.01568589, -0.013543164,\n", + " -0.001151976, -0.008123703,\n", + " 0.015286064, -0.023845702,\n", + " -0.024547677, 0.07235076\n", "]\n", "[\n", - " 0.03278884, -0.017893745, -0.0027520044, 0.016506646, 0.028271576,\n", - " -0.01284331, 0.014344065, -0.007968607, -0.03899479, 0.039327156,\n", - " -0.047726233, 0.009559004, -0.05302522, 0.011498492, -0.0055542476,\n", - " -0.0020940166, -0.029262392, -0.025919685, 0.024261741, -0.0010863725,\n", - " 0.0074619935, 0.014191284, -0.009054746, -0.038633537, 0.039744128,\n", - " 0.012625762, 0.030490868, 0.013526139, -0.024638629, -0.011268263,\n", - " -0.012759613, -0.04693565, -0.013087251, -0.01971696, 0.0125782555,\n", - " 0.024156926, -0.011638484, 0.017364893, -0.0405832, -0.0032466082,\n", - " -0.01611277, -0.022583133, 0.019492855, -0.03664484, -0.022627067,\n", - " 0.011026938, -0.014631298, 0.043255687, -0.029447634, 0.017212389,\n", - " 0.029366229, -0.041978795, 0.005347565, -0.0106230285, -0.008334342,\n", - " -0.008841154, 0.045096103, 0.03996879, -0.002039457, -0.0051824683,\n", - " -0.019464444, 0.092018366, -0.009283633, -0.020052811, 0.0043408144,\n", - " -0.029403884, 0.02587689, -0.027253918, 0.0159064, 0.0421537,\n", - " 0.05078811, -0.012380686, -0.018032575, 0.01711449, 0.03636163,\n", - " -0.014590949, -0.015076142, 0.00018201554, 0.002490666, 0.044776678,\n", - " 0.05301749, -0.007891316, 0.028668318, -0.0016632816, 0.04487743,\n", - " -0.032529455, -0.040372133, -0.020566158, -0.011109745, -0.01724949,\n", - " -0.0047519016, -0.041635286, 0.0068111843, 0.039498538, -0.02491227,\n", - " 0.016853934, -0.017926402, -0.006154979, 0.025893573, 0.015262395\n", + " 0.03278884, -0.017893745,\n", + " -0.0027520044, 0.016506646,\n", + " 0.028271576, -0.01284331,\n", + " 0.014344065, -0.007968607,\n", + " -0.03899479, 0.039327156\n", "]\n" ] } @@ -355,9 +309,8 @@ "\n", " const vectors = await embeddings.embedDocuments([text, text2]);\n", " \n", - " console.log(vectors[0].slice(0, 100));\n", - " console.log(vectors[1].slice(0, 100));\n", - " " + " console.log(vectors[0].slice(0, 10));\n", + " console.log(vectors[1].slice(0, 10));\n" ] }, { @@ -386,4 +339,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/libs/langchain-community/src/chat_models/ibm.ts b/libs/langchain-community/src/chat_models/ibm.ts index dd468909a886..d4dc6a64ba28 100644 --- a/libs/langchain-community/src/chat_models/ibm.ts +++ b/libs/langchain-community/src/chat_models/ibm.ts @@ -33,7 +33,6 @@ import { } from "@langchain/core/outputs"; import { AsyncCaller } from "@langchain/core/utils/async_caller"; import { - TextChatConstants, TextChatMessagesTextChatMessageAssistant, TextChatParameterTools, TextChatParams, @@ -42,7 +41,6 @@ import { TextChatResultChoice, TextChatResultMessage, TextChatToolCall, - TextChatToolChoiceTool, TextChatUsage, } from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; import { WatsonXAI } from "@ibm-cloud/watsonx-ai"; @@ -81,30 +79,8 @@ export interface WatsonxDeltaStream { } export interface WatsonxCallParams - extends Partial< - Omit< - TextChatParams, - | "toolChoiceOption" - | "toolChoice" - | "frequencyPenalty" - | "topLogprobs" - | "maxTokens" - | "presencePenalty" - | "responseFormat" - | "timeLimit" - | "modelId" - > - > { + extends Partial> { maxRetries?: number; - tool_choice?: TextChatToolChoiceTool; - tool_choice_option?: TextChatConstants.ToolChoiceOption | string; - 
frequency_penalty?: number; - top_logprobs?: number; - max_new_tokens?: number; - presence_penalty?: number; - top_p?: number; - time_limit?: number; - response_format?: TextChatResponseFormat; } export interface WatsonxCallOptionsChat extends Omit, @@ -114,12 +90,15 @@ export interface WatsonxCallOptionsChat type ChatWatsonxToolType = BindToolsInput | TextChatParameterTools; -export interface ChatWatsonxInput extends BaseChatModelParams, WatsonxParams { +export interface ChatWatsonxInput + extends BaseChatModelParams, + WatsonxParams, + WatsonxCallParams { streaming?: boolean; } -function _convertToValidToolId(modelId: string, tool_call_id: string) { - if (modelId.startsWith("mistralai")) +function _convertToValidToolId(model: string, tool_call_id: string) { + if (model.startsWith("mistralai")) return _convertToolCallIdToMistralCompatible(tool_call_id); else return tool_call_id; } @@ -144,7 +123,7 @@ function _convertToolToWatsonxTool( function _convertMessagesToWatsonxMessages( messages: BaseMessage[], - modelId: string + model: string ): TextChatResultMessage[] { const getRole = (role: MessageType) => { switch (role) { @@ -168,7 +147,7 @@ function _convertMessagesToWatsonxMessages( return message.tool_calls .map((toolCall) => ({ ...toolCall, - id: _convertToValidToolId(modelId, toolCall.id ?? ""), + id: _convertToValidToolId(model, toolCall.id ?? ""), })) .map(convertLangChainToolCallToOpenAI) as TextChatToolCall[]; } @@ -183,7 +162,7 @@ function _convertMessagesToWatsonxMessages( role: getRole(message._getType()), content, name: message.name, - tool_call_id: _convertToValidToolId(modelId, message.tool_call_id), + tool_call_id: _convertToValidToolId(model, message.tool_call_id), }; } @@ -246,7 +225,7 @@ function _watsonxResponseToChatMessage( function _convertDeltaToMessageChunk( delta: WatsonxDeltaStream, rawData: TextChatResponse, - modelId: string, + model: string, usage?: TextChatUsage, defaultRole?: TextChatMessagesTextChatMessageAssistant.Constants.Role ) { @@ -262,7 +241,7 @@ function _convertDeltaToMessageChunk( } => ({ ...toolCall, index, - id: _convertToValidToolId(modelId, toolCall.id), + id: _convertToValidToolId(model, toolCall.id), type: "function", }) ) @@ -315,7 +294,7 @@ function _convertDeltaToMessageChunk( return new ToolMessageChunk({ content, additional_kwargs, - tool_call_id: _convertToValidToolId(modelId, rawToolCalls?.[0].id), + tool_call_id: _convertToValidToolId(model, rawToolCalls?.[0].id), }); } else if (role === "function") { return new FunctionMessageChunk({ @@ -379,11 +358,11 @@ export class ChatWatsonx< }; } - model = "mistralai/mistral-large"; + model: string; version = "2024-05-31"; - max_new_tokens = 100; + maxTokens: number; maxRetries = 0; @@ -393,35 +372,31 @@ export class ChatWatsonx< projectId?: string; - frequency_penalty?: number; + frequencyPenalty?: number; logprobs?: boolean; - top_logprobs?: number; + topLogprobs?: number; n?: number; - presence_penalty?: number; + presencePenalty?: number; temperature?: number; - top_p?: number; + topP?: number; - time_limit?: number; + timeLimit?: number; maxConcurrency?: number; service: WatsonXAI; - response_format?: TextChatResponseFormat | string; + responseFormat?: TextChatResponseFormat; streaming: boolean; - constructor( - fields: ChatWatsonxInput & - WatsonxAuth & - Partial> - ) { + constructor(fields: ChatWatsonxInput & WatsonxAuth) { super(fields); if ( (fields.projectId && fields.spaceId) || @@ -432,20 +407,20 @@ export class ChatWatsonx< if (!fields.projectId && !fields.spaceId && 
!fields.idOrName) throw new Error( - "No id specified! At least ide of 1 type has to be specified" + "No id specified! At least id of 1 type has to be specified" ); this.projectId = fields?.projectId; this.spaceId = fields?.spaceId; this.temperature = fields?.temperature; this.maxRetries = fields?.maxRetries || this.maxRetries; this.maxConcurrency = fields?.maxConcurrency; - this.frequency_penalty = fields?.frequency_penalty; - this.top_logprobs = fields?.top_logprobs; - this.max_new_tokens = fields?.max_new_tokens ?? this.max_new_tokens; - this.presence_penalty = fields?.presence_penalty; - this.top_p = fields?.top_p; - this.time_limit = fields?.time_limit; - this.response_format = fields?.response_format ?? this.response_format; + this.frequencyPenalty = fields?.frequencyPenalty; + this.topLogprobs = fields?.topLogprobs; + this.maxTokens = fields?.maxTokens ?? this.maxTokens; + this.presencePenalty = fields?.presencePenalty; + this.topP = fields?.topP; + this.timeLimit = fields?.timeLimit; + this.responseFormat = fields?.responseFormat ?? this.responseFormat; this.serviceUrl = fields?.serviceUrl; this.streaming = fields?.streaming ?? this.streaming; this.n = fields?.n ?? this.n; @@ -483,21 +458,21 @@ export class ChatWatsonx< invocationParams(options: this["ParsedCallOptions"]) { return { - maxTokens: options.max_new_tokens ?? this.max_new_tokens, + maxTokens: options.maxTokens ?? this.maxTokens, temperature: options?.temperature ?? this.temperature, - timeLimit: options?.time_limit ?? this.time_limit, - topP: options?.top_p ?? this.top_p, - presencePenalty: options?.presence_penalty ?? this.presence_penalty, + timeLimit: options?.timeLimit ?? this.timeLimit, + topP: options?.topP ?? this.topP, + presencePenalty: options?.presencePenalty ?? this.presencePenalty, n: options?.n ?? this.n, - topLogprobs: options?.top_logprobs ?? this.top_logprobs, + topLogprobs: options?.topLogprobs ?? this.topLogprobs, logprobs: options?.logprobs ?? this?.logprobs, - frequencyPenalty: options?.frequency_penalty ?? this.frequency_penalty, + frequencyPenalty: options?.frequencyPenalty ?? this.frequencyPenalty, tools: options.tools ? 
_convertToolToWatsonxTool(options.tools) : undefined, - toolChoice: options.tool_choice, - responseFormat: options.response_format, - toolChoiceOption: options.tool_choice_option, + toolChoice: options.toolChoice, + responseFormat: options.responseFormat, + toolChoiceOption: options.toolChoiceOption, }; } @@ -556,7 +531,7 @@ export class ChatWatsonx< if (message?.usage_metadata) { const completion = chunk.generationInfo?.completion; if (tokenUsages[completion]) - tokenUsages[completion].output_tokens += + tokenUsages[completion].output_tokens = message.usage_metadata.output_tokens; else tokenUsages[completion] = message.usage_metadata; } @@ -759,7 +734,7 @@ export class ChatWatsonx< let llm: Runnable; if (method === "jsonMode") { const options = { - response_format: { type: "json_object" }, + responseFormat: { type: "json_object" }, } as Partial; llm = this.bind(options); @@ -783,7 +758,7 @@ export class ChatWatsonx< }, ], // Ideally that would be set to required but this is not supported yet - tool_choice: { + toolChoice: { type: "function", function: { name: functionName, @@ -819,7 +794,7 @@ export class ChatWatsonx< }, ], // Ideally that would be set to required but this is not supported yet - tool_choice: { + toolChoice: { type: "function", function: { name: functionName, diff --git a/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts b/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts index 85f33d733e6d..2f1d118d92a4 100644 --- a/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/ibm.int.test.ts @@ -12,15 +12,13 @@ import { LLMResult } from "@langchain/core/outputs"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { tool } from "@langchain/core/tools"; import { NewTokenIndices } from "@langchain/core/callbacks/base"; -import * as fs from "node:fs/promises"; -import { fileURLToPath } from "node:url"; -import * as path from "node:path"; import { ChatWatsonx } from "../ibm.js"; describe("Tests for chat", () => { describe("Test ChatWatsonx invoke and generate", () => { test("Basic invoke", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -30,6 +28,7 @@ describe("Tests for chat", () => { }); test("Basic generate", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -40,6 +39,7 @@ describe("Tests for chat", () => { }); test("Invoke with system message", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -53,6 +53,7 @@ describe("Tests for chat", () => { }); test("Invoke with output parser", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? 
"testString", @@ -68,6 +69,7 @@ describe("Tests for chat", () => { }); test("Invoke with prompt", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -83,6 +85,7 @@ describe("Tests for chat", () => { }); test("Invoke with chat conversation", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -104,6 +107,7 @@ describe("Tests for chat", () => { totalTokens: 0, }; const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -120,6 +124,7 @@ describe("Tests for chat", () => { }); test("Timeout", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -132,6 +137,7 @@ describe("Tests for chat", () => { }, 5000); test("Controller options", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -150,6 +156,7 @@ describe("Tests for chat", () => { describe("Test ChatWatsonx invoke and generate with stream mode", () => { test("Basic invoke", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -160,6 +167,7 @@ describe("Tests for chat", () => { }); test("Basic generate", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -170,6 +178,7 @@ describe("Tests for chat", () => { }); test("Generate with n>1", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -198,11 +207,12 @@ describe("Tests for chat", () => { ]; let tokenUsed = 0; const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", n: 2, - max_new_tokens: 5, + maxTokens: 5, streaming: true, callbackManager: CallbackManager.fromHandlers({ async handleLLMEnd(output: LLMResult) { @@ -236,6 +246,7 @@ describe("Tests for chat", () => { }); test("Invoke with system message", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? 
"testString", @@ -249,6 +260,7 @@ describe("Tests for chat", () => { }); test("Invoke with output parser", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -264,6 +276,7 @@ describe("Tests for chat", () => { }); test("Invoke with prompt", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -279,6 +292,7 @@ describe("Tests for chat", () => { }); test("Invoke with chat conversation", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -300,6 +314,7 @@ describe("Tests for chat", () => { totalTokens: 0, }; const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -316,6 +331,7 @@ describe("Tests for chat", () => { }); test("Timeout", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -328,6 +344,7 @@ describe("Tests for chat", () => { }, 5000); test("Controller options", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -346,6 +363,7 @@ describe("Tests for chat", () => { describe("Test ChatWatsonx stream", () => { test("Basic stream", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -366,6 +384,7 @@ describe("Tests for chat", () => { }); test("Timeout", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -378,6 +397,7 @@ describe("Tests for chat", () => { }, 5000); test("Controller options", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -399,6 +419,7 @@ describe("Tests for chat", () => { test("Token count and response equality", async () => { let generation = ""; const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -426,21 +447,24 @@ describe("Tests for chat", () => { }); test("Token count usage_metadata", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? 
"testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", }); let res: AIMessageChunk | null = null; + let outputCount = 0; const stream = await service.stream("Why is the sky blue? Be concise."); for await (const chunk of stream) { res = chunk; + outputCount += 1; } expect(res?.usage_metadata).toBeDefined(); if (!res?.usage_metadata) { return; } expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); - expect(res.usage_metadata.output_tokens).toBe(1); + expect(res.usage_metadata.output_tokens).toBe(outputCount); expect(res.usage_metadata.total_tokens).toBe( res.usage_metadata.input_tokens + res.usage_metadata.output_tokens ); @@ -450,6 +474,7 @@ describe("Tests for chat", () => { describe("Test tool usage", () => { test("Passing tool to chat model", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -503,6 +528,7 @@ describe("Tests for chat", () => { }); test("Passing tool to chat model extended", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -563,6 +589,7 @@ describe("Tests for chat", () => { }); test("Binding model-specific formats", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -603,6 +630,7 @@ describe("Tests for chat", () => { }); test("Passing tool to chat model", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -655,6 +683,7 @@ describe("Tests for chat", () => { describe("Test withStructuredOutput usage", () => { test("Schema with zod", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -677,6 +706,7 @@ describe("Tests for chat", () => { test("Schema with zod and stream", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -703,6 +733,7 @@ describe("Tests for chat", () => { }); test("Schema with object", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -729,6 +760,7 @@ describe("Tests for chat", () => { }); test("Schema with rawOutput", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? 
"testString", @@ -761,6 +793,7 @@ describe("Tests for chat", () => { }); test("Schema with zod and JSON mode", async () => { const service = new ChatWatsonx({ + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", @@ -798,47 +831,4 @@ describe("Tests for chat", () => { expect(typeof result.number2).toBe("number"); }); }); - - describe("Test image input", () => { - test("Image input", async () => { - const service = new ChatWatsonx({ - version: "2024-05-31", - serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", - model: "meta-llama/llama-3-2-11b-vision-instruct", - projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", - max_new_tokens: 100, - }); - const __filename = fileURLToPath(import.meta.url); - const __dirname = path.dirname(__filename); - const encodedString = await fs.readFile( - path.join(__dirname, "/data/hotdog.jpg") - ); - const question = "What is on the picture"; - const messages = [ - { - role: "user", - content: [ - { - type: "text", - text: question, - }, - { - type: "image_url", - image_url: { - url: - "data:image/jpeg;base64," + encodedString.toString("base64"), - }, - }, - ], - }, - ]; - const res = await service.stream(messages); - const chunks = []; - for await (const chunk of res) { - expect(chunk).toBeInstanceOf(AIMessageChunk); - chunks.push(chunk.content); - } - expect(typeof chunks.join("")).toBe("string"); - }); - }); }); diff --git a/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts b/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts index 03b8eb4b3351..545ed3c06fa9 100644 --- a/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/ibm.standard.int.test.ts @@ -26,6 +26,7 @@ class ChatWatsonxStandardIntegrationTests extends ChatModelIntegrationTests< chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: { + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? "testString", projectId: process.env.WATSONX_AI_PROJECT_ID ?? "testString", diff --git a/libs/langchain-community/src/chat_models/tests/ibm.standard.test.ts b/libs/langchain-community/src/chat_models/tests/ibm.standard.test.ts index 6c7ab7d5576a..da9a624209c9 100644 --- a/libs/langchain-community/src/chat_models/tests/ibm.standard.test.ts +++ b/libs/langchain-community/src/chat_models/tests/ibm.standard.test.ts @@ -24,6 +24,7 @@ class ChatWatsonxStandardTests extends ChatModelUnitTests< chatModelHasToolCalling: true, chatModelHasStructuredOutput: true, constructorArgs: { + model: "mistralai/mistral-large", watsonxAIApikey: "testString", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL ?? 
"testString", diff --git a/libs/langchain-community/src/chat_models/tests/ibm.test.ts b/libs/langchain-community/src/chat_models/tests/ibm.test.ts index 8e04c1c26c6b..f52a689f6755 100644 --- a/libs/langchain-community/src/chat_models/tests/ibm.test.ts +++ b/libs/langchain-community/src/chat_models/tests/ibm.test.ts @@ -52,6 +52,7 @@ describe("LLM unit tests", () => { test("Test basic properties after init", async () => { const testProps = { + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -63,6 +64,7 @@ describe("LLM unit tests", () => { test("Test methods after init", () => { const testProps: ChatWatsonxInput = { + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -83,10 +85,10 @@ describe("LLM unit tests", () => { serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", model: "ibm/granite-13b-chat-v2", - max_new_tokens: 100, + maxTokens: 100, temperature: 0.1, - time_limit: 10000, - top_p: 1, + timeLimit: 10000, + topP: 1, maxRetries: 3, maxConcurrency: 3, }; @@ -99,6 +101,7 @@ describe("LLM unit tests", () => { describe("Negative tests", () => { test("Missing id", async () => { const testProps: ChatWatsonxInput = { + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, }; @@ -149,6 +152,7 @@ describe("LLM unit tests", () => { test("Passing more than one id", async () => { const testProps: ChatWatsonxInput = { + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -165,6 +169,7 @@ describe("LLM unit tests", () => { test("Not existing property passed", async () => { const testProps = { + model: "mistralai/mistral-large", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", diff --git a/libs/langchain-community/src/embeddings/ibm.ts b/libs/langchain-community/src/embeddings/ibm.ts index fc8fcaebb561..9ee0f39976f9 100644 --- a/libs/langchain-community/src/embeddings/ibm.ts +++ b/libs/langchain-community/src/embeddings/ibm.ts @@ -9,14 +9,20 @@ import { WatsonxAuth, WatsonxParams } from "../types/ibm.js"; import { authenticateAndSetInstance } from "../utils/ibm.js"; export interface WatsonxEmbeddingsParams - extends Omit, - Pick {} + extends Pick { + truncateInputTokens?: number; +} + +export interface WatsonxInputEmbeddings + extends Omit { + truncateInputTokens?: number; +} export class WatsonxEmbeddings extends Embeddings implements WatsonxEmbeddingsParams, WatsonxParams { - model = "ibm/slate-125m-english-rtrvr"; + model: string; serviceUrl: string; @@ -26,7 +32,7 @@ export class WatsonxEmbeddings projectId?: string; - truncate_input_tokens?: number; + truncateInputTokens?: number; maxRetries?: number; @@ -34,18 +40,18 @@ export class WatsonxEmbeddings private service: WatsonXAI; - constructor(fields: WatsonxEmbeddingsParams & WatsonxAuth & WatsonxParams) { + constructor(fields: WatsonxInputEmbeddings & WatsonxAuth) { const superProps = { maxConcurrency: 2, ...fields }; super(superProps); - this.model = fields?.model ? 
fields.model : this.model; + this.model = fields.model; this.version = fields.version; this.serviceUrl = fields.serviceUrl; - this.truncate_input_tokens = fields.truncate_input_tokens; + this.truncateInputTokens = fields.truncateInputTokens; this.maxConcurrency = fields.maxConcurrency; - this.maxRetries = fields.maxRetries; + this.maxRetries = fields.maxRetries ?? 0; if (fields.projectId && fields.spaceId) throw new Error("Maximum 1 id type can be specified per instance"); - else if (!fields.projectId && !fields.spaceId && !fields.idOrName) + else if (!fields.projectId && !fields.spaceId) throw new Error( "No id specified! At least id of 1 type has to be specified" ); @@ -77,13 +83,14 @@ export class WatsonxEmbeddings } scopeId() { - if (this.projectId) return { projectId: this.projectId }; - else return { spaceId: this.spaceId }; + if (this.projectId) + return { projectId: this.projectId, modelId: this.model }; + else return { spaceId: this.spaceId, modelId: this.model }; } invocationParams(): EmbeddingParameters { return { - truncate_input_tokens: this.truncate_input_tokens, + truncate_input_tokens: this.truncateInputTokens, }; } @@ -104,7 +111,6 @@ export class WatsonxEmbeddings private async embedSingleText(inputs: string[]) { const textEmbeddingParams: TextEmbeddingsParams = { inputs, - modelId: this.model, ...this.scopeId(), parameters: this.invocationParams(), }; diff --git a/libs/langchain-community/src/embeddings/tests/ibm.int.test.ts b/libs/langchain-community/src/embeddings/tests/ibm.int.test.ts index 9361a7915213..a774181d4b91 100644 --- a/libs/langchain-community/src/embeddings/tests/ibm.int.test.ts +++ b/libs/langchain-community/src/embeddings/tests/ibm.int.test.ts @@ -5,6 +5,7 @@ import { WatsonxEmbeddings } from "../ibm.js"; describe("Test embeddings", () => { test("embedQuery method", async () => { const embeddings = new WatsonxEmbeddings({ + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, @@ -15,6 +16,7 @@ describe("Test embeddings", () => { test("embedDocuments", async () => { const embeddings = new WatsonxEmbeddings({ + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, @@ -27,6 +29,7 @@ describe("Test embeddings", () => { test("Concurrency", async () => { const embeddings = new WatsonxEmbeddings({ + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, @@ -50,6 +53,7 @@ describe("Test embeddings", () => { test("List models", async () => { const embeddings = new WatsonxEmbeddings({ + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, diff --git a/libs/langchain-community/src/embeddings/tests/ibm.test.ts b/libs/langchain-community/src/embeddings/tests/ibm.test.ts index 05f033f6f1af..ad4196b3387d 100644 --- a/libs/langchain-community/src/embeddings/tests/ibm.test.ts +++ b/libs/langchain-community/src/embeddings/tests/ibm.test.ts @@ -1,7 +1,7 @@ /* eslint-disable no-process-env */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { testProperties } from "../../llms/tests/ibm.test.js"; -import { WatsonxEmbeddings } from "../ibm.js"; +import { WatsonxEmbeddings, WatsonxInputEmbeddings } from "../ibm.js"; const 
fakeAuthProp = { watsonxAIAuthType: "iam", @@ -11,6 +11,7 @@ describe("Embeddings unit tests", () => { describe("Positive tests", () => { test("Basic properties", () => { const testProps = { + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -20,14 +21,14 @@ describe("Embeddings unit tests", () => { }); test("Basic properties", () => { - const testProps = { + const testProps: WatsonxInputEmbeddings = { + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", - truncate_input_tokens: 10, + truncateInputTokens: 10, maxConcurrency: 2, maxRetries: 2, - model: "ibm/slate-125m-english-rtrvr", }; const instance = new WatsonxEmbeddings({ ...testProps, ...fakeAuthProp }); @@ -38,6 +39,7 @@ describe("Embeddings unit tests", () => { describe("Negative tests", () => { test("Missing id", async () => { const testProps = { + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, }; @@ -85,6 +87,7 @@ describe("Embeddings unit tests", () => { test("Passing more than one id", async () => { const testProps = { + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -101,6 +104,7 @@ describe("Embeddings unit tests", () => { test("Invalid properties", () => { const testProps = { + model: "ibm/slate-125m-english-rtrvr", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", diff --git a/libs/langchain-community/src/llms/ibm.ts b/libs/langchain-community/src/llms/ibm.ts index 302275158d9c..a0e8a292f0bf 100644 --- a/libs/langchain-community/src/llms/ibm.ts +++ b/libs/langchain-community/src/llms/ibm.ts @@ -3,12 +3,8 @@ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { BaseLLM, BaseLLMParams } from "@langchain/core/language_models/llms"; import { WatsonXAI } from "@ibm-cloud/watsonx-ai"; import { - DeploymentsTextGenerationParams, - DeploymentsTextGenerationStreamParams, DeploymentTextGenProperties, ReturnOptionProperties, - TextGenerationParams, - TextGenerationStreamParams, TextGenLengthPenalty, TextGenParameters, TextTokenizationParams, @@ -34,25 +30,28 @@ import { * Input to LLM class. 
*/ -export interface WatsonxCallOptionsLLM - extends BaseLanguageModelCallOptions, - Omit< - Partial< - TextGenerationParams & - TextGenerationStreamParams & - DeploymentsTextGenerationParams & - DeploymentsTextGenerationStreamParams - >, - "input" - > { +export interface WatsonxCallOptionsLLM extends BaseLanguageModelCallOptions { maxRetries?: number; + parameters?: Partial; + idOrName?: string; } -export interface WatsonxInputLLM - extends TextGenParameters, - WatsonxParams, - BaseLLMParams { +export interface WatsonxInputLLM extends WatsonxParams, BaseLLMParams { streaming?: boolean; + maxNewTokens?: number; + decodingMethod?: TextGenParameters.Constants.DecodingMethod | string; + lengthPenalty?: TextGenLengthPenalty; + minNewTokens?: number; + randomSeed?: number; + stopSequence?: string[]; + temperature?: number; + timeLimit?: number; + topK?: number; + topP?: number; + repetitionPenalty?: number; + truncateInpuTokens?: number; + returnOptions?: ReturnOptionProperties; + includeStopSequence?: boolean; } /** @@ -73,7 +72,7 @@ export class WatsonxLLM< streaming = false; - model = "ibm/granite-13b-chat-v2"; + model: string; maxRetries = 0; @@ -81,7 +80,7 @@ export class WatsonxLLM< serviceUrl: string; - max_new_tokens?: number; + maxNewTokens?: number; spaceId?: string; @@ -89,31 +88,31 @@ export class WatsonxLLM< idOrName?: string; - decoding_method?: TextGenParameters.Constants.DecodingMethod | string; + decodingMethod?: TextGenParameters.Constants.DecodingMethod | string; - length_penalty?: TextGenLengthPenalty; + lengthPenalty?: TextGenLengthPenalty; - min_new_tokens?: number; + minNewTokens?: number; - random_seed?: number; + randomSeed?: number; - stop_sequences?: string[]; + stopSequence?: string[]; temperature?: number; - time_limit?: number; + timeLimit?: number; - top_k?: number; + topK?: number; - top_p?: number; + topP?: number; - repetition_penalty?: number; + repetitionPenalty?: number; - truncate_input_tokens?: number; + truncateInpuTokens?: number; - return_options?: ReturnOptionProperties; + returnOptions?: ReturnOptionProperties; - include_stop_sequence?: boolean; + includeStopSequence?: boolean; maxConcurrency?: number; @@ -123,21 +122,21 @@ export class WatsonxLLM< super(fields); this.model = fields.model ?? this.model; this.version = fields.version; - this.max_new_tokens = fields.max_new_tokens ?? this.max_new_tokens; + this.maxNewTokens = fields.maxNewTokens ?? 
this.maxNewTokens; this.serviceUrl = fields.serviceUrl; - this.decoding_method = fields.decoding_method; - this.length_penalty = fields.length_penalty; - this.min_new_tokens = fields.min_new_tokens; - this.random_seed = fields.random_seed; - this.stop_sequences = fields.stop_sequences; + this.decodingMethod = fields.decodingMethod; + this.lengthPenalty = fields.lengthPenalty; + this.minNewTokens = fields.minNewTokens; + this.randomSeed = fields.randomSeed; + this.stopSequence = fields.stopSequence; this.temperature = fields.temperature; - this.time_limit = fields.time_limit; - this.top_k = fields.top_k; - this.top_p = fields.top_p; - this.repetition_penalty = fields.repetition_penalty; - this.truncate_input_tokens = fields.truncate_input_tokens; - this.return_options = fields.return_options; - this.include_stop_sequence = fields.include_stop_sequence; + this.timeLimit = fields.timeLimit; + this.topK = fields.topK; + this.topP = fields.topP; + this.repetitionPenalty = fields.repetitionPenalty; + this.truncateInpuTokens = fields.truncateInpuTokens; + this.returnOptions = fields.returnOptions; + this.includeStopSequence = fields.includeStopSequence; this.maxRetries = fields.maxRetries || this.maxRetries; this.maxConcurrency = fields.maxConcurrency; this.streaming = fields.streaming || this.streaming; @@ -150,7 +149,7 @@ export class WatsonxLLM< if (!fields.projectId && !fields.spaceId && !fields.idOrName) throw new Error( - "No id specified! At least ide of 1 type has to be specified" + "No id specified! At least id of 1 type has to be specified" ); this.projectId = fields?.projectId; this.spaceId = fields?.spaceId; @@ -216,23 +215,23 @@ export class WatsonxLLM< const { parameters } = options; return { - max_new_tokens: parameters?.max_new_tokens ?? this.max_new_tokens, - decoding_method: parameters?.decoding_method ?? this.decoding_method, - length_penalty: parameters?.length_penalty ?? this.length_penalty, - min_new_tokens: parameters?.min_new_tokens ?? this.min_new_tokens, - random_seed: parameters?.random_seed ?? this.random_seed, - stop_sequences: options?.stop ?? this.stop_sequences, + max_new_tokens: parameters?.maxNewTokens ?? this.maxNewTokens, + decoding_method: parameters?.decodingMethod ?? this.decodingMethod, + length_penalty: parameters?.lengthPenalty ?? this.lengthPenalty, + min_new_tokens: parameters?.minNewTokens ?? this.minNewTokens, + random_seed: parameters?.randomSeed ?? this.randomSeed, + stop_sequences: options?.stop ?? this.stopSequence, temperature: parameters?.temperature ?? this.temperature, - time_limit: parameters?.time_limit ?? this.time_limit, - top_k: parameters?.top_k ?? this.top_k, - top_p: parameters?.top_p ?? this.top_p, + time_limit: parameters?.timeLimit ?? this.timeLimit, + top_k: parameters?.topK ?? this.topK, + top_p: parameters?.topP ?? this.topP, repetition_penalty: - parameters?.repetition_penalty ?? this.repetition_penalty, + parameters?.repetitionPenalty ?? this.repetitionPenalty, truncate_input_tokens: - parameters?.truncate_input_tokens ?? this.truncate_input_tokens, - return_options: parameters?.return_options ?? this.return_options, + parameters?.truncateInpuTokens ?? this.truncateInpuTokens, + return_options: parameters?.returnOptions ?? this.returnOptions, include_stop_sequence: - parameters?.include_stop_sequence ?? this.include_stop_sequence, + parameters?.includeStopSequence ?? 
this.includeStopSequence, }; } diff --git a/libs/langchain-community/src/llms/tests/ibm.int.test.ts b/libs/langchain-community/src/llms/tests/ibm.int.test.ts index 236fd4950be8..dfeebedd39e2 100644 --- a/libs/langchain-community/src/llms/tests/ibm.int.test.ts +++ b/libs/langchain-community/src/llms/tests/ibm.int.test.ts @@ -11,6 +11,7 @@ describe("Text generation", () => { describe("Test invoke method", () => { test("Correct value", async () => { const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, @@ -18,8 +19,21 @@ describe("Text generation", () => { await watsonXInstance.invoke("Hello world?"); }); + test("Overwritte params", async () => { + const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }); + await watsonXInstance.invoke("Hello world?", { + parameters: { maxNewTokens: 10 }, + }); + }); + test("Invalid projectId", async () => { const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: "Test wrong value", @@ -29,6 +43,7 @@ describe("Text generation", () => { test("Invalid credentials", async () => { const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: "Test wrong value", @@ -41,6 +56,7 @@ describe("Text generation", () => { test("Wrong value", async () => { const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, @@ -51,6 +67,7 @@ describe("Text generation", () => { test("Stop", async () => { const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, @@ -62,10 +79,11 @@ describe("Text generation", () => { test("Stop with timeout", async () => { const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: "sdadasdas" as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 5, + maxNewTokens: 5, maxRetries: 3, }); @@ -76,10 +94,11 @@ describe("Text generation", () => { test("Signal in call options", async () => { const watsonXInstance = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 5, + maxNewTokens: 5, maxRetries: 3, }); const controllerNoAbortion = new AbortController(); @@ -100,6 +119,7 @@ describe("Text generation", () => { test("Concurenccy", async () => { const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", maxConcurrency: 1, version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, @@ -119,9 +139,10 @@ describe("Text generation", () => { input_token_count: 0, }; const model = new WatsonxLLM({ - maxConcurrency: 1, + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", - max_new_tokens: 1, + maxNewTokens: 1, + maxConcurrency: 1, serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: 
process.env.WATSONX_AI_PROJECT_ID, callbacks: CallbackManager.fromHandlers({ @@ -150,10 +171,12 @@ describe("Text generation", () => { let streamedText = ""; let usedTokens = 0; const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", + maxConcurrency: 1, version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 5, + maxNewTokens: 5, streaming: true, callbacks: CallbackManager.fromHandlers({ @@ -176,10 +199,11 @@ describe("Text generation", () => { describe("Test generate methods", () => { test("Basic usage", async () => { const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 5, + maxNewTokens: 5, }); const res = await model.generate([ "Print hello world!", @@ -190,10 +214,11 @@ describe("Text generation", () => { test("Stop", async () => { const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 100, + maxNewTokens: 100, }); const res = await model.generate( @@ -215,10 +240,11 @@ describe("Text generation", () => { const nrNewTokens = [0, 0, 0]; const completions = ["", "", ""]; const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 5, + maxNewTokens: 5, streaming: true, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string, idx) { @@ -245,10 +271,11 @@ describe("Text generation", () => { test("Prompt value", async () => { const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 5, + maxNewTokens: 5, }); const res = await model.generatePrompt([ new StringPromptValue("Print hello world!"), @@ -264,10 +291,11 @@ describe("Text generation", () => { let countedTokens = 0; let streamedText = ""; const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 100, + maxNewTokens: 100, callbacks: CallbackManager.fromHandlers({ async handleLLMNewToken(token: string) { countedTokens += 1; @@ -286,10 +314,11 @@ describe("Text generation", () => { test("Stop", async () => { const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 100, + maxNewTokens: 100, }); const stream = await model.stream("Print hello world!", { @@ -304,10 +333,11 @@ describe("Text generation", () => { test("Timeout", async () => { const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 1000, + maxNewTokens: 1000, }); await expect(async () => { const stream = await model.stream( @@ -325,10 +355,11 @@ describe("Text generation", () => { test("Signal in call options", async () => { const model = new WatsonxLLM({ + model: "ibm/granite-13b-chat-v2", 
version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, - max_new_tokens: 1000, + maxNewTokens: 1000, }); const controller = new AbortController(); await expect(async () => { @@ -354,6 +385,7 @@ describe("Text generation", () => { describe("Test getNumToken method", () => { test("Passing correct value", async () => { const testProps: WatsonxInputLLM = { + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, @@ -371,6 +403,7 @@ describe("Text generation", () => { test("Passing wrong value", async () => { const testProps: WatsonxInputLLM = { + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID, diff --git a/libs/langchain-community/src/llms/tests/ibm.test.ts b/libs/langchain-community/src/llms/tests/ibm.test.ts index 7dfaecd6361c..6237cb1d14c1 100644 --- a/libs/langchain-community/src/llms/tests/ibm.test.ts +++ b/libs/langchain-community/src/llms/tests/ibm.test.ts @@ -3,10 +3,7 @@ import WatsonxAiMlVml_v1 from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; import { WatsonxLLM, WatsonxInputLLM } from "../ibm.js"; import { authenticateAndSetInstance } from "../../utils/ibm.js"; -import { - WatsonxEmbeddings, - WatsonxEmbeddingsParams, -} from "../../embeddings/ibm.js"; +import { WatsonxEmbeddings } from "../../embeddings/ibm.js"; const fakeAuthProp = { watsonxAIAuthType: "iam", @@ -38,7 +35,7 @@ export const testProperties = ( } }); }; - checkProperty(testProps, instance); + checkProperty(testProps, instance); if (notExTestProps) checkProperty(notExTestProps, instance, false); }; @@ -56,6 +53,7 @@ describe("LLM unit tests", () => { test("Test basic properties after init", async () => { const testProps = { + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -67,6 +65,7 @@ describe("LLM unit tests", () => { test("Test methods after init", () => { const testProps: WatsonxInputLLM = { + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -82,33 +81,32 @@ describe("LLM unit tests", () => { }); test("Test properties after init", async () => { - const testProps = { + const testProps: WatsonxInputLLM = { version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", model: "ibm/granite-13b-chat-v2", - max_new_tokens: 100, - decoding_method: "sample", - length_penalty: { decay_factor: 1, start_index: 1 }, - min_new_tokens: 10, - random_seed: 1, - stop_sequences: ["hello"], + maxNewTokens: 100, + decodingMethod: "sample", + lengthPenalty: { decay_factor: 1, start_index: 1 }, + minNewTokens: 10, + randomSeed: 1, + stopSequence: ["hello"], temperature: 0.1, - time_limit: 10000, - top_k: 1, - top_p: 1, - repetition_penalty: 1, - truncate_input_tokens: 1, - return_options: { + timeLimit: 10000, + topK: 1, + topP: 1, + repetitionPenalty: 1, + truncateInpuTokens: 1, + returnOptions: { input_text: true, generated_tokens: true, input_tokens: true, token_logprobs: true, token_ranks: true, - top_n_tokens: 2, }, - include_stop_sequence: false, + includeStopSequence: false, maxRetries: 3, 
maxConcurrency: 3, }; @@ -121,6 +119,7 @@ describe("LLM unit tests", () => { describe("Negative tests", () => { test("Missing id", async () => { const testProps: WatsonxInputLLM = { + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, }; @@ -171,6 +170,7 @@ describe("LLM unit tests", () => { test("Passing more than one id", async () => { const testProps: WatsonxInputLLM = { + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", @@ -187,6 +187,7 @@ describe("LLM unit tests", () => { test("Not existing property passed", async () => { const testProps = { + model: "ibm/granite-13b-chat-v2", version: "2024-05-31", serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", diff --git a/libs/langchain-community/src/types/ibm.ts b/libs/langchain-community/src/types/ibm.ts index cd11592ee48b..ee5db8532036 100644 --- a/libs/langchain-community/src/types/ibm.ts +++ b/libs/langchain-community/src/types/ibm.ts @@ -18,7 +18,7 @@ export interface WatsonxInit { } export interface WatsonxParams extends WatsonxInit { - model?: string; + model: string; spaceId?: string; projectId?: string; idOrName?: string; From a1530dafca0ac1734f9c02382f5825de011aa158 Mon Sep 17 00:00:00 2001 From: Allen Firstenberg Date: Mon, 11 Nov 2024 19:06:33 -0500 Subject: [PATCH 076/100] feat(google-vertexai): Support Non-Google and Model Garden models in Vertex AI - Anthropic integration (#6999) Co-authored-by: jacoblee93 Co-authored-by: bracesproul --- .../integrations/chat/google_vertex_ai.ipynb | 6 +- .../docs/integrations/platforms/google.mdx | 13 +- .../src/chat_models.ts | 121 ++- .../langchain-google-common/src/connection.ts | 414 ++++++---- libs/langchain-google-common/src/llms.ts | 12 +- .../src/tests/chat_models.test.ts | 213 +++++- .../src/tests/data/chat-2-mock.json | 8 - .../src/tests/data/claude-chat-1-mock.json | 18 + .../src/tests/data/claude-chat-1-mock.sse | 267 +++++++ .../src/tests/utils.test.ts | 150 ++-- .../src/types-anthropic.ts | 237 ++++++ libs/langchain-google-common/src/types.ts | 118 ++- .../src/utils/anthropic.ts | 719 ++++++++++++++++++ .../src/utils/common.ts | 26 +- .../src/utils/gemini.ts | 528 +++++++++---- .../src/utils/stream.ts | 201 ++++- libs/langchain-google-gauth/src/auth.ts | 67 +- .../src/tests/chat_models.int.test.ts | 10 +- .../src/tests/chat_models.int.test.ts | 497 +++++++----- .../src/tests/chat_models.int.test.ts | 6 +- 20 files changed, 2957 insertions(+), 674 deletions(-) create mode 100644 libs/langchain-google-common/src/tests/data/claude-chat-1-mock.json create mode 100644 libs/langchain-google-common/src/tests/data/claude-chat-1-mock.sse create mode 100644 libs/langchain-google-common/src/types-anthropic.ts create mode 100644 libs/langchain-google-common/src/utils/anthropic.ts diff --git a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb index 158e71453fcb..d4de68c3f5e2 100644 --- a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb +++ b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb @@ -21,7 +21,9 @@ "source": [ "# ChatVertexAI\n", "\n", - "[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.\n", + "[Google 
Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.", + "It also provides some non-Google models such as [Anthropic's Claude](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude).", + "\n", "\n", "This will help you getting started with `ChatVertexAI` [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatVertexAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html).\n", "\n", @@ -279,4 +281,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/core_docs/docs/integrations/platforms/google.mdx b/docs/core_docs/docs/integrations/platforms/google.mdx index 00ff6503538d..8460c654bf98 100644 --- a/docs/core_docs/docs/integrations/platforms/google.mdx +++ b/docs/core_docs/docs/integrations/platforms/google.mdx @@ -10,7 +10,7 @@ Functionality related to [Google Cloud Platform](https://cloud.google.com/) ### Gemini Models -Access Gemini models such as `gemini-pro` and `gemini-pro-vision` through the [`ChatGoogleGenerativeAI`](/docs/integrations/chat/google_generativeai), +Access Gemini models such as `gemini-1.5-pro` and `gemini-1.5-flash` through the [`ChatGoogleGenerativeAI`](/docs/integrations/chat/google_generativeai), or if using VertexAI, via the [`ChatVertexAI`](/docs/integrations/chat/google_vertex_ai) class. import Tabs from "@theme/Tabs"; @@ -153,6 +153,17 @@ Click [here](/docs/integrations/chat/google_vertex_ai) for the `@langchain/googl The value of `image_url` must be a base64 encoded image (e.g., `data:image/png;base64,abcd124`). +### Non-Gemini Models + +See above for setting up authentication through Vertex AI to use these models. + +[Anthropic](/docs/integrations/chat/anthropic) Claude models are also available through +the [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude) +platform. See [here](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude) +for more information about enabling access to the models and the model names to use. + +PaLM models are no longer supported. 
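As an editorial illustration of the Claude-on-Vertex support this patch documents, here is a minimal sketch of calling `ChatVertexAI` with an Anthropic model. The model identifier is an assumption (check the partner-model page linked above for the exact names enabled in your project); the `us-east5` region is the default this patch applies to Anthropic models in the connection layer shown further down.

```typescript
// Minimal sketch, assuming `@langchain/google-vertexai` is installed, application
// default credentials are configured, and the chosen Claude model has been
// enabled for the project in the Vertex AI Model Garden.
import { ChatVertexAI } from "@langchain/google-vertexai";

const model = new ChatVertexAI({
  // Hypothetical model id; see the Vertex AI partner-model docs for exact names.
  model: "claude-3-5-sonnet@20240620",
  // Optional: the patch defaults Anthropic models to "us-east5"; shown here explicitly.
  location: "us-east5",
  temperature: 0,
});

const response = await model.invoke("What are some good names for a pet llama?");
console.log(response.content);
```

Apart from the model name and region, the calling surface is the same as for Gemini models, which is the point of routing both through the shared connection layer introduced later in this patch.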
+ ## Vector Store ### Vertex AI Vector Search diff --git a/libs/langchain-google-common/src/chat_models.ts b/libs/langchain-google-common/src/chat_models.ts index d770c25b026f..1de5280fbe72 100644 --- a/libs/langchain-google-common/src/chat_models.ts +++ b/libs/langchain-google-common/src/chat_models.ts @@ -29,9 +29,10 @@ import { GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, - GeminiContent, GeminiTool, GoogleAIBaseLanguageModelCallOptions, + GoogleAIAPI, + GoogleAIAPIParams, } from "./types.js"; import { convertToGeminiTools, @@ -39,7 +40,7 @@ import { copyAndValidateModelParamsInto, } from "./utils/common.js"; import { AbstractGoogleLLMConnection } from "./connection.js"; -import { DefaultGeminiSafetyHandler } from "./utils/gemini.js"; +import { DefaultGeminiSafetyHandler, getGeminiAPI } from "./utils/gemini.js"; import { ApiKeyGoogleAuth, GoogleAbstractedClient } from "./auth.js"; import { JsonStream } from "./utils/stream.js"; import { ensureParams } from "./utils/failed_handler.js"; @@ -96,71 +97,21 @@ export class ChatConnection extends AbstractGoogleLLMConnection< return true; } - async formatContents( - input: BaseMessage[], - _parameters: GoogleAIModelParams - ): Promise { - const inputPromises: Promise[] = input.map((msg, i) => - this.api.baseMessageToContent( - msg, - input[i - 1], - this.useSystemInstruction - ) - ); - const inputs = await Promise.all(inputPromises); - - return inputs.reduce((acc, cur) => { - // Filter out the system content - if (cur.every((content) => content.role === "system")) { - return acc; - } - - // Combine adjacent function messages - if ( - cur[0]?.role === "function" && - acc.length > 0 && - acc[acc.length - 1].role === "function" - ) { - acc[acc.length - 1].parts = [ - ...acc[acc.length - 1].parts, - ...cur[0].parts, - ]; - } else { - acc.push(...cur); - } - - return acc; - }, [] as GeminiContent[]); + buildGeminiAPI(): GoogleAIAPI { + const geminiConfig: GeminiAPIConfig = { + useSystemInstruction: this.useSystemInstruction, + ...(this.apiConfig as GeminiAPIConfig), + }; + return getGeminiAPI(geminiConfig); } - async formatSystemInstruction( - input: BaseMessage[], - _parameters: GoogleAIModelParams - ): Promise { - if (!this.useSystemInstruction) { - return {} as GeminiContent; + get api(): GoogleAIAPI { + switch (this.apiName) { + case "google": + return this.buildGeminiAPI(); + default: + return super.api; } - - let ret = {} as GeminiContent; - for (let index = 0; index < input.length; index += 1) { - const message = input[index]; - if (message._getType() === "system") { - // For system types, we only want it if it is the first message, - // if it appears anywhere else, it should be an error. - if (index === 0) { - // eslint-disable-next-line prefer-destructuring - ret = ( - await this.api.baseMessageToContent(message, undefined, true) - )[0]; - } else { - throw new Error( - "System messages are only permitted as the first passed message." 
- ); - } - } - } - - return ret; } } @@ -172,7 +123,7 @@ export interface ChatGoogleBaseInput GoogleConnectionParams, GoogleAIModelParams, GoogleAISafetyParams, - GeminiAPIConfig, + GoogleAIAPIParams, Pick {} /** @@ -341,13 +292,14 @@ export abstract class ChatGoogleBase const response = await this.connection.request( messages, parameters, - options + options, + runManager ); - const ret = this.connection.api.safeResponseToChatResult( - response, - this.safetyHandler - ); - await runManager?.handleLLMNewToken(ret.generations[0].text); + const ret = this.connection.api.responseToChatResult(response); + const chunk = ret?.generations?.[0]; + if (chunk) { + await runManager?.handleLLMNewToken(chunk.text || ""); + } return ret; } @@ -361,7 +313,8 @@ export abstract class ChatGoogleBase const response = await this.streamedConnection.request( _messages, parameters, - options + options, + runManager ); // Get the streaming parser of the response @@ -372,6 +325,12 @@ export abstract class ChatGoogleBase // that is either available or added to the queue while (!stream.streamDone) { const output = await stream.nextChunk(); + await runManager?.handleCustomEvent( + `google-chunk-${this.constructor.name}`, + { + output, + } + ); if ( output && output.usageMetadata && @@ -386,10 +345,7 @@ export abstract class ChatGoogleBase } const chunk = output !== null - ? this.connection.api.safeResponseToChatGeneration( - { data: output }, - this.safetyHandler - ) + ? this.connection.api.responseToChatGeneration({ data: output }) : new ChatGenerationChunk({ text: "", generationInfo: { finishReason: "stop" }, @@ -398,8 +354,17 @@ export abstract class ChatGoogleBase usage_metadata: usageMetadata, }), }); - yield chunk; - await runManager?.handleLLMNewToken(chunk.text); + if (chunk) { + yield chunk; + await runManager?.handleLLMNewToken( + chunk.text ?? 
"", + undefined, + undefined, + undefined, + undefined, + { chunk } + ); + } } } diff --git a/libs/langchain-google-common/src/connection.ts b/libs/langchain-google-common/src/connection.ts index 7e7da9daa304..5a1c1fa494ae 100644 --- a/libs/langchain-google-common/src/connection.ts +++ b/libs/langchain-google-common/src/connection.ts @@ -1,35 +1,37 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; import { AsyncCaller, AsyncCallerCallOptions, } from "@langchain/core/utils/async_caller"; import { getRuntimeEnvironment } from "@langchain/core/utils/env"; -import { StructuredToolParams } from "@langchain/core/tools"; -import { isLangChainTool } from "@langchain/core/utils/function_calling"; +import { BaseRunManager } from "@langchain/core/callbacks/manager"; +import { BaseCallbackHandler } from "@langchain/core/callbacks/base"; import type { GoogleAIBaseLLMInput, GoogleConnectionParams, - GoogleLLMModelFamily, GooglePlatformType, GoogleResponse, GoogleLLMResponse, - GeminiContent, - GeminiGenerationConfig, - GeminiRequest, - GeminiSafetySetting, - GeminiTool, - GeminiFunctionDeclaration, GoogleAIModelRequestParams, GoogleRawResponse, - GoogleAIToolType, + GoogleAIAPI, + VertexModelFamily, + GoogleAIAPIConfig, + AnthropicAPIConfig, + GeminiAPIConfig, } from "./types.js"; import { GoogleAbstractedClient, GoogleAbstractedClientOps, GoogleAbstractedClientOpsMethod, } from "./auth.js"; -import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js"; -import { getGeminiAPI } from "./utils/index.js"; +import { + getGeminiAPI, + modelToFamily, + modelToPublisher, +} from "./utils/index.js"; +import { getAnthropicAPI } from "./utils/anthropic.js"; export abstract class GoogleConnection< CallOptions extends AsyncCallerCallOptions, @@ -148,9 +150,9 @@ export abstract class GoogleHostConnection< // Use the "platform" getter if you need this. platformType: GooglePlatformType | undefined; - endpoint = "us-central1-aiplatform.googleapis.com"; + _endpoint: string | undefined; - location = "us-central1"; + _location: string | undefined; apiVersion = "v1"; @@ -164,8 +166,8 @@ export abstract class GoogleHostConnection< this.caller = caller; this.platformType = fields?.platformType; - this.endpoint = fields?.endpoint ?? this.endpoint; - this.location = fields?.location ?? this.location; + this._endpoint = fields?.endpoint; + this._location = fields?.location; this.apiVersion = fields?.apiVersion ?? this.apiVersion; this.client = client; } @@ -178,6 +180,22 @@ export abstract class GoogleHostConnection< return "gcp"; } + get location(): string { + return this._location ?? this.computedLocation; + } + + get computedLocation(): string { + return "us-central1"; + } + + get endpoint(): string { + return this._endpoint ?? this.computedEndpoint; + } + + get computedEndpoint(): string { + return `${this.location}-aiplatform.googleapis.com`; + } + buildMethod(): GoogleAbstractedClientOpsMethod { return "POST"; } @@ -213,8 +231,9 @@ export abstract class GoogleAIConnection< client: GoogleAbstractedClient; - // eslint-disable-next-line @typescript-eslint/no-explicit-any - api: any; // FIXME: Make this a real type + _apiName?: string; + + apiConfig?: GoogleAIAPIConfig; constructor( fields: GoogleAIBaseLLMInput | undefined, @@ -226,14 +245,39 @@ export abstract class GoogleAIConnection< this.client = client; this.modelName = fields?.model ?? fields?.modelName ?? 
this.model; this.model = this.modelName; - this.api = getGeminiAPI(fields); + + this._apiName = fields?.apiName; + this.apiConfig = { + safetyHandler: fields?.safetyHandler, // For backwards compatibility + ...fields?.apiConfig, + }; } - get modelFamily(): GoogleLLMModelFamily { - if (this.model.startsWith("gemini")) { - return "gemini"; - } else { - return null; + get modelFamily(): VertexModelFamily { + return modelToFamily(this.model); + } + + get modelPublisher(): string { + return modelToPublisher(this.model); + } + + get computedAPIName(): string { + // At least at the moment, model publishers and APIs map the same + return this.modelPublisher; + } + + get apiName(): string { + return this._apiName ?? this.computedAPIName; + } + + get api(): GoogleAIAPI { + switch (this.apiName) { + case "google": + return getGeminiAPI(this.apiConfig as GeminiAPIConfig); + case "anthropic": + return getAnthropicAPI(this.apiConfig as AnthropicAPIConfig); + default: + throw new Error(`Unknown API: ${this.apiName}`); } } @@ -245,6 +289,19 @@ export abstract class GoogleAIConnection< } } + get computedLocation(): string { + switch (this.apiName) { + case "google": + return super.computedLocation; + case "anthropic": + return "us-east5"; + default: + throw new Error( + `Unknown apiName: ${this.apiName}. Can't get location.` + ); + } + } + abstract buildUrlMethod(): Promise; async buildUrlGenerativeLanguage(): Promise { @@ -256,7 +313,8 @@ export abstract class GoogleAIConnection< async buildUrlVertex(): Promise { const projectId = await this.client.getProjectId(); const method = await this.buildUrlMethod(); - const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.model}:${method}`; + const publisher = this.modelPublisher; + const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/${publisher}/models/${this.model}:${method}`; return url; } @@ -277,10 +335,37 @@ export abstract class GoogleAIConnection< async request( input: InputType, parameters: GoogleAIModelRequestParams, - options: CallOptions - ): Promise { - const data = await this.formatData(input, parameters); + + options: CallOptions, + runManager?: BaseRunManager + ): Promise { + const moduleName = this.constructor.name; + const streamingParameters: GoogleAIModelRequestParams = { + ...parameters, + streaming: this.streaming, + }; + const data = await this.formatData(input, streamingParameters); + + await runManager?.handleCustomEvent(`google-request-${moduleName}`, { + data, + parameters: streamingParameters, + options, + connection: { + ...this, + url: await this.buildUrl(), + urlMethod: await this.buildUrlMethod(), + modelFamily: this.modelFamily, + modelPublisher: this.modelPublisher, + computedPlatformType: this.computedPlatformType, + }, + }); + const response = await this._request(data, options); + + await runManager?.handleCustomEvent(`google-response-${moduleName}`, { + response, + }); + return response; } } @@ -298,141 +383,202 @@ export abstract class AbstractGoogleLLMConnection< return this.streaming ? "streamGenerateContent" : "generateContent"; } + async buildUrlMethodClaude(): Promise { + return this.streaming ? 
"streamRawPredict" : "rawPredict"; + } + async buildUrlMethod(): Promise { switch (this.modelFamily) { case "gemini": return this.buildUrlMethodGemini(); + case "claude": + return this.buildUrlMethodClaude(); default: throw new Error(`Unknown model family: ${this.modelFamily}`); } } - abstract formatContents( + async formatData( input: MessageType, parameters: GoogleAIModelRequestParams - ): Promise; + ): Promise { + return this.api.formatData(input, parameters); + } +} - formatGenerationConfig( - _input: MessageType, - parameters: GoogleAIModelRequestParams - ): GeminiGenerationConfig { +export interface GoogleCustomEventInfo { + subEvent: string; + module: string; +} + +export abstract class GoogleRequestCallbackHandler extends BaseCallbackHandler { + customEventInfo(eventName: string): GoogleCustomEventInfo { + const names = eventName.split("-"); return { - temperature: parameters.temperature, - topK: parameters.topK, - topP: parameters.topP, - maxOutputTokens: parameters.maxOutputTokens, - stopSequences: parameters.stopSequences, - responseMimeType: parameters.responseMimeType, + subEvent: names[1], + module: names[2], }; } - formatSafetySettings( - _input: MessageType, - parameters: GoogleAIModelRequestParams - ): GeminiSafetySetting[] { - return parameters.safetySettings ?? []; + abstract handleCustomRequestEvent( + eventName: string, + eventInfo: GoogleCustomEventInfo, + data: any, + runId: string, + tags?: string[], + metadata?: Record + ): any; + + abstract handleCustomResponseEvent( + eventName: string, + eventInfo: GoogleCustomEventInfo, + data: any, + runId: string, + tags?: string[], + metadata?: Record + ): any; + + abstract handleCustomChunkEvent( + eventName: string, + eventInfo: GoogleCustomEventInfo, + data: any, + runId: string, + tags?: string[], + metadata?: Record + ): any; + + handleCustomEvent( + eventName: string, + data: any, + runId: string, + tags?: string[], + metadata?: Record + ): any { + if (!eventName) { + return undefined; + } + const eventInfo = this.customEventInfo(eventName); + switch (eventInfo.subEvent) { + case "request": + return this.handleCustomRequestEvent( + eventName, + eventInfo, + data, + runId, + tags, + metadata + ); + case "response": + return this.handleCustomResponseEvent( + eventName, + eventInfo, + data, + runId, + tags, + metadata + ); + case "chunk": + return this.handleCustomChunkEvent( + eventName, + eventInfo, + data, + runId, + tags, + metadata + ); + default: + console.error( + `Unexpected eventInfo for ${eventName} ${JSON.stringify( + eventInfo, + null, + 1 + )}` + ); + } } +} - async formatSystemInstruction( - _input: MessageType, - _parameters: GoogleAIModelRequestParams - ): Promise { - return {} as GeminiContent; - } +export class GoogleRequestLogger extends GoogleRequestCallbackHandler { + name: string = "GoogleRequestLogger"; - structuredToolToFunctionDeclaration( - tool: StructuredToolParams - ): GeminiFunctionDeclaration { - const jsonSchema = zodToGeminiParameters(tool.schema); - return { - name: tool.name, - description: tool.description ?? `A function available to call.`, - parameters: jsonSchema, - }; + log(eventName: string, data: any, tags?: string[]): undefined { + const tagStr = tags ? 
`[${tags}]` : "[]"; + console.log(`${eventName} ${tagStr} ${JSON.stringify(data, null, 1)}`); } - structuredToolsToGeminiTools(tools: StructuredToolParams[]): GeminiTool[] { - return [ - { - functionDeclarations: tools.map( - this.structuredToolToFunctionDeclaration - ), - }, - ]; + handleCustomRequestEvent( + eventName: string, + _eventInfo: GoogleCustomEventInfo, + data: any, + _runId: string, + tags?: string[], + _metadata?: Record + ): any { + this.log(eventName, data, tags); } - formatTools( - _input: MessageType, - parameters: GoogleAIModelRequestParams - ): GeminiTool[] { - const tools: GoogleAIToolType[] | undefined = parameters?.tools; - if (!tools || tools.length === 0) { - return []; - } + handleCustomResponseEvent( + eventName: string, + _eventInfo: GoogleCustomEventInfo, + data: any, + _runId: string, + tags?: string[], + _metadata?: Record + ): any { + this.log(eventName, data, tags); + } - if (tools.every(isLangChainTool)) { - return this.structuredToolsToGeminiTools(tools); - } else { - if ( - tools.length === 1 && - (!("functionDeclarations" in tools[0]) || - !tools[0].functionDeclarations?.length) - ) { - return []; - } - return tools as GeminiTool[]; - } + handleCustomChunkEvent( + eventName: string, + _eventInfo: GoogleCustomEventInfo, + data: any, + _runId: string, + tags?: string[], + _metadata?: Record + ): any { + this.log(eventName, data, tags); } +} - formatToolConfig( - parameters: GoogleAIModelRequestParams - ): GeminiRequest["toolConfig"] | undefined { - if (!parameters.tool_choice || typeof parameters.tool_choice !== "string") { - return undefined; - } +export class GoogleRequestRecorder extends GoogleRequestCallbackHandler { + name = "GoogleRequestRecorder"; - return { - functionCallingConfig: { - mode: parameters.tool_choice as "auto" | "any" | "none", - allowedFunctionNames: parameters.allowed_function_names, - }, - }; + request: any = {}; + + response: any = {}; + + chunk: any[] = []; + + handleCustomRequestEvent( + _eventName: string, + _eventInfo: GoogleCustomEventInfo, + data: any, + _runId: string, + _tags?: string[], + _metadata?: Record + ): any { + this.request = data; } - async formatData( - input: MessageType, - parameters: GoogleAIModelRequestParams - ): Promise { - const contents = await this.formatContents(input, parameters); - const generationConfig = this.formatGenerationConfig(input, parameters); - const tools = this.formatTools(input, parameters); - const toolConfig = this.formatToolConfig(parameters); - const safetySettings = this.formatSafetySettings(input, parameters); - const systemInstruction = await this.formatSystemInstruction( - input, - parameters - ); + handleCustomResponseEvent( + _eventName: string, + _eventInfo: GoogleCustomEventInfo, + data: any, + _runId: string, + _tags?: string[], + _metadata?: Record + ): any { + this.response = data; + } - const ret: GeminiRequest = { - contents, - generationConfig, - }; - if (tools && tools.length) { - ret.tools = tools; - } - if (toolConfig) { - ret.toolConfig = toolConfig; - } - if (safetySettings && safetySettings.length) { - ret.safetySettings = safetySettings; - } - if ( - systemInstruction?.role && - systemInstruction?.parts && - systemInstruction?.parts?.length - ) { - ret.systemInstruction = systemInstruction; - } - return ret; + handleCustomChunkEvent( + _eventName: string, + _eventInfo: GoogleCustomEventInfo, + data: any, + _runId: string, + _tags?: string[], + _metadata?: Record + ): any { + this.chunk.push(data); } } diff --git a/libs/langchain-google-common/src/llms.ts 
b/libs/langchain-google-common/src/llms.ts index b359a41e7d45..ad74c74e4ac3 100644 --- a/libs/langchain-google-common/src/llms.ts +++ b/libs/langchain-google-common/src/llms.ts @@ -37,7 +37,7 @@ class GoogleLLMConnection extends AbstractGoogleLLMConnection< input: MessageContent, _parameters: GoogleAIModelParams ): Promise { - const parts = await this.api.messageContentToParts(input); + const parts = await this.api.messageContentToParts!(input); const contents: GeminiContent[] = [ { role: "user", // Required by Vertex AI @@ -189,10 +189,7 @@ export abstract class GoogleBaseLLM ): Promise { const parameters = copyAIModelParams(this, options); const result = await this.connection.request(prompt, parameters, options); - const ret = this.connection.api.safeResponseToString( - result, - this.safetyHandler - ); + const ret = this.connection.api.responseToString(result); return ret; } @@ -270,10 +267,7 @@ export abstract class GoogleBaseLLM {}, options as BaseLanguageModelCallOptions ); - const ret = this.connection.api.safeResponseToBaseMessage( - result, - this.safetyHandler - ); + const ret = this.connection.api.responseToBaseMessage(result); return ret; } diff --git a/libs/langchain-google-common/src/tests/chat_models.test.ts b/libs/langchain-google-common/src/tests/chat_models.test.ts index 9da477df3e0e..aa15be74ed79 100644 --- a/libs/langchain-google-common/src/tests/chat_models.test.ts +++ b/libs/langchain-google-common/src/tests/chat_models.test.ts @@ -1,3 +1,4 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ import { expect, test } from "@jest/globals"; import { AIMessage, @@ -10,12 +11,19 @@ import { ToolMessage, } from "@langchain/core/messages"; import { InMemoryStore } from "@langchain/core/stores"; - +import { CallbackHandlerMethods } from "@langchain/core/callbacks/base"; +import { Serialized } from "@langchain/core/load/serializable"; import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; import { ChatGoogleBase, ChatGoogleBaseInput } from "../chat_models.js"; import { authOptions, MockClient, MockClientAuthInfo, mockId } from "./mock.js"; -import { GeminiTool, GoogleAIBaseLLMInput } from "../types.js"; +import { + GeminiTool, + GoogleAIBaseLLMInput, + GoogleAISafetyCategory, + GoogleAISafetyHandler, + GoogleAISafetyThreshold, +} from "../types.js"; import { GoogleAbstractedClient } from "../auth.js"; import { GoogleAISafetyError } from "../utils/safety.js"; import { @@ -25,6 +33,7 @@ import { ReadThroughBlobStore, } from "../experimental/utils/media_core.js"; import { removeAdditionalProperties } from "../utils/zod_to_gemini_parameters.js"; +import { MessageGeminiSafetyHandler } from "../utils/index.js"; class ChatGoogle extends ChatGoogleBase { constructor(fields?: ChatGoogleBaseInput) { @@ -39,7 +48,7 @@ class ChatGoogle extends ChatGoogleBase { } } -describe("Mock ChatGoogle", () => { +describe("Mock ChatGoogle - Gemini", () => { test("Setting invalid model parameters", async () => { expect(() => { const model = new ChatGoogle({ @@ -71,7 +80,6 @@ describe("Mock ChatGoogle", () => { }); test("user agent header", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -100,7 +108,6 @@ describe("Mock ChatGoogle", () => { }); test("platform default", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ 
-115,7 +122,6 @@ describe("Mock ChatGoogle", () => { }); test("platform set", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -131,7 +137,6 @@ describe("Mock ChatGoogle", () => { }); test("1. Basic request format", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -165,7 +170,6 @@ describe("Mock ChatGoogle", () => { }); test("1. Invoke request format", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -199,7 +203,6 @@ describe("Mock ChatGoogle", () => { }); test("1. Response format", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -224,7 +227,6 @@ describe("Mock ChatGoogle", () => { }); test("1. Invoke response format", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -252,7 +254,6 @@ describe("Mock ChatGoogle", () => { // SystemMessages will be turned into the human request with the prompt // from the system message and a faked ai response saying "Ok". test("1. System request format old model", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -293,7 +294,6 @@ describe("Mock ChatGoogle", () => { }); test("1. System request format convert true", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -334,7 +334,6 @@ describe("Mock ChatGoogle", () => { }); test("1. System request format convert false", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -373,7 +372,6 @@ describe("Mock ChatGoogle", () => { }); test("1. System request format new model", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -412,7 +410,6 @@ describe("Mock ChatGoogle", () => { }); test("1. System request - multiple", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -444,7 +441,6 @@ describe("Mock ChatGoogle", () => { }); test("1. System request - not first", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -472,8 +468,7 @@ describe("Mock ChatGoogle", () => { expect(caught).toBeTruthy(); }); - test("2. Response format - safety", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any + test("2. 
Safety - settings", async () => { const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -483,6 +478,12 @@ describe("Mock ChatGoogle", () => { }; const model = new ChatGoogle({ authOptions, + safetySettings: [ + { + category: GoogleAISafetyCategory.Harassment, + threshold: GoogleAISafetyThreshold.Most, + }, + ], }); const messages: BaseMessageLike[] = [ new HumanMessage("Flip a coin and tell me H for heads and T for tails"), @@ -492,25 +493,88 @@ describe("Mock ChatGoogle", () => { let caught = false; try { await model.invoke(messages); + } catch (xx: any) { + caught = true; + } + + const settings = record?.opts?.data?.safetySettings; + expect(settings).toBeDefined(); + expect(Array.isArray(settings)).toEqual(true); + expect(settings).toHaveLength(1); + expect(settings[0].category).toEqual("HARM_CATEGORY_HARASSMENT"); + expect(settings[0].threshold).toEqual("BLOCK_LOW_AND_ABOVE"); - // eslint-disable-next-line @typescript-eslint/no-explicit-any + expect(caught).toEqual(true); + }); + + test("2. Safety - default", async () => { + const record: Record = {}; + const projectId = mockId(); + const authOptions: MockClientAuthInfo = { + record, + projectId, + resultFile: "chat-2-mock.json", + }; + const model = new ChatGoogle({ + authOptions, + }); + const messages: BaseMessageLike[] = [ + new HumanMessage("Flip a coin and tell me H for heads and T for tails"), + new AIMessage("H"), + new HumanMessage("Flip it again"), + ]; + let caught = false; + try { + await model.invoke(messages); } catch (xx: any) { caught = true; expect(xx).toBeInstanceOf(GoogleAISafetyError); - const result = xx?.reply.generations[0].message; + const result = xx?.reply.generations[0]; + expect(result).toBeUndefined(); + } + + expect(caught).toEqual(true); + }); + + test("2. Safety - safety handler", async () => { + const safetyHandler: GoogleAISafetyHandler = new MessageGeminiSafetyHandler( + { + msg: "I'm sorry, Dave, but I can't do that.", + } + ); + const record: Record = {}; + const projectId = mockId(); + const authOptions: MockClientAuthInfo = { + record, + projectId, + resultFile: "chat-2-mock.json", + }; + const model = new ChatGoogle({ + authOptions, + safetyHandler, + }); + const messages: BaseMessageLike[] = [ + new HumanMessage("Flip a coin and tell me H for heads and T for tails"), + new AIMessage("H"), + new HumanMessage("Flip it again"), + ]; + let caught = false; + try { + const result = await model.invoke(messages); expect(result._getType()).toEqual("ai"); const aiMessage = result as AIMessage; expect(aiMessage.content).toBeDefined(); - expect(aiMessage.content).toBe("T"); + expect(aiMessage.content).toBe("I'm sorry, Dave, but I can't do that."); + } catch (xx: any) { + caught = true; } - expect(caught).toEqual(true); + expect(caught).toEqual(false); }); test("3. invoke - images", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -556,7 +620,6 @@ describe("Mock ChatGoogle", () => { }); test("3. 
invoke - media - invalid", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -588,12 +651,11 @@ describe("Mock ChatGoogle", () => { const result = await model.invoke(messages); expect(result).toBeUndefined(); } catch (e) { - expect((e as Error).message).toEqual("Invalid media content"); + expect((e as Error).message).toMatch(/^Invalid media content/); } }); test("3. invoke - media - no manager", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -684,20 +746,14 @@ describe("Mock ChatGoogle", () => { async function store(path: string, text: string): Promise { const type = path.endsWith(".png") ? "image/png" : "text/plain"; - const blob = new MediaBlob({ - data: { - value: text, - type, - }, - path, - }); + const data = new Blob([text], { type }); + const blob = await MediaBlob.fromBlob(data, { path }); await resolver.store(blob); } await store("resolve://host/foo", "fooing"); await store("resolve://host2/bar/baz", "barbazing"); await store("resolve://host/foo/blue-box.png", "png"); - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -705,10 +761,38 @@ describe("Mock ChatGoogle", () => { projectId, resultFile: "chat-3-mock.json", }; + const callbacks: CallbackHandlerMethods[] = [ + { + handleChatModelStart( + llm: Serialized, + messages: BaseMessage[][], + runId: string, + _parentRunId?: string, + _extraParams?: Record, + _tags?: string[], + _metadata?: Record, + _runName?: string + ): any { + console.log("Chat start", llm, messages, runId); + }, + handleCustomEvent( + eventName: string, + data: any, + runId: string, + tags?: string[], + metadata?: Record + ): any { + console.log("Custom event", eventName, runId, data, tags, metadata); + }, + }, + ]; const model = new ChatGoogle({ authOptions, model: "gemini-1.5-flash", - mediaManager, + apiConfig: { + mediaManager, + }, + callbacks, }); const message: MessageContentComplex[] = [ @@ -750,7 +834,6 @@ describe("Mock ChatGoogle", () => { }); test("4. Functions Bind - Gemini format request", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -832,7 +915,6 @@ describe("Mock ChatGoogle", () => { }); test("4. Functions withStructuredOutput - Gemini format request", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -904,7 +986,6 @@ describe("Mock ChatGoogle", () => { }); test("4. Functions - results", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -965,7 +1046,6 @@ describe("Mock ChatGoogle", () => { }); test("5. Functions - function reply", async () => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any const record: Record = {}; const projectId = mockId(); const authOptions: MockClientAuthInfo = { @@ -1027,7 +1107,60 @@ describe("Mock ChatGoogle", () => { }); }); -// eslint-disable-next-line @typescript-eslint/no-explicit-any +describe("Mock ChatGoogle - Anthropic", () => { + test("1. 
Invoke request format", async () => {
+    const record: Record<string, any> = {};
+    const projectId = mockId();
+    const authOptions: MockClientAuthInfo = {
+      record,
+      projectId,
+      resultFile: "claude-chat-1-mock.json",
+    };
+    const model = new ChatGoogle({
+      model: "claude-3-5-sonnet@20240620",
+      platformType: "gcp",
+      authOptions,
+    });
+    const messages: BaseMessageLike[] = [new HumanMessage("What is 1+1?")];
+    await model.invoke(messages);
+
+    console.log("record", record);
+    expect(record.opts).toBeDefined();
+    expect(record.opts.data).toBeDefined();
+    const { data } = record.opts;
+    expect(data.messages).toBeDefined();
+    expect(data.messages.length).toEqual(1);
+    expect(data.messages[0].role).toEqual("user");
+    expect(data.messages[0].content).toBeDefined();
+    expect(data.messages[0].content.length).toBeGreaterThanOrEqual(1);
+    expect(data.messages[0].content[0].text).toBeDefined();
+    expect(data.system).not.toBeDefined();
+  });
+
+  test("1. Invoke response format", async () => {
+    const record: Record<string, any> = {};
+    const projectId = mockId();
+    const authOptions: MockClientAuthInfo = {
+      record,
+      projectId,
+      resultFile: "claude-chat-1-mock.json",
+    };
+    const model = new ChatGoogle({
+      model: "claude-3-5-sonnet@20240620",
+      platformType: "gcp",
+      authOptions,
+    });
+    const messages: BaseMessageLike[] = [new HumanMessage("What is 1+1?")];
+    const result = await model.invoke(messages);
+
+    expect(result._getType()).toEqual("ai");
+    const aiMessage = result as AIMessage;
+    expect(aiMessage.content).toBeDefined();
+    expect(aiMessage.content).toBe(
+      "1 + 1 = 2\n\nThis is one of the most basic arithmetic equations. It represents the addition of two units, resulting in a sum of two."
+    );
+  });
+});

 function extractKeys(obj: Record<string, any>, keys: string[] = []) {
   for (const key in obj) {
     if (Object.prototype.hasOwnProperty.call(obj, key)) {
diff --git a/libs/langchain-google-common/src/tests/data/chat-2-mock.json b/libs/langchain-google-common/src/tests/data/chat-2-mock.json
index 9ee0bf4564d8..406c22609a76 100644
--- a/libs/langchain-google-common/src/tests/data/chat-2-mock.json
+++ b/libs/langchain-google-common/src/tests/data/chat-2-mock.json
@@ -1,14 +1,6 @@
 {
   "candidates": [
     {
-      "content": {
-        "parts": [
-          {
-            "text": "T"
-          }
-        ],
-        "role": "model"
-      },
       "finishReason": "SAFETY",
       "index": 0,
       "safetyRatings": [
diff --git a/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.json b/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.json
new file mode 100644
index 000000000000..d465fe45392d
--- /dev/null
+++ b/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.json
@@ -0,0 +1,18 @@
+{
+  "id": "msg_vrtx_01AGfmYa73qH7wpmFsVFr4rq",
+  "type": "message",
+  "role": "assistant",
+  "model": "claude-3-5-sonnet-20240620",
+  "content": [
+    {
+      "type": "text",
+      "text": "1 + 1 = 2\n\nThis is one of the most basic arithmetic equations. It represents the addition of two units, resulting in a sum of two."
+ } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 39 + } +} diff --git a/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.sse b/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.sse new file mode 100644 index 000000000000..4213b5548378 --- /dev/null +++ b/libs/langchain-google-common/src/tests/data/claude-chat-1-mock.sse @@ -0,0 +1,267 @@ +event: message_start +data: {"type":"message_start","message":{"id":"msg_vrtx_01JLACAmH9Ke3HQEUK1Sg8iT","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":15,"output_tokens":1}} } + +event: ping +data: {"type": "ping"} + +event: content_block_start +data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Thank"}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you for inqu"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"iring about my well"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-being!"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" I'm functioning"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" optim"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ally an"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d feeling"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" quite enthusi"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"astic about"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" engaging"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" in"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" conversation"}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" an"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d assisting with"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" any tasks"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" or"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" queries you might"}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" have. 
As"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" an"}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" AI,"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" I don"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"'t experience emotions or"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" physical"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" sensations in"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" the way"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" humans do, but"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" I can"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" say"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" that my"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" systems"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" are operating"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" at"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" peak"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" efficiency. I'm"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" eager"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" to learn,"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" explore"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" ideas"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":", and tackle"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" intellectual"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" challenges. 
The"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" vast"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" repository"}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" of knowledge"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" at"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" my disposal is"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" pr"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"imed an"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d ready to be put"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" to use in whatever"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" manner"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you"}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" see"}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" fit. Whether"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"'re"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" looking for in"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-depth analysis,"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" creative brainstor"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ming, or simply"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" a friendly chat"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":", I'm here"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" an"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d fully"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" prepare"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d to dive"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" into"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" our"} } + +event: content_block_delta +data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" interaction"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" with"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" gu"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"sto. Is"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" there any"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" particular"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" subject"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" or"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" task"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" you'"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d like to discuss or"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" work"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" on today"} } + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"?"} } + +event: content_block_stop +data: {"type":"content_block_stop","index":0 } + +event: message_delta +data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":165} } + +event: message_stop +data: {"type":"message_stop" } + diff --git a/libs/langchain-google-common/src/tests/utils.test.ts b/libs/langchain-google-common/src/tests/utils.test.ts index 547392c397e7..70dacca0b8ee 100644 --- a/libs/langchain-google-common/src/tests/utils.test.ts +++ b/libs/langchain-google-common/src/tests/utils.test.ts @@ -13,7 +13,11 @@ import { ReadThroughBlobStore, SimpleWebBlobStore, } from "../experimental/utils/media_core.js"; -import { ReadableJsonStream } from "../utils/stream.js"; +import { + ReadableJsonStream, + ReadableSseJsonStream, + ReadableSseStream, +} from "../utils/stream.js"; describe("zodToGeminiParameters", () => { test("can convert zod schema to gemini schema", () => { @@ -420,52 +424,112 @@ function toUint8Array(data: string): Uint8Array { return new TextEncoder().encode(data); } -test("ReadableJsonStream can handle stream", async () => { - const data = [ - toUint8Array("["), - toUint8Array('{"i": 1}'), - toUint8Array('{"i'), - toUint8Array('": 2}'), - toUint8Array("]"), - ]; +describe("streaming", () => { + test("ReadableJsonStream can handle stream", async () => { + const data = [ + toUint8Array("["), + toUint8Array('{"i": 1}'), + toUint8Array('{"i'), + toUint8Array('": 2}'), + toUint8Array("]"), + ]; + + const source = new ReadableStream({ + start(controller) { + data.forEach((chunk) => controller.enqueue(chunk)); + controller.close(); + }, + }); + const stream = new ReadableJsonStream(source); + expect(await stream.nextChunk()).toEqual({ i: 1 }); + expect(await stream.nextChunk()).toEqual({ i: 2 }); + expect(await 
stream.nextChunk()).toBeNull(); + expect(stream.streamDone).toEqual(true); + }); - const source = new ReadableStream({ - start(controller) { - data.forEach((chunk) => controller.enqueue(chunk)); - controller.close(); - }, + test("ReadableJsonStream can handle multibyte stream", async () => { + const data = [ + toUint8Array("["), + toUint8Array('{"i": 1, "msg":"hello👋"}'), + toUint8Array('{"i": 2,'), + toUint8Array('"msg":"こん'), + new Uint8Array([0xe3]), // 1st byte of "に" + new Uint8Array([0x81, 0xab]), // 2-3rd bytes of "に" + toUint8Array("ちは"), + new Uint8Array([0xf0, 0x9f]), // first half bytes of "👋" + new Uint8Array([0x91, 0x8b]), // second half bytes of "👋" + toUint8Array('"}'), + toUint8Array("]"), + ]; + + const source = new ReadableStream({ + start(controller) { + data.forEach((chunk) => controller.enqueue(chunk)); + controller.close(); + }, + }); + const stream = new ReadableJsonStream(source); + expect(await stream.nextChunk()).toEqual({ i: 1, msg: "hello👋" }); + expect(await stream.nextChunk()).toEqual({ i: 2, msg: "こんにちは👋" }); + expect(await stream.nextChunk()).toBeNull(); + expect(stream.streamDone).toEqual(true); }); - const stream = new ReadableJsonStream(source); - expect(await stream.nextChunk()).toEqual({ i: 1 }); - expect(await stream.nextChunk()).toEqual({ i: 2 }); - expect(await stream.nextChunk()).toBeNull(); - expect(stream.streamDone).toEqual(true); -}); -test("ReadableJsonStream can handle multibyte stream", async () => { - const data = [ - toUint8Array("["), - toUint8Array('{"i": 1, "msg":"hello👋"}'), - toUint8Array('{"i": 2,'), - toUint8Array('"msg":"こん'), - new Uint8Array([0xe3]), // 1st byte of "に" - new Uint8Array([0x81, 0xab]), // 2-3rd bytes of "に" - toUint8Array("ちは"), - new Uint8Array([0xf0, 0x9f]), // first half bytes of "👋" - new Uint8Array([0x91, 0x8b]), // second half bytes of "👋" - toUint8Array('"}'), - toUint8Array("]"), + const eventData: string[] = [ + "event: ping\n", + 'data: {"type": "ping"}\n', + "\n", + "event: pong\n", + 'data: {"type": "pong", "value": "ping-pong"}\n', + "\n", + "\n", ]; - const source = new ReadableStream({ - start(controller) { - data.forEach((chunk) => controller.enqueue(chunk)); - controller.close(); - }, + test("SseStream", async () => { + const source = new ReadableStream({ + start(controller) { + eventData.forEach((chunk) => controller.enqueue(toUint8Array(chunk))); + controller.close(); + }, + }); + + let chunk; + const stream = new ReadableSseStream(source); + + chunk = await stream.nextChunk(); + expect(chunk.event).toEqual("ping"); + expect(chunk.data).toEqual('{"type": "ping"}'); + + chunk = await stream.nextChunk(); + expect(chunk.event).toEqual("pong"); + + chunk = await stream.nextChunk(); + expect(chunk).toBeNull(); + + expect(stream.streamDone).toEqual(true); + }); + + test("SseJsonStream", async () => { + const source = new ReadableStream({ + start(controller) { + eventData.forEach((chunk) => controller.enqueue(toUint8Array(chunk))); + controller.close(); + }, + }); + + let chunk; + const stream = new ReadableSseJsonStream(source); + + chunk = await stream.nextChunk(); + expect(chunk.type).toEqual("ping"); + + chunk = await stream.nextChunk(); + expect(chunk.type).toEqual("pong"); + expect(chunk.value).toEqual("ping-pong"); + + chunk = await stream.nextChunk(); + expect(chunk).toBeNull(); + + expect(stream.streamDone).toEqual(true); }); - const stream = new ReadableJsonStream(source); - expect(await stream.nextChunk()).toEqual({ i: 1, msg: "hello👋" }); - expect(await stream.nextChunk()).toEqual({ i: 2, 
msg: "こんにちは👋" }); - expect(await stream.nextChunk()).toBeNull(); - expect(stream.streamDone).toEqual(true); }); diff --git a/libs/langchain-google-common/src/types-anthropic.ts b/libs/langchain-google-common/src/types-anthropic.ts new file mode 100644 index 000000000000..a4c182e09f39 --- /dev/null +++ b/libs/langchain-google-common/src/types-anthropic.ts @@ -0,0 +1,237 @@ +export interface AnthropicCacheControl { + type: "ephemeral" | string; +} + +interface AnthropicMessageContentBase { + type: string; + cache_control?: AnthropicCacheControl | null; +} + +export interface AnthropicMessageContentText + extends AnthropicMessageContentBase { + type: "text"; + text: string; +} + +export interface AnthropicMessageContentImage + extends AnthropicMessageContentBase { + type: "image"; + source: { + type: "base64" | string; + media_type: string; + data: string; + }; +} + +// TODO: Define this +export type AnthropicMessageContentToolUseInput = object; + +export interface AnthropicMessageContentToolUse + extends AnthropicMessageContentBase { + type: "tool_use"; + id: string; + name: string; + input: AnthropicMessageContentToolUseInput; +} + +export type AnthropicMessageContentToolResultContent = + | AnthropicMessageContentText + | AnthropicMessageContentImage; + +export interface AnthropicMessageContentToolResult + extends AnthropicMessageContentBase { + type: "tool_result"; + tool_use_id: string; + is_error?: boolean; + content: string | AnthropicMessageContentToolResultContent[]; +} + +export type AnthropicMessageContent = + | AnthropicMessageContentText + | AnthropicMessageContentImage + | AnthropicMessageContentToolUse + | AnthropicMessageContentToolResult; + +export interface AnthropicMessage { + role: string; + content: string | AnthropicMessageContent[]; +} + +export interface AnthropicMetadata { + user_id?: string | null; +} + +interface AnthropicToolChoiceBase { + type: string; +} + +export interface AnthropicToolChoiceAuto extends AnthropicToolChoiceBase { + type: "auto"; +} + +export interface AnthropicToolChoiceAny extends AnthropicToolChoiceBase { + type: "any"; +} + +export interface AnthropicToolChoiceTool extends AnthropicToolChoiceBase { + type: "tool"; + name: string; +} + +export type AnthropicToolChoice = + | AnthropicToolChoiceAuto + | AnthropicToolChoiceAny + | AnthropicToolChoiceTool; + +// TODO: Define this +export type AnthropicToolInputSchema = object; + +export interface AnthropicTool { + type?: string; // Just available on tools 20241022 and later? 
+ name: string; + description?: string; + cache_control?: AnthropicCacheControl; + input_schema: AnthropicToolInputSchema; +} + +export interface AnthropicRequest { + anthropic_version: string; + messages: AnthropicMessage[]; + system?: string; + stream?: boolean; + max_tokens: number; + temperature?: number; + top_k?: number; + top_p?: number; + stop_sequences?: string[]; + metadata?: AnthropicMetadata; + tool_choice?: AnthropicToolChoice; + tools?: AnthropicTool[]; +} + +export type AnthropicRequestSettings = Pick< + AnthropicRequest, + "max_tokens" | "temperature" | "top_k" | "top_p" | "stop_sequences" | "stream" +>; + +export interface AnthropicContentText { + type: "text"; + text: string; +} + +export interface AnthropicContentToolUse { + type: "tool_use"; + id: string; + name: string; + input: object; +} + +export type AnthropicContent = AnthropicContentText | AnthropicContentToolUse; + +export interface AnthropicUsage { + input_tokens: number; + output_tokens: number; + cache_creation_input_tokens: number | null; + cache_creation_output_tokens: number | null; +} + +export type AnthropicResponseData = + | AnthropicResponseMessage + | AnthropicStreamBaseEvent; + +export interface AnthropicResponseMessage { + id: string; + type: string; + role: string; + content: AnthropicContent[]; + model: string; + stop_reason: string | null; + stop_sequence: string | null; + usage: AnthropicUsage; +} + +export interface AnthropicAPIConfig { + version?: string; +} + +export type AnthropicStreamEventType = + | "message_start" + | "content_block_start" + | "content_block_delta" + | "content_block_stop" + | "message_delta" + | "message_stop" + | "ping" + | "error"; + +export type AnthropicStreamDeltaType = "text_delta" | "input_json_delta"; + +export interface AnthropicStreamBaseEvent { + type: AnthropicStreamEventType; +} + +export interface AnthropicStreamMessageStartEvent + extends AnthropicStreamBaseEvent { + type: "message_start"; + message: AnthropicResponseMessage; +} + +export interface AnthropicStreamContentBlockStartEvent + extends AnthropicStreamBaseEvent { + type: "content_block_start"; + index: number; + content_block: AnthropicContent; +} + +export interface AnthropicStreamBaseDelta { + type: AnthropicStreamDeltaType; +} + +export interface AnthropicStreamTextDelta extends AnthropicStreamBaseDelta { + type: "text_delta"; + text: string; +} + +export interface AnthropicStreamInputJsonDelta + extends AnthropicStreamBaseDelta { + type: "input_json_delta"; + partial_json: string; +} + +export type AnthropicStreamDelta = + | AnthropicStreamTextDelta + | AnthropicStreamInputJsonDelta; + +export interface AnthropicStreamContentBlockDeltaEvent + extends AnthropicStreamBaseEvent { + type: "content_block_delta"; + index: number; + delta: AnthropicStreamDelta; +} + +export interface AnthropicStreamContentBlockStopEvent + extends AnthropicStreamBaseEvent { + type: "content_block_stop"; + index: number; +} + +export interface AnthropicStreamMessageDeltaEvent + extends AnthropicStreamBaseEvent { + type: "message_delta"; + delta: Partial; +} + +export interface AnthropicStreamMessageStopEvent + extends AnthropicStreamBaseEvent { + type: "message_stop"; +} + +export interface AnthropicStreamPingEvent extends AnthropicStreamBaseEvent { + type: "ping"; +} + +export interface AnthropicStreamErrorEvent extends AnthropicStreamBaseEvent { + type: "error"; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + error: any; +} diff --git a/libs/langchain-google-common/src/types.ts 
b/libs/langchain-google-common/src/types.ts index 4fecd254693b..bb49cf2edd4f 100644 --- a/libs/langchain-google-common/src/types.ts +++ b/libs/langchain-google-common/src/types.ts @@ -3,8 +3,20 @@ import type { BaseChatModelCallOptions, BindToolsInput, } from "@langchain/core/language_models/chat_models"; +import { + BaseMessage, + BaseMessageChunk, + MessageContent, +} from "@langchain/core/messages"; +import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs"; import type { JsonStream } from "./utils/stream.js"; import { MediaManager } from "./experimental/utils/media_core.js"; +import { + AnthropicResponseData, + AnthropicAPIConfig, +} from "./types-anthropic.js"; + +export * from "./types-anthropic.js"; /** * Parameters needed to setup the client connection. @@ -45,10 +57,68 @@ export interface GoogleConnectionParams platformType?: GooglePlatformType; } +export const GoogleAISafetyCategory = { + Harassment: "HARM_CATEGORY_HARASSMENT", + HARASSMENT: "HARM_CATEGORY_HARASSMENT", + HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT", + + HateSpeech: "HARM_CATEGORY_HATE_SPEECH", + HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH", + HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH", + + SexuallyExplicit: "HARM_CATEGORY_SEXUALLY_EXPLICIT", + SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT", + HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT", + + Dangerous: "HARM_CATEGORY_DANGEROUS", + DANGEROUS: "HARM_CATEGORY_DANGEROUS", + HARM_CATEGORY_DANGEROUS: "HARM_CATEGORY_DANGEROUS", + + CivicIntegrity: "HARM_CATEGORY_CIVIC_INTEGRITY", + CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY", + HARM_CATEGORY_CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY", +} as const; + +export type GoogleAISafetyCategory = + (typeof GoogleAISafetyCategory)[keyof typeof GoogleAISafetyCategory]; + +export const GoogleAISafetyThreshold = { + None: "BLOCK_NONE", + NONE: "BLOCK_NONE", + BLOCK_NONE: "BLOCK_NONE", + + Few: "BLOCK_ONLY_HIGH", + FEW: "BLOCK_ONLY_HIGH", + BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH", + + Some: "BLOCK_MEDIUM_AND_ABOVE", + SOME: "BLOCK_MEDIUM_AND_ABOVE", + BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE", + + Most: "BLOCK_LOW_AND_ABOVE", + MOST: "BLOCK_LOW_AND_ABOVE", + BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE", + + Off: "OFF", + OFF: "OFF", + BLOCK_OFF: "OFF", +} as const; + +export type GoogleAISafetyThreshold = + (typeof GoogleAISafetyThreshold)[keyof typeof GoogleAISafetyThreshold]; + +export const GoogleAISafetyMethod = { + Severity: "SEVERITY", + Probability: "PROBABILITY", +} as const; + +export type GoogleAISafetyMethod = + (typeof GoogleAISafetyMethod)[keyof typeof GoogleAISafetyMethod]; + export interface GoogleAISafetySetting { - category: string; - threshold: string; - method?: string; + category: GoogleAISafetyCategory | string; + threshold: GoogleAISafetyThreshold | string; + method?: GoogleAISafetyMethod | string; // Just for Vertex AI? 
} export type GoogleAIResponseMimeType = "text/plain" | "application/json"; @@ -149,7 +219,7 @@ export interface GoogleAIBaseLLMInput GoogleConnectionParams, GoogleAIModelParams, GoogleAISafetyParams, - GeminiAPIConfig {} + GoogleAIAPIParams {} export interface GoogleAIBaseLanguageModelCallOptions extends BaseChatModelCallOptions, @@ -314,13 +384,15 @@ export interface GenerateContentResponseData { export type GoogleLLMModelFamily = null | "palm" | "gemini"; +export type VertexModelFamily = GoogleLLMModelFamily | "claude"; + export type GoogleLLMResponseData = | JsonStream | GenerateContentResponseData | GenerateContentResponseData[]; export interface GoogleLLMResponse extends GoogleResponse { - data: GoogleLLMResponseData; + data: GoogleLLMResponseData | AnthropicResponseData; } export interface GoogleAISafetyHandler { @@ -348,6 +420,42 @@ export interface GeminiJsonSchemaDirty extends GeminiJsonSchema { additionalProperties?: boolean; } +export type GoogleAIAPI = { + messageContentToParts?: (content: MessageContent) => Promise; + + baseMessageToContent?: ( + message: BaseMessage, + prevMessage: BaseMessage | undefined, + useSystemInstruction: boolean + ) => Promise; + + responseToString: (response: GoogleLLMResponse) => string; + + responseToChatGeneration: ( + response: GoogleLLMResponse + ) => ChatGenerationChunk | null; + + chunkToString: (chunk: BaseMessageChunk) => string; + + responseToBaseMessage: (response: GoogleLLMResponse) => BaseMessage; + + responseToChatResult: (response: GoogleLLMResponse) => ChatResult; + + formatData: ( + input: unknown, + parameters: GoogleAIModelRequestParams + ) => Promise; +}; + export interface GeminiAPIConfig { + safetyHandler?: GoogleAISafetyHandler; mediaManager?: MediaManager; + useSystemInstruction?: boolean; +} + +export type GoogleAIAPIConfig = GeminiAPIConfig | AnthropicAPIConfig; + +export interface GoogleAIAPIParams { + apiName?: string; + apiConfig?: GoogleAIAPIConfig; } diff --git a/libs/langchain-google-common/src/utils/anthropic.ts b/libs/langchain-google-common/src/utils/anthropic.ts new file mode 100644 index 000000000000..72e1f9e57080 --- /dev/null +++ b/libs/langchain-google-common/src/utils/anthropic.ts @@ -0,0 +1,719 @@ +import { + ChatGeneration, + ChatGenerationChunk, + ChatResult, +} from "@langchain/core/outputs"; +import { + BaseMessage, + BaseMessageChunk, + AIMessageChunk, + MessageContentComplex, + MessageContentText, + MessageContent, + MessageContentImageUrl, + AIMessageFields, + AIMessageChunkFields, +} from "@langchain/core/messages"; +import { + ToolCall, + ToolCallChunk, + ToolMessage, +} from "@langchain/core/messages/tool"; +import { + AnthropicAPIConfig, + AnthropicContent, + AnthropicContentText, + AnthropicContentToolUse, + AnthropicMessage, + AnthropicMessageContent, + AnthropicMessageContentImage, + AnthropicMessageContentText, + AnthropicMessageContentToolResult, + AnthropicMessageContentToolResultContent, + AnthropicRequest, + AnthropicRequestSettings, + AnthropicResponseData, + AnthropicResponseMessage, + AnthropicStreamContentBlockDeltaEvent, + AnthropicStreamContentBlockStartEvent, + AnthropicStreamInputJsonDelta, + AnthropicStreamMessageDeltaEvent, + AnthropicStreamMessageStartEvent, + AnthropicStreamTextDelta, + AnthropicTool, + AnthropicToolChoice, + GeminiTool, + GoogleAIAPI, + GoogleAIModelParams, + GoogleAIModelRequestParams, + GoogleAIToolType, + GoogleLLMResponse, +} from "../types.js"; + +export function getAnthropicAPI(config?: AnthropicAPIConfig): GoogleAIAPI { + function partToString(part: 
AnthropicContent): string { + return "text" in part ? part.text : ""; + } + + function messageToString(message: AnthropicResponseMessage): string { + const content: AnthropicContent[] = message?.content ?? []; + const ret = content.reduce((acc, part) => { + const str = partToString(part); + return acc + str; + }, ""); + return ret; + } + + function responseToString(response: GoogleLLMResponse): string { + const data = response.data as AnthropicResponseData; + switch (data?.type) { + case "message": + return messageToString(data as AnthropicResponseMessage); + default: + throw Error(`Unknown type: ${data?.type}`); + } + } + + /** + * Normalize the AIMessageChunk. + * If the fields are just a string - use that as content. + * If the content is an array of just text fields, turn them into a string. + * @param fields + */ + function newAIMessageChunk(fields: string | AIMessageFields): AIMessageChunk { + if (typeof fields === "string") { + return new AIMessageChunk(fields); + } + const ret: AIMessageFields = { + ...fields, + }; + + if (Array.isArray(fields?.content)) { + let str: string | undefined = ""; + fields.content.forEach((val) => { + if (str !== undefined && val.type === "text") { + str = `${str}${val.text}`; + } else { + str = undefined; + } + }); + if (str) { + ret.content = str; + } + } + + return new AIMessageChunk(ret); + } + + function textContentToMessageFields( + textContent: AnthropicContentText + ): AIMessageFields { + return { + content: [textContent], + }; + } + + function toolUseContentToMessageFields( + toolUseContent: AnthropicContentToolUse + ): AIMessageFields { + const tool: ToolCall = { + id: toolUseContent.id, + name: toolUseContent.name, + type: "tool_call", + args: toolUseContent.input, + }; + return { + content: [], + tool_calls: [tool], + }; + } + + function anthropicContentToMessageFields( + anthropicContent: AnthropicContent + ): AIMessageFields | undefined { + const type = anthropicContent?.type; + switch (type) { + case "text": + return textContentToMessageFields(anthropicContent); + case "tool_use": + return toolUseContentToMessageFields(anthropicContent); + default: + return undefined; + } + } + + function contentToMessage( + anthropicContent: AnthropicContent[] + ): BaseMessageChunk { + const complexContent: MessageContentComplex[] = []; + const toolCalls: ToolCall[] = []; + anthropicContent.forEach((ac) => { + const messageFields = anthropicContentToMessageFields(ac); + if (messageFields?.content) { + complexContent.push( + ...(messageFields.content as MessageContentComplex[]) + ); + } + if (messageFields?.tool_calls) { + toolCalls.push(...messageFields.tool_calls); + } + }); + + const ret: AIMessageFields = { + content: complexContent, + tool_calls: toolCalls, + }; + return newAIMessageChunk(ret); + } + + function messageToGenerationInfo(message: AnthropicResponseMessage) { + const usage = message?.usage; + const usageMetadata: Record = { + input_tokens: usage?.input_tokens ?? 0, + output_tokens: usage?.output_tokens ?? 0, + total_tokens: (usage?.input_tokens ?? 0) + (usage?.output_tokens ?? 0), + }; + return { + usage_metadata: usageMetadata, + finish_reason: message.stop_reason, + }; + } + + function messageToChatGeneration( + responseMessage: AnthropicResponseMessage + ): ChatGenerationChunk { + const content: AnthropicContent[] = responseMessage?.content ?? 
[]; + const text = messageToString(responseMessage); + const message = contentToMessage(content); + const generationInfo = messageToGenerationInfo(responseMessage); + return new ChatGenerationChunk({ + text, + message, + generationInfo, + }); + } + + function messageStartToChatGeneration( + event: AnthropicStreamMessageStartEvent + ): ChatGenerationChunk { + const responseMessage = event.message; + return messageToChatGeneration(responseMessage); + } + + function messageDeltaToChatGeneration( + event: AnthropicStreamMessageDeltaEvent + ): ChatGenerationChunk { + const responseMessage = event.delta; + return messageToChatGeneration(responseMessage as AnthropicResponseMessage); + } + + function contentBlockStartTextToChatGeneration( + event: AnthropicStreamContentBlockStartEvent + ): ChatGenerationChunk | null { + const content = event.content_block; + const message = contentToMessage([content]); + if (!message) { + return null; + } + + const text = "text" in content ? content.text : ""; + return new ChatGenerationChunk({ + message, + text, + }); + } + + function contentBlockStartToolUseToChatGeneration( + event: AnthropicStreamContentBlockStartEvent + ): ChatGenerationChunk | null { + const contentBlock = event.content_block as AnthropicContentToolUse; + const text: string = ""; + const toolChunk: ToolCallChunk = { + type: "tool_call_chunk", + index: event.index, + name: contentBlock.name, + id: contentBlock.id, + }; + if ( + typeof contentBlock.input === "object" && + Object.keys(contentBlock.input).length > 0 + ) { + toolChunk.args = JSON.stringify(contentBlock.input); + } + const toolChunks: ToolCallChunk[] = [toolChunk]; + + const content: MessageContentComplex[] = [ + { + index: event.index, + ...contentBlock, + }, + ]; + const messageFields: AIMessageChunkFields = { + content, + tool_call_chunks: toolChunks, + }; + const message = newAIMessageChunk(messageFields); + return new ChatGenerationChunk({ + message, + text, + }); + } + + function contentBlockStartToChatGeneration( + event: AnthropicStreamContentBlockStartEvent + ): ChatGenerationChunk | null { + switch (event.content_block.type) { + case "text": + return contentBlockStartTextToChatGeneration(event); + case "tool_use": + return contentBlockStartToolUseToChatGeneration(event); + default: + console.warn( + `Unexpected start content_block type: ${JSON.stringify(event)}` + ); + return null; + } + } + + function contentBlockDeltaTextToChatGeneration( + event: AnthropicStreamContentBlockDeltaEvent + ): ChatGenerationChunk { + const delta = event.delta as AnthropicStreamTextDelta; + const text = delta?.text; + const message = newAIMessageChunk(text); + return new ChatGenerationChunk({ + message, + text, + }); + } + + function contentBlockDeltaInputJsonDeltaToChatGeneration( + event: AnthropicStreamContentBlockDeltaEvent + ): ChatGenerationChunk { + const delta = event.delta as AnthropicStreamInputJsonDelta; + const text: string = ""; + const toolChunks: ToolCallChunk[] = [ + { + index: event.index, + args: delta.partial_json, + }, + ]; + const content: MessageContentComplex[] = [ + { + index: event.index, + ...delta, + }, + ]; + const messageFields: AIMessageChunkFields = { + content, + tool_call_chunks: toolChunks, + }; + const message = newAIMessageChunk(messageFields); + return new ChatGenerationChunk({ + message, + text, + }); + } + + function contentBlockDeltaToChatGeneration( + event: AnthropicStreamContentBlockDeltaEvent + ): ChatGenerationChunk | null { + switch (event.delta.type) { + case "text_delta": + return 
contentBlockDeltaTextToChatGeneration(event); + case "input_json_delta": + return contentBlockDeltaInputJsonDeltaToChatGeneration(event); + default: + console.warn( + `Unexpected delta content_block type: ${JSON.stringify(event)}` + ); + return null; + } + } + + function responseToChatGeneration( + response: GoogleLLMResponse + ): ChatGenerationChunk | null { + const data = response.data as AnthropicResponseData; + switch (data.type) { + case "message": + return messageToChatGeneration(data as AnthropicResponseMessage); + case "message_start": + return messageStartToChatGeneration( + data as AnthropicStreamMessageStartEvent + ); + case "message_delta": + return messageDeltaToChatGeneration( + data as AnthropicStreamMessageDeltaEvent + ); + case "content_block_start": + return contentBlockStartToChatGeneration( + data as AnthropicStreamContentBlockStartEvent + ); + case "content_block_delta": + return contentBlockDeltaToChatGeneration( + data as AnthropicStreamContentBlockDeltaEvent + ); + + case "ping": + case "message_stop": + case "content_block_stop": + // These are ignorable + return null; + + case "error": + throw new Error( + `Error while streaming results: ${JSON.stringify(data)}` + ); + + default: + // We don't know what type this is, but Anthropic may have added + // new ones without telling us. Don't error, but don't use them. + console.warn("Unknown data for responseToChatGeneration", data); + // throw new Error(`Unknown response type: ${data.type}`); + return null; + } + } + + function chunkToString(chunk: BaseMessageChunk): string { + if (chunk === null) { + return ""; + } else if (typeof chunk.content === "string") { + return chunk.content; + } else if (chunk.content.length === 0) { + return ""; + } else if (chunk.content[0].type === "text") { + return chunk.content[0].text; + } else { + throw new Error(`Unexpected chunk: ${chunk}`); + } + } + + function responseToBaseMessage(response: GoogleLLMResponse): BaseMessage { + const data = response.data as AnthropicResponseMessage; + const content: AnthropicContent[] = data?.content ?? []; + return contentToMessage(content); + } + + function responseToChatResult(response: GoogleLLMResponse): ChatResult { + const message = response.data as AnthropicResponseMessage; + const generations: ChatGeneration[] = []; + const gen = responseToChatGeneration(response); + if (gen) { + generations.push(gen); + } + const llmOutput = messageToGenerationInfo(message); + return { + generations, + llmOutput, + }; + } + + function formatAnthropicVersion(): string { + return config?.version ?? "vertex-2023-10-16"; + } + + function textContentToAnthropicContent( + content: MessageContentText + ): AnthropicMessageContentText { + return content; + } + + function extractMimeType( + str: string + ): { media_type: string; data: string } | null { + if (str.startsWith("data:")) { + return { + media_type: str.split(":")[1].split(";")[0], + data: str.split(",")[1], + }; + } + return null; + } + + function imageContentToAnthropicContent( + content: MessageContentImageUrl + ): AnthropicMessageContentImage | undefined { + const dataUrl = content.image_url; + const url = typeof dataUrl === "string" ? 
dataUrl : dataUrl?.url; + const urlInfo = extractMimeType(url); + + if (!urlInfo) { + return undefined; + } + + return { + type: "image", + source: { + type: "base64", + ...urlInfo, + }, + }; + } + + function contentComplexToAnthropicContent( + content: MessageContentComplex + ): AnthropicMessageContent | undefined { + const type = content?.type; + switch (type) { + case "text": + return textContentToAnthropicContent(content as MessageContentText); + case "image_url": + return imageContentToAnthropicContent( + content as MessageContentImageUrl + ); + default: + console.warn(`Unexpected content type: ${type}`); + return undefined; + } + } + + function contentToAnthropicContent( + content: MessageContent + ): AnthropicMessageContent[] { + const ret: AnthropicMessageContent[] = []; + + const ca = + typeof content === "string" ? [{ type: "text", text: content }] : content; + ca.forEach((complex) => { + const ac = contentComplexToAnthropicContent(complex); + if (ac) { + ret.push(ac); + } + }); + + return ret; + } + + function baseRoleToAnthropicMessage( + base: BaseMessage, + role: string + ): AnthropicMessage { + const content = contentToAnthropicContent(base.content); + return { + role, + content, + }; + } + + function toolMessageToAnthropicMessage(base: ToolMessage): AnthropicMessage { + const role = "user"; + const toolUseId = base.tool_call_id; + const toolContent = contentToAnthropicContent( + base.content + ) as AnthropicMessageContentToolResultContent[]; + const content: AnthropicMessageContentToolResult[] = [ + { + type: "tool_result", + tool_use_id: toolUseId, + content: toolContent, + }, + ]; + return { + role, + content, + }; + } + + function baseToAnthropicMessage( + base: BaseMessage + ): AnthropicMessage | undefined { + const type = base.getType(); + switch (type) { + case "human": + return baseRoleToAnthropicMessage(base, "user"); + case "ai": + return baseRoleToAnthropicMessage(base, "assistant"); + case "tool": + return toolMessageToAnthropicMessage(base as ToolMessage); + default: + return undefined; + } + } + + function formatMessages(input: BaseMessage[]): AnthropicMessage[] { + const ret: AnthropicMessage[] = []; + + input.forEach((baseMessage) => { + const anthropicMessage = baseToAnthropicMessage(baseMessage); + if (anthropicMessage) { + ret.push(anthropicMessage); + } + }); + + return ret; + } + + function formatSettings( + parameters: GoogleAIModelRequestParams + ): AnthropicRequestSettings { + const ret: AnthropicRequestSettings = { + stream: parameters?.streaming ?? false, + max_tokens: parameters?.maxOutputTokens ?? 8192, + }; + + if (parameters.topP) { + ret.top_p = parameters.topP; + } + if (parameters.topK) { + ret.top_k = parameters.topK; + } + if (parameters.temperature) { + ret.temperature = parameters.temperature; + } + if (parameters.stopSequences) { + ret.stop_sequences = parameters.stopSequences; + } + + return ret; + } + + function contentComplexArrayToText( + contentArray: MessageContentComplex[] + ): string { + let ret = ""; + + contentArray.forEach((content) => { + const contentType = content?.type; + if (contentType === "text") { + const textContent = content as MessageContentText; + ret = `${ret}\n${textContent.text}`; + } + }); + + return ret; + } + + function formatSystem(input: BaseMessage[]): string { + let ret = ""; + + input.forEach((message) => { + if (message._getType() === "system") { + const content = message?.content; + const contentString = + typeof content === "string" + ? 
(content as string) + : contentComplexArrayToText(content as MessageContentComplex[]); + ret = `${ret}\n${contentString}`; + } + }); + + return ret; + } + + function formatGeminiTool(tool: GeminiTool): AnthropicTool[] { + if (Object.hasOwn(tool, "functionDeclarations")) { + const funcs = tool?.functionDeclarations ?? []; + return funcs.map((func) => { + const inputSchema = func.parameters!; + return { + // type: "tool", // This may only be valid for models 20241022+ + name: func.name, + description: func.description, + input_schema: inputSchema, + }; + }); + } else { + console.warn( + `Unable to format GeminiTool: ${JSON.stringify(tool, null, 1)}` + ); + return []; + } + } + + function formatTool(tool: GoogleAIToolType): AnthropicTool[] { + if (Object.hasOwn(tool, "name")) { + return [tool as AnthropicTool]; + } else { + return formatGeminiTool(tool as GeminiTool); + } + } + + function formatTools( + parameters: GoogleAIModelRequestParams + ): AnthropicTool[] { + const tools: GoogleAIToolType[] = parameters?.tools ?? []; + const ret: AnthropicTool[] = []; + tools.forEach((tool) => { + const anthropicTools = formatTool(tool); + anthropicTools.forEach((anthropicTool) => { + if (anthropicTool) { + ret.push(anthropicTool); + } + }); + }); + return ret; + } + + function formatToolChoice( + parameters: GoogleAIModelRequestParams + ): AnthropicToolChoice | undefined { + const choice = parameters?.tool_choice; + if (!choice) { + return undefined; + } else if (typeof choice === "object") { + return choice as AnthropicToolChoice; + } else { + switch (choice) { + case "any": + case "auto": + return { + type: choice, + }; + case "none": + return undefined; + default: + return { + type: "tool", + name: choice, + }; + } + } + } + + async function formatData( + input: unknown, + parameters: GoogleAIModelRequestParams + ): Promise { + const typedInput = input as BaseMessage[]; + const anthropicVersion = formatAnthropicVersion(); + const messages = formatMessages(typedInput); + const settings = formatSettings(parameters); + const system = formatSystem(typedInput); + const tools = formatTools(parameters); + const toolChoice = formatToolChoice(parameters); + const ret: AnthropicRequest = { + anthropic_version: anthropicVersion, + messages, + ...settings, + }; + if (tools && tools.length && parameters?.tool_choice !== "none") { + ret.tools = tools; + } + if (toolChoice) { + ret.tool_choice = toolChoice; + } + if (system?.length) { + ret.system = system; + } + + return ret; + } + + return { + responseToString, + responseToChatGeneration, + chunkToString, + responseToBaseMessage, + responseToChatResult, + formatData, + }; +} + +export function validateClaudeParams(_params: GoogleAIModelParams): void { + // FIXME - validate the parameters +} + +export function isModelClaude(modelName: string): boolean { + return modelName.toLowerCase().startsWith("claude"); +} diff --git a/libs/langchain-google-common/src/utils/common.ts b/libs/langchain-google-common/src/utils/common.ts index b3aa2cba7b4b..bf8ddb228382 100644 --- a/libs/langchain-google-common/src/utils/common.ts +++ b/libs/langchain-google-common/src/utils/common.ts @@ -9,12 +9,13 @@ import type { GoogleAIModelParams, GoogleAIModelRequestParams, GoogleAIToolType, - GoogleLLMModelFamily, + VertexModelFamily, } from "../types.js"; import { jsonSchemaToGeminiParameters, zodToGeminiParameters, } from "./zod_to_gemini_parameters.js"; +import { isModelClaude, validateClaudeParams } from "./anthropic.js"; export function copyAIModelParams( params: GoogleAIModelParams 
| undefined, @@ -143,16 +144,33 @@ export function copyAIModelParamsInto( export function modelToFamily( modelName: string | undefined -): GoogleLLMModelFamily { +): VertexModelFamily { if (!modelName) { return null; } else if (isModelGemini(modelName)) { return "gemini"; + } else if (isModelClaude(modelName)) { + return "claude"; } else { return null; } } +export function modelToPublisher(modelName: string | undefined): string { + const family = modelToFamily(modelName); + switch (family) { + case "gemini": + case "palm": + return "google"; + + case "claude": + return "anthropic"; + + default: + return "unknown"; + } +} + export function validateModelParams( params: GoogleAIModelParams | undefined ): void { @@ -161,6 +179,10 @@ export function validateModelParams( switch (modelToFamily(model)) { case "gemini": return validateGeminiParams(testParams); + + case "claude": + return validateClaudeParams(testParams); + default: throw new Error( `Unable to verify model params: ${JSON.stringify(params)}` diff --git a/libs/langchain-google-common/src/utils/gemini.ts b/libs/langchain-google-common/src/utils/gemini.ts index 472f4c5725d8..cc8e994efec6 100644 --- a/libs/langchain-google-common/src/utils/gemini.ts +++ b/libs/langchain-google-common/src/utils/gemini.ts @@ -21,6 +21,8 @@ import { ChatResult, } from "@langchain/core/outputs"; import { ToolCallChunk } from "@langchain/core/messages/tool"; +import { StructuredToolParams } from "@langchain/core/tools"; +import { isLangChainTool } from "@langchain/core/utils/function_calling"; import type { GoogleLLMResponse, GoogleAIModelParams, @@ -33,10 +35,21 @@ import type { GenerateContentResponseData, GoogleAISafetyHandler, GeminiPartFunctionCall, + GoogleAIAPI, GeminiAPIConfig, } from "../types.js"; import { GoogleAISafetyError } from "./safety.js"; import { MediaBlob } from "../experimental/utils/media_core.js"; +import { + GeminiFunctionDeclaration, + GeminiGenerationConfig, + GeminiRequest, + GeminiSafetySetting, + GeminiTool, + GoogleAIModelRequestParams, + GoogleAIToolType, +} from "../types.js"; +import { zodToGeminiParameters } from "./zod_to_gemini_parameters.js"; export interface FunctionCall { name: string; @@ -60,6 +73,128 @@ export interface ToolCallRaw { function: FunctionCallRaw; } +export interface DefaultGeminiSafetySettings { + errorFinish?: string[]; +} + +export class DefaultGeminiSafetyHandler implements GoogleAISafetyHandler { + errorFinish = ["SAFETY", "RECITATION", "OTHER"]; + + constructor(settings?: DefaultGeminiSafetySettings) { + this.errorFinish = settings?.errorFinish ?? 
this.errorFinish; + } + + handleDataPromptFeedback( + response: GoogleLLMResponse, + data: GenerateContentResponseData + ): GenerateContentResponseData { + // Check to see if our prompt was blocked in the first place + const promptFeedback = data?.promptFeedback; + const blockReason = promptFeedback?.blockReason; + if (blockReason) { + throw new GoogleAISafetyError(response, `Prompt blocked: ${blockReason}`); + } + return data; + } + + handleDataFinishReason( + response: GoogleLLMResponse, + data: GenerateContentResponseData + ): GenerateContentResponseData { + const firstCandidate = data?.candidates?.[0]; + const finishReason = firstCandidate?.finishReason; + if (this.errorFinish.includes(finishReason)) { + throw new GoogleAISafetyError(response, `Finish reason: ${finishReason}`); + } + return data; + } + + handleData( + response: GoogleLLMResponse, + data: GenerateContentResponseData + ): GenerateContentResponseData { + let ret = data; + ret = this.handleDataPromptFeedback(response, ret); + ret = this.handleDataFinishReason(response, ret); + return ret; + } + + handle(response: GoogleLLMResponse): GoogleLLMResponse { + let newdata; + + if ("nextChunk" in response.data) { + // TODO: This is a stream. How to handle? + newdata = response.data; + } else if (Array.isArray(response.data)) { + // If it is an array, try to handle every item in the array + try { + newdata = response.data.map((item) => this.handleData(response, item)); + } catch (xx) { + // eslint-disable-next-line no-instanceof/no-instanceof + if (xx instanceof GoogleAISafetyError) { + throw new GoogleAISafetyError(response, xx.message); + } else { + throw xx; + } + } + } else { + const data = response.data as GenerateContentResponseData; + newdata = this.handleData(response, data); + } + + return { + ...response, + data: newdata, + }; + } +} + +export interface MessageGeminiSafetySettings + extends DefaultGeminiSafetySettings { + msg?: string; + forceNewMessage?: boolean; +} + +export class MessageGeminiSafetyHandler extends DefaultGeminiSafetyHandler { + msg: string = ""; + + forceNewMessage = false; + + constructor(settings?: MessageGeminiSafetySettings) { + super(settings); + this.msg = settings?.msg ?? this.msg; + this.forceNewMessage = settings?.forceNewMessage ?? this.forceNewMessage; + } + + setMessage(data: GenerateContentResponseData): GenerateContentResponseData { + const ret = data; + if ( + this.forceNewMessage || + !data?.candidates?.[0]?.content?.parts?.length + ) { + ret.candidates = data.candidates ?? []; + ret.candidates[0] = data.candidates[0] ?? {}; + ret.candidates[0].content = data.candidates[0].content ?? 
{}; + ret.candidates[0].content = { + role: "model", + parts: [{ text: this.msg }], + }; + } + return ret; + } + + handleData( + response: GoogleLLMResponse, + data: GenerateContentResponseData + ): GenerateContentResponseData { + try { + return super.handleData(response, data); + } catch (xx) { + return this.setMessage(data); + } + } +} + const extractMimeType = ( str: string ): { mimeType: string; data: string } | null => { @@ -72,7 +207,7 @@ const extractMimeType = ( return null; }; -export function getGeminiAPI(config?: GeminiAPIConfig) { +export function getGeminiAPI(config?: GeminiAPIConfig): GoogleAIAPI { function messageContentText( content: MessageContentText ): GeminiPartText | null { @@ -153,7 +288,9 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { } } - throw new Error("Invalid media content"); + throw new Error( + `Invalid media content: ${JSON.stringify(content, null, 1)}` + ); } async function messageContentComplexToPart( @@ -175,7 +312,7 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { return await messageContentMedia(content); default: throw new Error( - `Unsupported type received while converting message to message parts` + `Unsupported type "${content.type}" received while converting message to message parts: ${content}` ); } throw new Error( @@ -282,10 +419,9 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { } async function systemMessageToContent( - message: SystemMessage, - useSystemInstruction: boolean + message: SystemMessage ): Promise { - return useSystemInstruction + return config?.useSystemInstruction ? roleMessageToContent("system", message) : [ ...(await roleMessageToContent("user", message)), @@ -349,16 +485,12 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { async function baseMessageToContent( message: BaseMessage, - prevMessage: BaseMessage | undefined, - useSystemInstruction: boolean + prevMessage: BaseMessage | undefined ): Promise { const type = message._getType(); switch (type) { case "system": - return systemMessageToContent( - message as SystemMessage, - useSystemInstruction - ); + return systemMessageToContent(message as SystemMessage); case "human": return roleMessageToContent("user", message); case "ai": @@ -519,9 +651,10 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { function safeResponseTo( response: GoogleLLMResponse, - safetyHandler: GoogleAISafetyHandler, responseTo: (response: GoogleLLMResponse) => RetType ): RetType { + const safetyHandler = + config?.safetyHandler ?? 
new DefaultGeminiSafetyHandler(); try { const safeResponse = safetyHandler.handle(response); return responseTo(safeResponse); @@ -535,11 +668,8 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { } } - function safeResponseToString( - response: GoogleLLMResponse, - safetyHandler: GoogleAISafetyHandler - ): string { - return safeResponseTo(response, safetyHandler, responseToString); + function safeResponseToString(response: GoogleLLMResponse): string { + return safeResponseTo(response, responseToString); } function responseToGenerationInfo(response: GoogleLLMResponse) { @@ -575,10 +705,9 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { } function safeResponseToChatGeneration( - response: GoogleLLMResponse, - safetyHandler: GoogleAISafetyHandler + response: GoogleLLMResponse ): ChatGenerationChunk { - return safeResponseTo(response, safetyHandler, responseToChatGeneration); + return safeResponseTo(response, responseToChatGeneration); } function chunkToString(chunk: BaseMessageChunk): string { @@ -724,11 +853,8 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { return new AIMessage(fields); } - function safeResponseToBaseMessage( - response: GoogleLLMResponse, - safetyHandler: GoogleAISafetyHandler - ): BaseMessage { - return safeResponseTo(response, safetyHandler, responseToBaseMessage); + function safeResponseToBaseMessage(response: GoogleLLMResponse): BaseMessage { + return safeResponseTo(response, responseToBaseMessage); } function responseToChatResult(response: GoogleLLMResponse): ChatResult { @@ -739,167 +865,269 @@ export function getGeminiAPI(config?: GeminiAPIConfig) { }; } - function safeResponseToChatResult( - response: GoogleLLMResponse, - safetyHandler: GoogleAISafetyHandler - ): ChatResult { - return safeResponseTo(response, safetyHandler, responseToChatResult); - } - - return { - messageContentToParts, - baseMessageToContent, - safeResponseToString, - safeResponseToChatGeneration, - chunkToString, - safeResponseToBaseMessage, - safeResponseToChatResult, - }; -} - -export function validateGeminiParams(params: GoogleAIModelParams): void { - if (params.maxOutputTokens && params.maxOutputTokens < 0) { - throw new Error("`maxOutputTokens` must be a positive integer"); - } - - if ( - params.temperature && - (params.temperature < 0 || params.temperature > 2) - ) { - throw new Error("`temperature` must be in the range of [0.0,2.0]"); + function safeResponseToChatResult(response: GoogleLLMResponse): ChatResult { + return safeResponseTo(response, responseToChatResult); } - if (params.topP && (params.topP < 0 || params.topP > 1)) { - throw new Error("`topP` must be in the range of [0.0,1.0]"); + function inputType( + input: MessageContent | BaseMessage[] + ): "MessageContent" | "BaseMessageArray" { + if (typeof input === "string") { + return "MessageContent"; + } else { + const firstItem: BaseMessage | MessageContentComplex = input[0]; + if (Object.hasOwn(firstItem, "content")) { + return "BaseMessageArray"; + } else { + return "MessageContent"; + } + } } - if (params.topK && params.topK < 0) { - throw new Error("`topK` must be a positive integer"); + async function formatMessageContents( + input: MessageContent, + _parameters: GoogleAIModelParams + ): Promise { + const parts = await messageContentToParts!(input); + const contents: GeminiContent[] = [ + { + role: "user", // Required by Vertex AI + parts, + }, + ]; + return contents; } -} -export function isModelGemini(modelName: string): boolean { - return modelName.toLowerCase().startsWith("gemini"); -} + 
async function formatBaseMessageContents( + input: BaseMessage[], + _parameters: GoogleAIModelParams + ): Promise { + const inputPromises: Promise[] = input.map((msg, i) => + baseMessageToContent!(msg, input[i - 1]) + ); + const inputs = await Promise.all(inputPromises); -export interface DefaultGeminiSafetySettings { - errorFinish?: string[]; -} + return inputs.reduce((acc, cur) => { + // Filter out the system content + if (cur.every((content) => content.role === "system")) { + return acc; + } -export class DefaultGeminiSafetyHandler implements GoogleAISafetyHandler { - errorFinish = ["SAFETY", "RECITATION", "OTHER"]; + // Combine adjacent function messages + if ( + cur[0]?.role === "function" && + acc.length > 0 && + acc[acc.length - 1].role === "function" + ) { + acc[acc.length - 1].parts = [ + ...acc[acc.length - 1].parts, + ...cur[0].parts, + ]; + } else { + acc.push(...cur); + } - constructor(settings?: DefaultGeminiSafetySettings) { - this.errorFinish = settings?.errorFinish ?? this.errorFinish; + return acc; + }, [] as GeminiContent[]); } - handleDataPromptFeedback( - response: GoogleLLMResponse, - data: GenerateContentResponseData - ): GenerateContentResponseData { - // Check to see if our prompt was blocked in the first place - const promptFeedback = data?.promptFeedback; - const blockReason = promptFeedback?.blockReason; - if (blockReason) { - throw new GoogleAISafetyError(response, `Prompt blocked: ${blockReason}`); + async function formatContents( + input: MessageContent | BaseMessage[], + parameters: GoogleAIModelRequestParams + ): Promise { + const it = inputType(input); + switch (it) { + case "MessageContent": + return formatMessageContents(input as MessageContent, parameters); + case "BaseMessageArray": + return formatBaseMessageContents(input as BaseMessage[], parameters); + default: + throw new Error(`Unknown input type "${it}": ${input}`); } - return data; } - handleDataFinishReason( - response: GoogleLLMResponse, - data: GenerateContentResponseData - ): GenerateContentResponseData { - const firstCandidate = data?.candidates?.[0]; - const finishReason = firstCandidate?.finishReason; - if (this.errorFinish.includes(finishReason)) { - throw new GoogleAISafetyError(response, `Finish reason: ${finishReason}`); - } - return data; + function formatGenerationConfig( + parameters: GoogleAIModelRequestParams + ): GeminiGenerationConfig { + return { + temperature: parameters.temperature, + topK: parameters.topK, + topP: parameters.topP, + maxOutputTokens: parameters.maxOutputTokens, + stopSequences: parameters.stopSequences, + responseMimeType: parameters.responseMimeType, + }; } - handleData( - response: GoogleLLMResponse, - data: GenerateContentResponseData - ): GenerateContentResponseData { - let ret = data; - ret = this.handleDataPromptFeedback(response, ret); - ret = this.handleDataFinishReason(response, ret); - return ret; + function formatSafetySettings( + parameters: GoogleAIModelRequestParams + ): GeminiSafetySetting[] { + return parameters.safetySettings ?? []; } - handle(response: GoogleLLMResponse): GoogleLLMResponse { - let newdata; - - if ("nextChunk" in response.data) { - // TODO: This is a stream. How to handle? 
- newdata = response.data; - } else if (Array.isArray(response.data)) { - // If it is an array, try to handle every item in the array - try { - newdata = response.data.map((item) => this.handleData(response, item)); - } catch (xx) { - // eslint-disable-next-line no-instanceof/no-instanceof - if (xx instanceof GoogleAISafetyError) { - throw new GoogleAISafetyError(response, xx.message); + async function formatBaseMessageSystemInstruction( + input: BaseMessage[] + ): Promise { + let ret = {} as GeminiContent; + for (let index = 0; index < input.length; index += 1) { + const message = input[index]; + if (message._getType() === "system") { + // For system types, we only want it if it is the first message, + // if it appears anywhere else, it should be an error. + if (index === 0) { + // eslint-disable-next-line prefer-destructuring + ret = (await baseMessageToContent!(message, undefined))[0]; } else { - throw xx; + throw new Error( + "System messages are only permitted as the first passed message." + ); } } - } else { - const data = response.data as GenerateContentResponseData; - newdata = this.handleData(response, data); } + return ret; + } + + async function formatSystemInstruction( + input: MessageContent | BaseMessage[] + ): Promise { + if (!config?.useSystemInstruction) { + return {} as GeminiContent; + } + + const it = inputType(input); + switch (it) { + case "BaseMessageArray": + return formatBaseMessageSystemInstruction(input as BaseMessage[]); + default: + return {} as GeminiContent; + } + } + + function structuredToolToFunctionDeclaration( + tool: StructuredToolParams + ): GeminiFunctionDeclaration { + const jsonSchema = zodToGeminiParameters(tool.schema); return { - ...response, - data: newdata, + name: tool.name, + description: tool.description ?? `A function available to call.`, + parameters: jsonSchema, }; } -} -export interface MessageGeminiSafetySettings - extends DefaultGeminiSafetySettings { - msg?: string; - forceNewMessage?: boolean; -} + function structuredToolsToGeminiTools( + tools: StructuredToolParams[] + ): GeminiTool[] { + return [ + { + functionDeclarations: tools.map(structuredToolToFunctionDeclaration), + }, + ]; + } -export class MessageGeminiSafetyHandler extends DefaultGeminiSafetyHandler { - msg: string = ""; + function formatTools(parameters: GoogleAIModelRequestParams): GeminiTool[] { + const tools: GoogleAIToolType[] | undefined = parameters?.tools; + if (!tools || tools.length === 0) { + return []; + } - forceNewMessage = false; + if (tools.every(isLangChainTool)) { + return structuredToolsToGeminiTools(tools); + } else { + if ( + tools.length === 1 && + (!("functionDeclarations" in tools[0]) || + !tools[0].functionDeclarations?.length) + ) { + return []; + } + return tools as GeminiTool[]; + } + } - constructor(settings?: MessageGeminiSafetySettings) { - super(settings); - this.msg = settings?.msg ?? this.msg; - this.forceNewMessage = settings?.forceNewMessage ?? 
this.forceNewMessage; + function formatToolConfig( + parameters: GoogleAIModelRequestParams + ): GeminiRequest["toolConfig"] | undefined { + if (!parameters.tool_choice || typeof parameters.tool_choice !== "string") { + return undefined; + } + + return { + functionCallingConfig: { + mode: parameters.tool_choice as "auto" | "any" | "none", + allowedFunctionNames: parameters.allowed_function_names, + }, + }; } - setMessage(data: GenerateContentResponseData): GenerateContentResponseData { - const ret = data; + async function formatData( + input: unknown, + parameters: GoogleAIModelRequestParams + ): Promise { + const typedInput = input as MessageContent | BaseMessage[]; + const contents = await formatContents(typedInput, parameters); + const generationConfig = formatGenerationConfig(parameters); + const tools = formatTools(parameters); + const toolConfig = formatToolConfig(parameters); + const safetySettings = formatSafetySettings(parameters); + const systemInstruction = await formatSystemInstruction(typedInput); + + const ret: GeminiRequest = { + contents, + generationConfig, + }; + if (tools && tools.length) { + ret.tools = tools; + } + if (toolConfig) { + ret.toolConfig = toolConfig; + } + if (safetySettings && safetySettings.length) { + ret.safetySettings = safetySettings; + } if ( - this.forceNewMessage || - !data?.candidates?.[0]?.content?.parts?.length + systemInstruction?.role && + systemInstruction?.parts && + systemInstruction?.parts?.length ) { - ret.candidates = data.candidates ?? []; - ret.candidates[0] = data.candidates[0] ?? {}; - ret.candidates[0].content = data.candidates[0].content ?? {}; - ret.candidates[0].content = { - role: "model", - parts: [{ text: this.msg }], - }; + ret.systemInstruction = systemInstruction; } return ret; } - handleData( - response: GoogleLLMResponse, - data: GenerateContentResponseData - ): GenerateContentResponseData { - try { - return super.handleData(response, data); - } catch (xx) { - return this.setMessage(data); - } + return { + messageContentToParts, + baseMessageToContent, + responseToString: safeResponseToString, + responseToChatGeneration: safeResponseToChatGeneration, + chunkToString, + responseToBaseMessage: safeResponseToBaseMessage, + responseToChatResult: safeResponseToChatResult, + formatData, + }; +} + +export function validateGeminiParams(params: GoogleAIModelParams): void { + if (params.maxOutputTokens && params.maxOutputTokens < 0) { + throw new Error("`maxOutputTokens` must be a positive integer"); + } + + if ( + params.temperature && + (params.temperature < 0 || params.temperature > 2) + ) { + throw new Error("`temperature` must be in the range of [0.0,2.0]"); + } + + if (params.topP && (params.topP < 0 || params.topP > 1)) { + throw new Error("`topP` must be in the range of [0.0,1.0]"); + } + + if (params.topK && params.topK < 0) { + throw new Error("`topK` must be a positive integer"); } } + +export function isModelGemini(modelName: string): boolean { + return modelName.toLowerCase().startsWith("gemini"); +} diff --git a/libs/langchain-google-common/src/utils/stream.ts b/libs/langchain-google-common/src/utils/stream.ts index 226ac49dca10..2a61446864f3 100644 --- a/libs/langchain-google-common/src/utils/stream.ts +++ b/libs/langchain-google-common/src/utils/stream.ts @@ -1,5 +1,34 @@ import { GenerationChunk } from "@langchain/core/outputs"; +export interface AbstractStream { + /** + * Add more text to the buffer + * @param data + */ + appendBuffer(data: string): void; + + /** + * Indicate that there is no more text to be 
added to the buffer + * (ie - our source material is done) + */ + closeBuffer(): void; + /** + * Get the next chunk that is coming from the stream. + * This chunk may be null, usually indicating the last chunk in the stream. + */ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + nextChunk(): Promise; + + /** + * Is the stream done? + * A stream is only done if all of the following are true: + * - There is no more data to be added to the text buffer + * - There is no more data in the text buffer + * - There are no chunks that are waiting to be consumed + */ + get streamDone(): boolean; +} + export function complexValue(value: unknown): unknown { if (value === null || typeof value === "undefined") { // I dunno what to put here. An error, probably @@ -68,8 +97,7 @@ export function simpleValue(val: unknown): unknown { return val; } } - -export class JsonStream { +export class JsonStream implements AbstractStream { _buffer = ""; _bufferOpen = true; @@ -247,11 +275,13 @@ export class ComplexJsonStream extends JsonStream { } } -export class ReadableJsonStream extends JsonStream { +export class ReadableAbstractStream implements AbstractStream { + private baseStream: AbstractStream; + decoder: TextDecoder; - constructor(body: ReadableStream | null) { - super(); + constructor(baseStream: AbstractStream, body: ReadableStream | null) { + this.baseStream = baseStream; this.decoder = new TextDecoder("utf-8"); if (body) { void this.run(body); @@ -260,6 +290,23 @@ export class ReadableJsonStream extends JsonStream { } } + appendBuffer(data: string): void { + return this.baseStream.appendBuffer(data); + } + + closeBuffer(): void { + return this.baseStream.closeBuffer(); + } + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + nextChunk(): Promise { + return this.baseStream.nextChunk(); + } + + get streamDone(): boolean { + return this.baseStream.streamDone; + } + async run(body: ReadableStream) { const reader = body.getReader(); let isDone = false; @@ -275,3 +322,147 @@ export class ReadableJsonStream extends JsonStream { } } } + +export class ReadableJsonStream extends ReadableAbstractStream { + constructor(body: ReadableStream | null) { + super(new JsonStream(), body); + } +} + +export class SseStream implements AbstractStream { + _buffer = ""; + + _bufferOpen = true; + + appendBuffer(data: string): void { + this._buffer += data; + this._parseBuffer(); + } + + closeBuffer(): void { + this._bufferOpen = false; + this._parseBuffer(); + } + + /** + * Attempt to load an entire event. + * For each entire event we load, + * send them to be handled. + */ + _parseBuffer(): void { + const events = this._buffer.split(/\n\n/); + this._buffer = events.pop() ?? ""; + events.forEach((event) => this._handleEvent(event.trim())); + + if (!this._bufferOpen) { + // No more data will be added, and we have parsed + // everything. So dump the rest. + this._handleEvent(null); + this._buffer = ""; + } + } + + /** + * Given an event string, get all the fields + * in the event. 
It is assumed there is one field + * per line, but that field names can be duplicated, + * indicating to append the new value to the previous value + * @param event + */ + _parseEvent(event: string | null): Record | null { + if (!event || event.trim() === "") { + return null; + } + const ret: Record = {}; + + const lines = event.split(/\n/); + lines.forEach((line) => { + const match = line.match(/^([^:]+): \s*(.+)\n*$/); + if (match && match.length === 3) { + const key = match[1]; + const val = match[2]; + const cur = ret[key] ?? ""; + ret[key] = `${cur}${val}`; + } + }); + + return ret; + } + + // Set up a potential Promise that the handler can resolve. + // eslint-disable-next-line @typescript-eslint/no-explicit-any + _chunkResolution: (chunk: any) => void; + + // If there is no Promise (it is null), the handler must add it to the queue + // eslint-disable-next-line @typescript-eslint/no-explicit-any + _chunkPending: Promise | null = null; + + // A queue that will collect chunks while there is no Promise + // eslint-disable-next-line @typescript-eslint/no-explicit-any + _chunkQueue: any[] = []; + + _handleEvent(event: string | null): void { + const chunk = this._parseEvent(event); + if (this._chunkPending) { + this._chunkResolution(chunk); + this._chunkPending = null; + } else { + this._chunkQueue.push(chunk); + } + } + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + async nextChunk(): Promise { + if (this._chunkQueue.length > 0) { + // If there is data in the queue, return the next queue chunk + return this._chunkQueue.shift() as Record; + } else { + // Otherwise, set up a promise that handleChunk will cause to be resolved + this._chunkPending = new Promise((resolve) => { + this._chunkResolution = resolve; + }); + return this._chunkPending; + } + } + + get streamDone(): boolean { + return ( + !this._bufferOpen && + this._buffer.length === 0 && + this._chunkQueue.length === 0 && + this._chunkPending === null + ); + } +} + +export class ReadableSseStream extends ReadableAbstractStream { + constructor(body: ReadableStream | null) { + super(new SseStream(), body); + } +} + +export class SseJsonStream extends SseStream { + _jsonAttribute: string = "data"; + + constructor(jsonAttribute?: string) { + super(); + this._jsonAttribute = jsonAttribute ?? 
this._jsonAttribute; + } + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + async nextChunk(): Promise { + const eventRecord = (await super.nextChunk()) as Record; + const json = eventRecord?.[this._jsonAttribute]; + if (!json) { + return null; + } else { + return JSON.parse(json); + } + } +} + +export class ReadableSseJsonStream extends ReadableAbstractStream { + constructor(body: ReadableStream | null) { + super(new SseJsonStream(), body); + } +} diff --git a/libs/langchain-google-gauth/src/auth.ts b/libs/langchain-google-gauth/src/auth.ts index 21093bcbce42..bb8053b9c521 100644 --- a/libs/langchain-google-gauth/src/auth.ts +++ b/libs/langchain-google-gauth/src/auth.ts @@ -1,16 +1,21 @@ import { Readable } from "stream"; import { + AbstractStream, ensureAuthOptionScopes, GoogleAbstractedClient, GoogleAbstractedClientOps, GoogleConnectionParams, JsonStream, + SseJsonStream, + SseStream, } from "@langchain/google-common"; import { GoogleAuth, GoogleAuthOptions } from "google-auth-library"; -export class NodeJsonStream extends JsonStream { - constructor(data: Readable) { - super(); +export class NodeAbstractStream implements AbstractStream { + private baseStream: AbstractStream; + + constructor(baseStream: AbstractStream, data: Readable) { + this.baseStream = baseStream; const decoder = new TextDecoder("utf-8"); data.on("data", (data) => { const text = decoder.decode(data, { stream: true }); @@ -22,6 +27,41 @@ export class NodeJsonStream extends JsonStream { this.closeBuffer(); }); } + + appendBuffer(data: string): void { + return this.baseStream.appendBuffer(data); + } + + closeBuffer(): void { + return this.baseStream.closeBuffer(); + } + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + nextChunk(): Promise { + return this.baseStream.nextChunk(); + } + + get streamDone(): boolean { + return this.baseStream.streamDone; + } +} + +export class NodeJsonStream extends NodeAbstractStream { + constructor(data: Readable) { + super(new JsonStream(), data); + } +} + +export class NodeSseStream extends NodeAbstractStream { + constructor(data: Readable) { + super(new SseStream(), data); + } +} + +export class NodeSseJsonStream extends NodeAbstractStream { + constructor(data: Readable) { + super(new SseJsonStream(), data); + } } export class GAuthClient implements GoogleAbstractedClient { @@ -47,12 +87,21 @@ export class GAuthClient implements GoogleAbstractedClient { async request(opts: GoogleAbstractedClientOps): Promise { try { const ret = await this.gauth.request(opts); - return opts.responseType !== "stream" - ? ret - : { - ...ret, - data: new NodeJsonStream(ret.data), - }; + const [contentType] = ret?.headers?.["content-type"]?.split(/;/) ?? 
[""]; + if (opts.responseType !== "stream") { + return ret; + } else if (contentType === "text/event-stream") { + return { + ...ret, + data: new NodeSseJsonStream(ret.data), + }; + } else { + return { + ...ret, + data: new NodeJsonStream(ret.data), + }; + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any } catch (xx: any) { console.error("call to gauth.request", JSON.stringify(xx, null, 2)); diff --git a/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts b/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts index 2bc8f483b13b..0284189ebec4 100644 --- a/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts +++ b/libs/langchain-google-gauth/src/tests/chat_models.int.test.ts @@ -258,7 +258,7 @@ describe("GAuth Chat", () => { actionIfBlobMissing: undefined, }, }); - const canonicalStore = new BlobStoreGoogleCloudStorage({ + const backingStore = new BlobStoreGoogleCloudStorage({ uriPrefix: new GoogleCloudStorageUri("gs://test-langchainjs/mediatest/"), defaultStoreOptions: { actionIfInvalid: "prefixPath", @@ -266,7 +266,7 @@ describe("GAuth Chat", () => { }); const blobStore = new ReadThroughBlobStore({ baseStore: aliasStore, - backingStore: canonicalStore, + backingStore, }); const resolver = new SimpleWebBlobStore(); const mediaManager = new MediaManager({ @@ -275,7 +275,9 @@ describe("GAuth Chat", () => { }); const model = new ChatGoogle({ modelName: "gemini-1.5-flash", - mediaManager, + apiConfig: { + mediaManager, + }, }); const message: MessageContentComplex[] = [ @@ -285,7 +287,7 @@ describe("GAuth Chat", () => { }, { type: "media", - fileUri: "https://js.langchain.com/img/brand/wordmark.png", + fileUri: "https://js.langchain.com/v0.2/img/brand/wordmark.png", }, ]; diff --git a/libs/langchain-google-vertexai/src/tests/chat_models.int.test.ts b/libs/langchain-google-vertexai/src/tests/chat_models.int.test.ts index dcac30321b53..a3b8bbe4b2d8 100644 --- a/libs/langchain-google-vertexai/src/tests/chat_models.int.test.ts +++ b/libs/langchain-google-vertexai/src/tests/chat_models.int.test.ts @@ -1,4 +1,4 @@ -import { test } from "@jest/globals"; +import { expect, test } from "@jest/globals"; import fs from "fs/promises"; import { BaseLanguageModelInput } from "@langchain/core/language_models/base"; import { ChatPromptValue } from "@langchain/core/prompt_values"; @@ -34,12 +34,44 @@ import { MessagesPlaceholder, } from "@langchain/core/prompts"; import { InMemoryStore } from "@langchain/core/stores"; +import { BaseCallbackHandler } from "@langchain/core/callbacks/base"; +import { + GoogleRequestLogger, + GoogleRequestRecorder, +} from "@langchain/google-common"; import { GeminiTool } from "../types.js"; import { ChatVertexAI } from "../chat_models.js"; -describe("GAuth Chat", () => { +const weatherTool = tool((_) => "no-op", { + name: "get_weather", + description: + "Get the weather of a specific location and return the temperature in Celsius.", + schema: z.object({ + location: z.string().describe("The name of city to get the weather for."), + }), +}); + +const calculatorTool = tool((_) => "no-op", { + name: "calculator", + description: "Calculate the result of a math expression.", + schema: z.object({ + expression: z.string().describe("The math expression to calculate."), + }), +}); + +describe("GAuth Gemini Chat", () => { + let recorder: GoogleRequestRecorder; + let callbacks: BaseCallbackHandler[]; + + beforeEach(() => { + recorder = new GoogleRequestRecorder(); + callbacks = [recorder, new GoogleRequestLogger()]; + }); + test("invoke", async () 
=> { - const model = new ChatVertexAI(); + const model = new ChatVertexAI({ + callbacks, + }); const res = await model.invoke("What is 1 + 1?"); expect(res).toBeDefined(); expect(res._getType()).toEqual("ai"); @@ -75,7 +107,9 @@ describe("GAuth Chat", () => { }); test("stream", async () => { - const model = new ChatVertexAI(); + const model = new ChatVertexAI({ + callbacks, + }); const input: BaseLanguageModelInput = new ChatPromptValue([ new SystemMessage( "You will reply to all requests to flip a coin with either H, indicating heads, or T, indicating tails." @@ -225,7 +259,7 @@ describe("GAuth Chat", () => { actionIfBlobMissing: undefined, }, }); - const canonicalStore = new BlobStoreGoogleCloudStorage({ + const backingStore = new BlobStoreGoogleCloudStorage({ uriPrefix: new GoogleCloudStorageUri("gs://test-langchainjs/mediatest/"), defaultStoreOptions: { actionIfInvalid: "prefixPath", @@ -233,7 +267,7 @@ describe("GAuth Chat", () => { }); const blobStore = new ReadThroughBlobStore({ baseStore: aliasStore, - backingStore: canonicalStore, + backingStore, }); const resolver = new SimpleWebBlobStore(); const mediaManager = new MediaManager({ @@ -242,7 +276,9 @@ describe("GAuth Chat", () => { }); const model = new ChatGoogle({ modelName: "gemini-1.5-flash", - mediaManager, + apiConfig: { + mediaManager, + }, }); const message: MessageContentComplex[] = [ @@ -252,7 +288,7 @@ describe("GAuth Chat", () => { }, { type: "media", - fileUri: "https://js.langchain.com/img/brand/wordmark.png", + fileUri: "https://js.langchain.com/v0.2/img/brand/wordmark.png", }, ]; @@ -279,208 +315,305 @@ describe("GAuth Chat", () => { throw e; } }); -}); -test("Stream token count usage_metadata", async () => { - const model = new ChatVertexAI({ - temperature: 0, - maxOutputTokens: 10, - }); - let res: AIMessageChunk | null = null; - for await (const chunk of await model.stream( - "Why is the sky blue? Be concise." - )) { - if (!res) { - res = chunk; - } else { - res = res.concat(chunk); + test("Stream token count usage_metadata", async () => { + const model = new ChatVertexAI({ + temperature: 0, + maxOutputTokens: 10, + }); + let res: AIMessageChunk | null = null; + for await (const chunk of await model.stream( + "Why is the sky blue? Be concise." + )) { + if (!res) { + res = chunk; + } else { + res = res.concat(chunk); + } } - } - // console.log(res); - expect(res?.usage_metadata).toBeDefined(); - if (!res?.usage_metadata) { - return; - } - expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); - expect(res.usage_metadata.output_tokens).toBeGreaterThan(1); - expect(res.usage_metadata.total_tokens).toBe( - res.usage_metadata.input_tokens + res.usage_metadata.output_tokens - ); -}); - -test("streamUsage excludes token usage", async () => { - const model = new ChatVertexAI({ - temperature: 0, - streamUsage: false, - }); - let res: AIMessageChunk | null = null; - for await (const chunk of await model.stream( - "Why is the sky blue? Be concise." 
- )) { - if (!res) { - res = chunk; - } else { - res = res.concat(chunk); + // console.log(res); + expect(res?.usage_metadata).toBeDefined(); + if (!res?.usage_metadata) { + return; } - } - // console.log(res); - expect(res?.usage_metadata).not.toBeDefined(); -}); + expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); + expect(res.usage_metadata.output_tokens).toBeGreaterThan(1); + expect(res.usage_metadata.total_tokens).toBe( + res.usage_metadata.input_tokens + res.usage_metadata.output_tokens + ); + }); -test("Invoke token count usage_metadata", async () => { - const model = new ChatVertexAI({ - temperature: 0, - maxOutputTokens: 10, + test("streamUsage excludes token usage", async () => { + const model = new ChatVertexAI({ + temperature: 0, + streamUsage: false, + }); + let res: AIMessageChunk | null = null; + for await (const chunk of await model.stream( + "Why is the sky blue? Be concise." + )) { + if (!res) { + res = chunk; + } else { + res = res.concat(chunk); + } + } + // console.log(res); + expect(res?.usage_metadata).not.toBeDefined(); }); - const res = await model.invoke("Why is the sky blue? Be concise."); - // console.log(res); - expect(res?.usage_metadata).toBeDefined(); - if (!res?.usage_metadata) { - return; - } - expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); - expect(res.usage_metadata.output_tokens).toBeGreaterThan(1); - expect(res.usage_metadata.total_tokens).toBe( - res.usage_metadata.input_tokens + res.usage_metadata.output_tokens - ); -}); -test("Streaming true constructor param will stream", async () => { - const modelWithStreaming = new ChatVertexAI({ - maxOutputTokens: 50, - streaming: true, + test("Invoke token count usage_metadata", async () => { + const model = new ChatVertexAI({ + temperature: 0, + maxOutputTokens: 10, + }); + const res = await model.invoke("Why is the sky blue? Be concise."); + // console.log(res); + expect(res?.usage_metadata).toBeDefined(); + if (!res?.usage_metadata) { + return; + } + expect(res.usage_metadata.input_tokens).toBeGreaterThan(1); + expect(res.usage_metadata.output_tokens).toBeGreaterThan(1); + expect(res.usage_metadata.total_tokens).toBe( + res.usage_metadata.input_tokens + res.usage_metadata.output_tokens + ); }); - let totalTokenCount = 0; - let tokensString = ""; - const result = await modelWithStreaming.invoke("What is 1 + 1?", { - callbacks: [ - { - handleLLMNewToken: (tok) => { - totalTokenCount += 1; - tokensString += tok; + test("Streaming true constructor param will stream", async () => { + const modelWithStreaming = new ChatVertexAI({ + maxOutputTokens: 50, + streaming: true, + }); + + let totalTokenCount = 0; + let tokensString = ""; + const result = await modelWithStreaming.invoke("What is 1 + 1?", { + callbacks: [ + { + handleLLMNewToken: (tok) => { + totalTokenCount += 1; + tokensString += tok; + }, }, - }, - ], + ], + }); + + expect(result).toBeDefined(); + expect(result.content).toBe(tokensString); + + expect(totalTokenCount).toBeGreaterThan(1); }); - expect(result).toBeDefined(); - expect(result.content).toBe(tokensString); + test("Can force a model to invoke a tool", async () => { + const model = new ChatVertexAI({ + model: "gemini-1.5-pro", + }); + const modelWithTools = model.bind({ + tools: [calculatorTool, weatherTool], + tool_choice: "calculator", + }); - expect(totalTokenCount).toBeGreaterThan(1); -}); + const result = await modelWithTools.invoke( + "Whats the weather like in paris today? What's 1836 plus 7262?" 
+ ); -test("Can force a model to invoke a tool", async () => { - const model = new ChatVertexAI({ - model: "gemini-1.5-pro", - }); - const weatherTool = tool((_) => "no-op", { - name: "get_weather", - description: - "Get the weather of a specific location and return the temperature in Celsius.", - schema: z.object({ - location: z.string().describe("The name of city to get the weather for."), - }), - }); - const calculatorTool = tool((_) => "no-op", { - name: "calculator", - description: "Calculate the result of a math expression.", - schema: z.object({ - expression: z.string().describe("The math expression to calculate."), - }), - }); - const modelWithTools = model.bind({ - tools: [calculatorTool, weatherTool], - tool_choice: "calculator", + expect(result.tool_calls).toHaveLength(1); + expect(result.tool_calls?.[0]).toBeDefined(); + if (!result.tool_calls?.[0]) return; + expect(result.tool_calls?.[0].name).toBe("calculator"); + expect(result.tool_calls?.[0].args).toHaveProperty("expression"); }); - const result = await modelWithTools.invoke( - "Whats the weather like in paris today? What's 1836 plus 7262?" - ); + test("ChatGoogleGenerativeAI can stream tools", async () => { + const model = new ChatVertexAI({}); - expect(result.tool_calls).toHaveLength(1); - expect(result.tool_calls?.[0]).toBeDefined(); - if (!result.tool_calls?.[0]) return; - expect(result.tool_calls?.[0].name).toBe("calculator"); - expect(result.tool_calls?.[0].args).toHaveProperty("expression"); -}); + const weatherTool = tool( + (_) => "The weather in San Francisco today is 18 degrees and sunny.", + { + name: "current_weather_tool", + description: "Get the current weather for a given location.", + schema: z.object({ + location: z.string().describe("The location to get the weather for."), + }), + } + ); + + const modelWithTools = model.bindTools([weatherTool]); + const stream = await modelWithTools.stream( + "Whats the weather like today in San Francisco?" + ); + let finalChunk: AIMessageChunk | undefined; + for await (const chunk of stream) { + finalChunk = !finalChunk ? chunk : concat(finalChunk, chunk); + } -test("ChatGoogleGenerativeAI can stream tools", async () => { - const model = new ChatVertexAI({}); + expect(finalChunk).toBeDefined(); + if (!finalChunk) return; - const weatherTool = tool( - (_) => "The weather in San Francisco today is 18 degrees and sunny.", - { - name: "current_weather_tool", - description: "Get the current weather for a given location.", - schema: z.object({ - location: z.string().describe("The location to get the weather for."), - }), + const toolCalls = finalChunk.tool_calls; + expect(toolCalls).toBeDefined(); + if (!toolCalls) { + throw new Error("tool_calls not in response"); } - ); - - const modelWithTools = model.bindTools([weatherTool]); - const stream = await modelWithTools.stream( - "Whats the weather like today in San Francisco?" - ); - let finalChunk: AIMessageChunk | undefined; - for await (const chunk of stream) { - finalChunk = !finalChunk ? 
chunk : concat(finalChunk, chunk); + expect(toolCalls.length).toBe(1); + expect(toolCalls[0].name).toBe("current_weather_tool"); + expect(toolCalls[0].args).toHaveProperty("location"); + }); + + async function fileToBase64(filePath: string): Promise { + const fileData = await fs.readFile(filePath); + const base64String = Buffer.from(fileData).toString("base64"); + return base64String; } - expect(finalChunk).toBeDefined(); - if (!finalChunk) return; + test("Gemini can understand audio", async () => { + // Update this with the correct path to an audio file on your machine. + const audioPath = + "../langchain-google-genai/src/tests/data/gettysburg10.wav"; + const audioMimeType = "audio/wav"; - const toolCalls = finalChunk.tool_calls; - expect(toolCalls).toBeDefined(); - if (!toolCalls) { - throw new Error("tool_calls not in response"); - } - expect(toolCalls.length).toBe(1); - expect(toolCalls[0].name).toBe("current_weather_tool"); - expect(toolCalls[0].args).toHaveProperty("location"); + const model = new ChatVertexAI({ + model: "gemini-1.5-flash", + temperature: 0, + maxRetries: 0, + }); + + const audioBase64 = await fileToBase64(audioPath); + + const prompt = ChatPromptTemplate.fromMessages([ + new MessagesPlaceholder("audio"), + ]); + + const chain = prompt.pipe(model); + const response = await chain.invoke({ + audio: new HumanMessage({ + content: [ + { + type: "media", + mimeType: audioMimeType, + data: audioBase64, + }, + { + type: "text", + text: "Summarize the content in this audio. ALso, what is the speaker's tone?", + }, + ], + }), + }); + + expect(typeof response.content).toBe("string"); + expect((response.content as string).length).toBeGreaterThan(15); + }); }); -async function fileToBase64(filePath: string): Promise { - const fileData = await fs.readFile(filePath); - const base64String = Buffer.from(fileData).toString("base64"); - return base64String; -} - -test("Gemini can understand audio", async () => { - // Update this with the correct path to an audio file on your machine. - const audioPath = "../langchain-google-genai/src/tests/data/gettysburg10.wav"; - const audioMimeType = "audio/wav"; - - const model = new ChatVertexAI({ - model: "gemini-1.5-flash", - temperature: 0, - maxRetries: 0, +describe("GAuth Anthropic Chat", () => { + let recorder: GoogleRequestRecorder; + let callbacks: BaseCallbackHandler[]; + + // const modelName: string = "claude-3-5-sonnet@20240620"; + // const modelName: string = "claude-3-sonnet@20240229"; + const modelName: string = "claude-3-5-sonnet-v2@20241022"; + + beforeEach(() => { + recorder = new GoogleRequestRecorder(); + callbacks = [recorder, new GoogleRequestLogger()]; }); - const audioBase64 = await fileToBase64(audioPath); + test("invoke", async () => { + const model = new ChatVertexAI({ + modelName, + callbacks, + }); + const res = await model.invoke("What is 1 + 1?"); + expect(res).toBeDefined(); + expect(res._getType()).toEqual("ai"); - const prompt = ChatPromptTemplate.fromMessages([ - new MessagesPlaceholder("audio"), - ]); + const aiMessage = res as AIMessageChunk; + expect(aiMessage.content).toBeDefined(); - const chain = prompt.pipe(model); - const response = await chain.invoke({ - audio: new HumanMessage({ - content: [ - { - type: "media", - mimeType: audioMimeType, - data: audioBase64, - }, - { - type: "text", - text: "Summarize the content in this audio. 
ALso, what is the speaker's tone?", - }, - ], - }), + expect(typeof aiMessage.content).toBe("string"); + const text = aiMessage.content as string; + expect(text).toMatch(/(1 + 1 (equals|is|=) )?2.? ?/); + + const connection = recorder?.request?.connection; + expect(connection?.url).toEqual( + `https://us-east5-aiplatform.googleapis.com/v1/projects/test-vertex-ai-382612/locations/us-east5/publishers/anthropic/models/${modelName}:rawPredict` + ); + + console.log(JSON.stringify(aiMessage, null, 1)); + console.log(aiMessage.lc_kwargs); }); - expect(typeof response.content).toBe("string"); - expect((response.content as string).length).toBeGreaterThan(15); + test("stream", async () => { + const model = new ChatVertexAI({ + modelName, + callbacks, + }); + const stream = await model.stream("How are you today? Be verbose."); + const chunks = []; + for await (const chunk of stream) { + console.log(chunk); + chunks.push(chunk); + } + expect(chunks.length).toBeGreaterThan(1); + }); + + test("tool invocation", async () => { + const model = new ChatVertexAI({ + modelName, + callbacks, + }); + const modelWithTools = model.bind({ + tools: [weatherTool], + }); + + const result = await modelWithTools.invoke( + "Whats the weather like in paris today?" + ); + + const request = recorder?.request ?? {}; + const data = request?.data; + expect(data).toHaveProperty("tools"); + expect(data.tools).toHaveLength(1); + + expect(result.tool_calls).toHaveLength(1); + expect(result.tool_calls?.[0]).toBeDefined(); + expect(result.tool_calls?.[0].name).toBe("get_weather"); + expect(result.tool_calls?.[0].args).toHaveProperty("location"); + }); + + test("stream tools", async () => { + const model = new ChatVertexAI({ + modelName, + callbacks, + }); + + const weatherTool = tool( + (_) => "The weather in San Francisco today is 18 degrees and sunny.", + { + name: "current_weather_tool", + description: "Get the current weather for a given location.", + schema: z.object({ + location: z.string().describe("The location to get the weather for."), + }), + } + ); + + const modelWithTools = model.bindTools([weatherTool]); + const stream = await modelWithTools.stream( + "Whats the weather like today in San Francisco?" + ); + let finalChunk: AIMessageChunk | undefined; + for await (const chunk of stream) { + finalChunk = !finalChunk ? 
chunk : concat(finalChunk, chunk); + } + + expect(finalChunk).toBeDefined(); + const toolCalls = finalChunk?.tool_calls; + expect(toolCalls).toBeDefined(); + expect(toolCalls?.length).toBe(1); + expect(toolCalls?.[0].name).toBe("current_weather_tool"); + expect(toolCalls?.[0].args).toHaveProperty("location"); + }); }); diff --git a/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts b/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts index 106b685bfee8..0e10359599b3 100644 --- a/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts +++ b/libs/langchain-google-webauth/src/tests/chat_models.int.test.ts @@ -204,7 +204,9 @@ describe("Google APIKey Chat", () => { const model = new ChatGoogle({ modelName: "gemini-1.5-flash", apiVersion: "v1beta", - mediaManager, + apiConfig: { + mediaManager, + }, }); const message: MessageContentComplex[] = [ @@ -214,7 +216,7 @@ describe("Google APIKey Chat", () => { }, { type: "media", - fileUri: "https://js.langchain.com/img/brand/wordmark.png", + fileUri: "https://js.langchain.com/v0.2/img/brand/wordmark.png", }, ]; From 2a7a2b832f704f85b3f786a4bd3044a1e5dafc08 Mon Sep 17 00:00:00 2001 From: Denny Wang <75592161+rd4cake@users.noreply.github.com> Date: Mon, 11 Nov 2024 19:13:40 -0500 Subject: [PATCH 077/100] fix(community) : Upgrade node-llama-cpp to be compatible with version 3 (#7135) Co-authored-by: Jacky Chen --- .../docs/integrations/chat/llama_cpp.mdx | 4 +- .../docs/integrations/llms/llama_cpp.mdx | 4 +- .../integrations/text_embedding/llama_cpp.mdx | 4 +- examples/src/embeddings/llama_cpp_basic.ts | 2 +- examples/src/embeddings/llama_cpp_docs.ts | 2 +- .../src/models/chat/integration_llama_cpp.ts | 2 +- .../chat/integration_llama_cpp_chain.ts | 5 +- .../chat/integration_llama_cpp_stream.ts | 5 +- .../integration_llama_cpp_stream_invoke.ts | 5 +- .../integration_llama_cpp_stream_multi.ts | 5 +- .../chat/integration_llama_cpp_system.ts | 2 +- examples/src/models/llm/llama_cpp.ts | 2 +- examples/src/models/llm/llama_cpp_stream.ts | 5 +- libs/langchain-community/package.json | 2 +- .../src/chat_models/llama_cpp.ts | 93 +- .../tests/chatllama_cpp.int.test.ts | 23 +- .../src/embeddings/llama_cpp.ts | 30 +- .../embeddings/tests/llama_cpp.int.test.ts | 10 +- .../langchain-community/src/llms/llama_cpp.ts | 50 +- .../src/llms/tests/llama_cpp.int.test.ts | 21 +- .../src/utils/llama_cpp.ts | 56 +- yarn.lock | 1041 +++++++++++------ 22 files changed, 908 insertions(+), 465 deletions(-) diff --git a/docs/core_docs/docs/integrations/chat/llama_cpp.mdx b/docs/core_docs/docs/integrations/chat/llama_cpp.mdx index c0190f3041f6..dbff7089dece 100644 --- a/docs/core_docs/docs/integrations/chat/llama_cpp.mdx +++ b/docs/core_docs/docs/integrations/chat/llama_cpp.mdx @@ -12,14 +12,14 @@ This module is based on the [node-llama-cpp](https://github.com/withcatai/node-l ## Setup -You'll need to install major version `2` of the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) module to communicate with your local model. +You'll need to install major version `3` of the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) module to communicate with your local model. 
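Version `3` also changes how the chat wrapper is constructed: the model is now created with the asynchronous `ChatLlamaCpp.initialize` method instead of the constructor. A minimal sketch based on the updated examples (the model path is a placeholder for your own local `.gguf` file):

```typescript
import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp";

// Placeholder path - point this at your own local model file.
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";

// node-llama-cpp v3 loads the model and its context asynchronously,
// so the wrapper is created with initialize() rather than new.
const model = await ChatLlamaCpp.initialize({
  modelPath: llamaPath,
  temperature: 0.5,
});

const response = await model.invoke("Where do Llamas come from?");
console.log(response.content);
```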
import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; ```bash npm2yarn -npm install -S node-llama-cpp@2 @langchain/community @langchain/core +npm install -S node-llama-cpp@3 @langchain/community @langchain/core ``` You will also need a local Llama 2 model (or a model supported by [node-llama-cpp](https://github.com/withcatai/node-llama-cpp)). You will need to pass the path to this model to the LlamaCpp module as a part of the parameters (see example). diff --git a/docs/core_docs/docs/integrations/llms/llama_cpp.mdx b/docs/core_docs/docs/integrations/llms/llama_cpp.mdx index 576ea560440b..508229ac13b6 100644 --- a/docs/core_docs/docs/integrations/llms/llama_cpp.mdx +++ b/docs/core_docs/docs/integrations/llms/llama_cpp.mdx @@ -12,10 +12,10 @@ This module is based on the [node-llama-cpp](https://github.com/withcatai/node-l ## Setup -You'll need to install major version `2` of the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) module to communicate with your local model. +You'll need to install major version `3` of the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) module to communicate with your local model. ```bash npm2yarn -npm install -S node-llama-cpp@2 +npm install -S node-llama-cpp@3 ``` import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; diff --git a/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx b/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx index b4cb6868dd83..35ec34988a9c 100644 --- a/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx +++ b/docs/core_docs/docs/integrations/text_embedding/llama_cpp.mdx @@ -12,10 +12,10 @@ This module is based on the [node-llama-cpp](https://github.com/withcatai/node-l ## Setup -You'll need to install major version `2` of the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) module to communicate with your local model. +You'll need to install major version `3` of the [node-llama-cpp](https://github.com/withcatai/node-llama-cpp) module to communicate with your local model. 
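The plain LLM wrapper follows the same pattern in version `3`: it is created with the asynchronous `LlamaCpp.initialize` method rather than the constructor. A minimal sketch (the model path is again a placeholder for your own local `.gguf` file):

```typescript
import { LlamaCpp } from "@langchain/community/llms/llama_cpp";

// Placeholder path - replace with the location of your own model file.
const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin";

// As with the chat wrapper, the model is now loaded asynchronously.
const model = await LlamaCpp.initialize({
  modelPath: llamaPath,
  temperature: 0.7,
});

const response = await model.invoke("Where do Llamas come from?");
console.log(response);
```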
```bash npm2yarn -npm install -S node-llama-cpp@2 +npm install -S node-llama-cpp@3 ``` import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; diff --git a/examples/src/embeddings/llama_cpp_basic.ts b/examples/src/embeddings/llama_cpp_basic.ts index 1f956f785eff..cf89ffd4262f 100644 --- a/examples/src/embeddings/llama_cpp_basic.ts +++ b/examples/src/embeddings/llama_cpp_basic.ts @@ -2,7 +2,7 @@ import { LlamaCppEmbeddings } from "@langchain/community/embeddings/llama_cpp"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const embeddings = new LlamaCppEmbeddings({ +const embeddings = await LlamaCppEmbeddings.initialize({ modelPath: llamaPath, }); diff --git a/examples/src/embeddings/llama_cpp_docs.ts b/examples/src/embeddings/llama_cpp_docs.ts index 8a2b5f773745..19e9ee404abe 100644 --- a/examples/src/embeddings/llama_cpp_docs.ts +++ b/examples/src/embeddings/llama_cpp_docs.ts @@ -4,7 +4,7 @@ const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; const documents = ["Hello World!", "Bye Bye!"]; -const embeddings = new LlamaCppEmbeddings({ +const embeddings = await LlamaCppEmbeddings.initialize({ modelPath: llamaPath, }); diff --git a/examples/src/models/chat/integration_llama_cpp.ts b/examples/src/models/chat/integration_llama_cpp.ts index edad8fe7cfa0..bdd2f7818c3c 100644 --- a/examples/src/models/chat/integration_llama_cpp.ts +++ b/examples/src/models/chat/integration_llama_cpp.ts @@ -3,7 +3,7 @@ import { HumanMessage } from "@langchain/core/messages"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const model = new ChatLlamaCpp({ modelPath: llamaPath }); +const model = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); const response = await model.invoke([ new HumanMessage({ content: "My name is John." }), diff --git a/examples/src/models/chat/integration_llama_cpp_chain.ts b/examples/src/models/chat/integration_llama_cpp_chain.ts index fa4778e2d6c6..3499929b7ef7 100644 --- a/examples/src/models/chat/integration_llama_cpp_chain.ts +++ b/examples/src/models/chat/integration_llama_cpp_chain.ts @@ -4,7 +4,10 @@ import { PromptTemplate } from "@langchain/core/prompts"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const model = new ChatLlamaCpp({ modelPath: llamaPath, temperature: 0.5 }); +const model = await ChatLlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.5, +}); const prompt = PromptTemplate.fromTemplate( "What is a good name for a company that makes {product}?" 
diff --git a/examples/src/models/chat/integration_llama_cpp_stream.ts b/examples/src/models/chat/integration_llama_cpp_stream.ts index 2f5072dca0f3..33697fedd876 100644 --- a/examples/src/models/chat/integration_llama_cpp_stream.ts +++ b/examples/src/models/chat/integration_llama_cpp_stream.ts @@ -2,7 +2,10 @@ import { ChatLlamaCpp } from "@langchain/community/chat_models/llama_cpp"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const model = new ChatLlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); +const model = await ChatLlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, +}); const stream = await model.stream("Tell me a short story about a happy Llama."); diff --git a/examples/src/models/chat/integration_llama_cpp_stream_invoke.ts b/examples/src/models/chat/integration_llama_cpp_stream_invoke.ts index 7685b687955f..f452b9764fd8 100644 --- a/examples/src/models/chat/integration_llama_cpp_stream_invoke.ts +++ b/examples/src/models/chat/integration_llama_cpp_stream_invoke.ts @@ -3,7 +3,10 @@ import { SystemMessage, HumanMessage } from "@langchain/core/messages"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const model = new ChatLlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); +const model = await ChatLlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, +}); const controller = new AbortController(); diff --git a/examples/src/models/chat/integration_llama_cpp_stream_multi.ts b/examples/src/models/chat/integration_llama_cpp_stream_multi.ts index de4dcafe9224..9d2d337d0284 100644 --- a/examples/src/models/chat/integration_llama_cpp_stream_multi.ts +++ b/examples/src/models/chat/integration_llama_cpp_stream_multi.ts @@ -3,7 +3,10 @@ import { SystemMessage, HumanMessage } from "@langchain/core/messages"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); +const llamaCpp = await ChatLlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, +}); const stream = await llamaCpp.stream([ new SystemMessage( diff --git a/examples/src/models/chat/integration_llama_cpp_system.ts b/examples/src/models/chat/integration_llama_cpp_system.ts index 4c371491eeaf..ec53a8aac4b7 100644 --- a/examples/src/models/chat/integration_llama_cpp_system.ts +++ b/examples/src/models/chat/integration_llama_cpp_system.ts @@ -3,7 +3,7 @@ import { SystemMessage, HumanMessage } from "@langchain/core/messages"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const model = new ChatLlamaCpp({ modelPath: llamaPath }); +const model = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); const response = await model.invoke([ new SystemMessage( diff --git a/examples/src/models/llm/llama_cpp.ts b/examples/src/models/llm/llama_cpp.ts index 3411837ee9fd..da7d8b487930 100644 --- a/examples/src/models/llm/llama_cpp.ts +++ b/examples/src/models/llm/llama_cpp.ts @@ -3,7 +3,7 @@ import { LlamaCpp } from "@langchain/community/llms/llama_cpp"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; const question = "Where do Llamas come from?"; -const model = new LlamaCpp({ modelPath: llamaPath }); +const model = await LlamaCpp.initialize({ modelPath: llamaPath }); console.log(`You: ${question}`); const response = await model.invoke(question); diff --git a/examples/src/models/llm/llama_cpp_stream.ts b/examples/src/models/llm/llama_cpp_stream.ts index b95fddebc33a..022da280ff5d 100644 --- 
a/examples/src/models/llm/llama_cpp_stream.ts +++ b/examples/src/models/llm/llama_cpp_stream.ts @@ -2,7 +2,10 @@ import { LlamaCpp } from "@langchain/community/llms/llama_cpp"; const llamaPath = "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin"; -const model = new LlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); +const model = await LlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, +}); const prompt = "Tell me a short story about a happy Llama."; diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index a1f60050f981..23517ee984d2 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -187,7 +187,7 @@ "mongodb": "^5.2.0", "mysql2": "^3.9.8", "neo4j-driver": "^5.17.0", - "node-llama-cpp": "^2", + "node-llama-cpp": "3.1.1", "notion-to-md": "^3.1.0", "officeparser": "^4.0.4", "pdf-parse": "1.1.1", diff --git a/libs/langchain-community/src/chat_models/llama_cpp.ts b/libs/langchain-community/src/chat_models/llama_cpp.ts index 94685dc087df..960228c1bb29 100644 --- a/libs/langchain-community/src/chat_models/llama_cpp.ts +++ b/libs/langchain-community/src/chat_models/llama_cpp.ts @@ -3,7 +3,11 @@ import { LlamaModel, LlamaContext, LlamaChatSession, - type ConversationInteraction, + type Token, + ChatUserMessage, + ChatModelResponse, + ChatHistoryItem, + getLlama, } from "node-llama-cpp"; import { @@ -47,7 +51,7 @@ export interface LlamaCppCallOptions extends BaseLanguageModelCallOptions { * @example * ```typescript * // Initialize the ChatLlamaCpp model with the path to the model binary file. - * const model = new ChatLlamaCpp({ + * const model = await ChatLlamaCpp.initialize({ * modelPath: "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin", * temperature: 0.5, * }); @@ -87,20 +91,35 @@ export class ChatLlamaCpp extends SimpleChatModel { return "ChatLlamaCpp"; } - constructor(inputs: LlamaCppInputs) { + public constructor(inputs: LlamaCppInputs) { super(inputs); this.maxTokens = inputs?.maxTokens; this.temperature = inputs?.temperature; this.topK = inputs?.topK; this.topP = inputs?.topP; this.trimWhitespaceSuffix = inputs?.trimWhitespaceSuffix; - this._model = createLlamaModel(inputs); - this._context = createLlamaContext(this._model, inputs); this._session = null; } + /** + * Initializes the llama_cpp model for usage in the chat models wrapper. + * @param inputs - the inputs passed onto the model. + * @returns A Promise that resolves to the ChatLlamaCpp type class. 
+ */ + public static async initialize( + inputs: LlamaBaseCppInputs + ): Promise { + const instance = new ChatLlamaCpp(inputs); + const llama = await getLlama(); + + instance._model = await createLlamaModel(inputs, llama); + instance._context = await createLlamaContext(instance._model, inputs); + + return instance; + } + _llmType() { - return "llama2_cpp"; + return "llama_cpp"; } /** @ignore */ @@ -146,7 +165,9 @@ export class ChatLlamaCpp extends SimpleChatModel { signal: options.signal, onToken: async (tokens: number[]) => { options.onToken?.(tokens); - await runManager?.handleLLMNewToken(this._context.decode(tokens)); + await runManager?.handleLLMNewToken( + this._model.detokenize(tokens.map((num) => num as Token)) + ); }, maxTokens: this?.maxTokens, temperature: this?.temperature, @@ -180,20 +201,23 @@ export class ChatLlamaCpp extends SimpleChatModel { }; const prompt = this._buildPrompt(input); + const sequence = this._context.getSequence(); const stream = await this.caller.call(async () => - this._context.evaluate(this._context.encode(prompt), promptOptions) + sequence.evaluate(this._model.tokenize(prompt), promptOptions) ); for await (const chunk of stream) { yield new ChatGenerationChunk({ - text: this._context.decode([chunk]), + text: this._model.detokenize([chunk]), message: new AIMessageChunk({ - content: this._context.decode([chunk]), + content: this._model.detokenize([chunk]), }), generationInfo: {}, }); - await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? ""); + await runManager?.handleLLMNewToken( + this._model.detokenize([chunk]) ?? "" + ); } } @@ -202,12 +226,12 @@ export class ChatLlamaCpp extends SimpleChatModel { let prompt = ""; let sysMessage = ""; let noSystemMessages: BaseMessage[] = []; - let interactions: ConversationInteraction[] = []; + let interactions: ChatHistoryItem[] = []; // Let's see if we have a system message - if (messages.findIndex((msg) => msg._getType() === "system") !== -1) { + if (messages.findIndex((msg) => msg.getType() === "system") !== -1) { const sysMessages = messages.filter( - (message) => message._getType() === "system" + (message) => message.getType() === "system" ); const systemMessageContent = sysMessages[sysMessages.length - 1].content; @@ -222,7 +246,7 @@ export class ChatLlamaCpp extends SimpleChatModel { // Now filter out the system messages noSystemMessages = messages.filter( - (message) => message._getType() !== "system" + (message) => message.getType() !== "system" ); } else { noSystemMessages = messages; @@ -231,9 +255,7 @@ export class ChatLlamaCpp extends SimpleChatModel { // Lets see if we just have a prompt left or are their previous interactions? if (noSystemMessages.length > 1) { // Is the last message a prompt? 
- if ( - noSystemMessages[noSystemMessages.length - 1]._getType() === "human" - ) { + if (noSystemMessages[noSystemMessages.length - 1].getType() === "human") { const finalMessageContent = noSystemMessages[noSystemMessages.length - 1].content; if (typeof finalMessageContent !== "string") { @@ -261,23 +283,23 @@ export class ChatLlamaCpp extends SimpleChatModel { // Now lets construct a session according to what we got if (sysMessage !== "" && interactions.length > 0) { this._session = new LlamaChatSession({ - context: this._context, - conversationHistory: interactions, + contextSequence: this._context.getSequence(), systemPrompt: sysMessage, }); + this._session.setChatHistory(interactions); } else if (sysMessage !== "" && interactions.length === 0) { this._session = new LlamaChatSession({ - context: this._context, + contextSequence: this._context.getSequence(), systemPrompt: sysMessage, }); } else if (sysMessage === "" && interactions.length > 0) { this._session = new LlamaChatSession({ - context: this._context, - conversationHistory: interactions, + contextSequence: this._context.getSequence(), }); + this._session.setChatHistory(interactions); } else { this._session = new LlamaChatSession({ - context: this._context, + contextSequence: this._context.getSequence(), }); } @@ -287,8 +309,8 @@ export class ChatLlamaCpp extends SimpleChatModel { // This builds a an array of interactions protected _convertMessagesToInteractions( messages: BaseMessage[] - ): ConversationInteraction[] { - const result: ConversationInteraction[] = []; + ): ChatHistoryItem[] { + const result: ChatHistoryItem[] = []; for (let i = 0; i < messages.length; i += 2) { if (i + 1 < messages.length) { @@ -299,10 +321,13 @@ export class ChatLlamaCpp extends SimpleChatModel { "ChatLlamaCpp does not support non-string message content." 
); } - result.push({ - prompt, - response, - }); + const llamaPrompt: ChatUserMessage = { type: "user", text: prompt }; + const llamaResponse: ChatModelResponse = { + type: "model", + response: [response], + }; + result.push(llamaPrompt); + result.push(llamaResponse); } } @@ -313,11 +338,11 @@ export class ChatLlamaCpp extends SimpleChatModel { const prompt = input .map((message) => { let messageText; - if (message._getType() === "human") { + if (message.getType() === "human") { messageText = `[INST] ${message.content} [/INST]`; - } else if (message._getType() === "ai") { + } else if (message.getType() === "ai") { messageText = message.content; - } else if (message._getType() === "system") { + } else if (message.getType() === "system") { messageText = `<> ${message.content} <>`; } else if (ChatMessage.isInstance(message)) { messageText = `\n\n${message.role[0].toUpperCase()}${message.role.slice( @@ -325,7 +350,7 @@ export class ChatLlamaCpp extends SimpleChatModel { )}: ${message.content}`; } else { console.warn( - `Unsupported message type passed to llama_cpp: "${message._getType()}"` + `Unsupported message type passed to llama_cpp: "${message.getType()}"` ); messageText = ""; } diff --git a/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts index 488e15c0874e..54b43217ab41 100644 --- a/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatllama_cpp.int.test.ts @@ -12,7 +12,7 @@ import { ChatLlamaCpp } from "../llama_cpp.js"; const llamaPath = getEnvironmentVariable("LLAMA_PATH")!; test.skip("Test predict", async () => { - const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var @@ -21,7 +21,7 @@ test.skip("Test predict", async () => { }); test.skip("Test call", async () => { - const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var @@ -32,7 +32,7 @@ test.skip("Test call", async () => { }); test.skip("Test multiple messages", async () => { - const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var @@ -47,7 +47,7 @@ test.skip("Test multiple messages", async () => { }); test.skip("Test system message", async () => { - const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath }); + const llamaCpp = await ChatLlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var @@ -61,7 +61,10 @@ test.skip("Test system message", async () => { }); test.skip("test streaming call", async () => { - const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); + const llamaCpp = await ChatLlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, + }); const stream = await llamaCpp.stream( "Tell me a short story about a happy Llama." 
@@ -77,7 +80,10 @@ test.skip("test streaming call", async () => { }); test.skip("test multi-mesage streaming call", async () => { - const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); + const llamaCpp = await ChatLlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, + }); const stream = await llamaCpp.stream([ new SystemMessage( @@ -96,7 +102,10 @@ test.skip("test multi-mesage streaming call", async () => { }); test.skip("test multi-mesage streaming call and abort after 5s", async () => { - const llamaCpp = new ChatLlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); + const llamaCpp = await ChatLlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, + }); const controller = new AbortController(); setTimeout(() => { controller.abort(); diff --git a/libs/langchain-community/src/embeddings/llama_cpp.ts b/libs/langchain-community/src/embeddings/llama_cpp.ts index ab0a60cff60b..5a2103b33902 100644 --- a/libs/langchain-community/src/embeddings/llama_cpp.ts +++ b/libs/langchain-community/src/embeddings/llama_cpp.ts @@ -1,5 +1,5 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { LlamaModel, LlamaContext } from "node-llama-cpp"; +import { LlamaModel, LlamaContext, getLlama } from "node-llama-cpp"; import { Embeddings, type EmbeddingsParams } from "@langchain/core/embeddings"; import { LlamaBaseCppInputs, @@ -19,8 +19,8 @@ export interface LlamaCppEmbeddingsParams * @example * ```typescript * // Initialize LlamaCppEmbeddings with the path to the model file - * const embeddings = new LlamaCppEmbeddings({ - * modelPath: "/Replace/with/path/to/your/model/gguf-llama2-q4_0.bin", + * const embeddings = await LlamaCppEmbeddings.initialize({ + * modelPath: llamaPath, * }); * * // Embed a query string using the Llama embeddings @@ -36,13 +36,27 @@ export class LlamaCppEmbeddings extends Embeddings { _context: LlamaContext; - constructor(inputs: LlamaCppEmbeddingsParams) { + public constructor(inputs: LlamaCppEmbeddingsParams) { super(inputs); const _inputs = inputs; _inputs.embedding = true; + } + + /** + * Initializes the llama_cpp model for usage in the embeddings wrapper. + * @param inputs - the inputs passed onto the model. + * @returns A Promise that resolves to the LlamaCppEmbeddings type class. 
+ */ + public static async initialize( + inputs: LlamaBaseCppInputs + ): Promise { + const instance = new LlamaCppEmbeddings(inputs); + const llama = await getLlama(); + + instance._model = await createLlamaModel(inputs, llama); + instance._context = await createLlamaContext(instance._model, inputs); - this._model = createLlamaModel(_inputs); - this._context = createLlamaContext(this._model, _inputs); + return instance; } /** @@ -57,7 +71,7 @@ export class LlamaCppEmbeddings extends Embeddings { const encodings = await this.caller.call( () => new Promise((resolve) => { - resolve(this._context.encode(text)); + resolve(this._model.tokenize(text)); }) ); tokensArray.push(encodings); @@ -90,7 +104,7 @@ export class LlamaCppEmbeddings extends Embeddings { const encodings = await this.caller.call( () => new Promise((resolve) => { - resolve(this._context.encode(text)); + resolve(this._model.tokenize(text)); }) ); diff --git a/libs/langchain-community/src/embeddings/tests/llama_cpp.int.test.ts b/libs/langchain-community/src/embeddings/tests/llama_cpp.int.test.ts index b1819f943a21..60a805eab8fa 100644 --- a/libs/langchain-community/src/embeddings/tests/llama_cpp.int.test.ts +++ b/libs/langchain-community/src/embeddings/tests/llama_cpp.int.test.ts @@ -7,13 +7,17 @@ import { LlamaCppEmbeddings } from "../llama_cpp.js"; const llamaPath = getEnvironmentVariable("LLAMA_PATH")!; test.skip("Test LlamaCppEmbeddings.embedQuery", async () => { - const embeddings = new LlamaCppEmbeddings({ modelPath: llamaPath }); + const embeddings = await LlamaCppEmbeddings.initialize({ + modelPath: llamaPath, + }); const res = await embeddings.embedQuery("Hello Llama"); expect(typeof res[0]).toBe("number"); }); test.skip("Test LlamaCppEmbeddings.embedDocuments", async () => { - const embeddings = new LlamaCppEmbeddings({ modelPath: llamaPath }); + const embeddings = await LlamaCppEmbeddings.initialize({ + modelPath: llamaPath, + }); const res = await embeddings.embedDocuments(["Hello Llama", "Bye bye"]); expect(res).toHaveLength(2); expect(typeof res[0][0]).toBe("number"); @@ -21,7 +25,7 @@ test.skip("Test LlamaCppEmbeddings.embedDocuments", async () => { }); test.skip("Test LlamaCppEmbeddings concurrency", async () => { - const embeddings = new LlamaCppEmbeddings({ + const embeddings = await LlamaCppEmbeddings.initialize({ modelPath: llamaPath, batchSize: 1, }); diff --git a/libs/langchain-community/src/llms/llama_cpp.ts b/libs/langchain-community/src/llms/llama_cpp.ts index e39831299829..24fcc529a864 100644 --- a/libs/langchain-community/src/llms/llama_cpp.ts +++ b/libs/langchain-community/src/llms/llama_cpp.ts @@ -5,6 +5,7 @@ import { LlamaChatSession, LlamaJsonSchemaGrammar, LlamaGrammar, + getLlama, GbnfJsonSchema, } from "node-llama-cpp"; import { @@ -72,22 +73,38 @@ export class LlamaCpp extends LLM { return "LlamaCpp"; } - constructor(inputs: LlamaCppInputs) { + public constructor(inputs: LlamaCppInputs) { super(inputs); this.maxTokens = inputs?.maxTokens; this.temperature = inputs?.temperature; this.topK = inputs?.topK; this.topP = inputs?.topP; this.trimWhitespaceSuffix = inputs?.trimWhitespaceSuffix; - this._model = createLlamaModel(inputs); - this._context = createLlamaContext(this._model, inputs); - this._session = createLlamaSession(this._context); - this._jsonSchema = createLlamaJsonSchemaGrammar(inputs?.jsonSchema); - this._gbnf = createCustomGrammar(inputs?.gbnf); + } + + /** + * Initializes the llama_cpp model for usage. + * @param inputs - the inputs passed onto the model. 
+ * @returns A Promise that resolves to the LlamaCpp type class. + */ + public static async initialize(inputs: LlamaCppInputs): Promise { + const instance = new LlamaCpp(inputs); + const llama = await getLlama(); + + instance._model = await createLlamaModel(inputs, llama); + instance._context = await createLlamaContext(instance._model, inputs); + instance._jsonSchema = await createLlamaJsonSchemaGrammar( + inputs?.jsonSchema, + llama + ); + instance._gbnf = await createCustomGrammar(inputs?.gbnf, llama); + instance._session = createLlamaSession(instance._context); + + return instance; } _llmType() { - return "llama2_cpp"; + return "llama_cpp"; } /** @ignore */ @@ -116,6 +133,11 @@ export class LlamaCpp extends LLM { }; const completion = await this._session.prompt(prompt, promptOptions); + + if (this._jsonSchema !== undefined && completion !== undefined) { + return this._jsonSchema.parse(completion) as unknown as string; + } + return completion; } catch (e) { throw new Error("Error getting prompt completion."); @@ -134,16 +156,24 @@ export class LlamaCpp extends LLM { topP: this?.topP, }; + if (this._context.sequencesLeft === 0) { + this._context = await createLlamaContext(this._model, LlamaCpp.inputs); + } + const sequence = this._context.getSequence(); + const tokens = this._model.tokenize(prompt); + const stream = await this.caller.call(async () => - this._context.evaluate(this._context.encode(prompt), promptOptions) + sequence.evaluate(tokens, promptOptions) ); for await (const chunk of stream) { yield new GenerationChunk({ - text: this._context.decode([chunk]), + text: this._model.detokenize([chunk]), generationInfo: {}, }); - await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? ""); + await runManager?.handleLLMNewToken( + this._model.detokenize([chunk]) ?? "" + ); } } } diff --git a/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts b/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts index 8f8d34e70a52..9f05bb0de10e 100644 --- a/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts +++ b/libs/langchain-community/src/llms/tests/llama_cpp.int.test.ts @@ -6,7 +6,7 @@ import { LlamaCpp } from "../llama_cpp.js"; const llamaPath = getEnvironmentVariable("LLAMA_PATH")!; test.skip("Test Llama_CPP", async () => { - const model = new LlamaCpp({ modelPath: llamaPath }); + const model = await LlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Where do Llamas live?"); @@ -14,7 +14,7 @@ test.skip("Test Llama_CPP", async () => { }, 100000); test.skip("Test Llama_CPP", async () => { - const model = new LlamaCpp({ modelPath: llamaPath }); + const model = await LlamaCpp.initialize({ modelPath: llamaPath }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Where do Pandas live?"); @@ -22,7 +22,7 @@ test.skip("Test Llama_CPP", async () => { }, 100000); test.skip("Test Llama_CPP", async () => { - const model = new LlamaCpp({ modelPath: llamaPath }); + const model = await LlamaCpp.initialize({ modelPath: llamaPath }); // Attempt to make several queries and make sure that the system prompt // is not returned as part of any follow-on query. 
@@ -35,7 +35,10 @@ test.skip("Test Llama_CPP", async () => { }, 100000); test.skip("Test Llama_CPP", async () => { - const model = new LlamaCpp({ modelPath: llamaPath, temperature: 0.7 }); + const model = await LlamaCpp.initialize({ + modelPath: llamaPath, + temperature: 0.7, + }); const stream = await model.stream( "Tell me a short story about a happy Llama." @@ -55,7 +58,10 @@ const gbnfListGrammer = 'root ::= item+ # Excludes various line break characters item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n"'; test.skip("Test Llama_CPP", async () => { - const model = new LlamaCpp({ modelPath: llamaPath, gbnf: gbnfListGrammer }); + const model = await LlamaCpp.initialize({ + modelPath: llamaPath, + gbnf: gbnfListGrammer, + }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke( @@ -82,7 +88,10 @@ const schemaJSON = { }; test.skip("Test Llama_CPP", async () => { - const model = new LlamaCpp({ modelPath: llamaPath, jsonSchema: schemaJSON }); + const model = await LlamaCpp.initialize({ + modelPath: llamaPath, + jsonSchema: schemaJSON, + }); // @eslint-disable-next-line/@typescript-eslint/ban-ts-comment // @ts-expect-error unused var const res = await model.invoke("Where do llamas live?"); diff --git a/libs/langchain-community/src/utils/llama_cpp.ts b/libs/langchain-community/src/utils/llama_cpp.ts index 3bc37c6861d8..44ff601d137b 100644 --- a/libs/langchain-community/src/utils/llama_cpp.ts +++ b/libs/langchain-community/src/utils/llama_cpp.ts @@ -5,7 +5,10 @@ import { LlamaChatSession, LlamaJsonSchemaGrammar, LlamaGrammar, + type LlamaModelOptions, + LlamaContextOptions, GbnfJsonSchema, + Llama, } from "node-llama-cpp"; /** @@ -55,58 +58,59 @@ export interface LlamaBaseCppInputs { gbnf?: string; } -export function createLlamaModel(inputs: LlamaBaseCppInputs): LlamaModel { - const options = { +export async function createLlamaModel( + inputs: LlamaBaseCppInputs, + llama: Llama +): Promise { + const options: LlamaModelOptions = { gpuLayers: inputs?.gpuLayers, modelPath: inputs.modelPath, useMlock: inputs?.useMlock, useMmap: inputs?.useMmap, vocabOnly: inputs?.vocabOnly, - jsonSchema: inputs?.jsonSchema, - gbnf: inputs?.gbnf, }; - return new LlamaModel(options); + return llama.loadModel(options); } -export function createLlamaContext( +export async function createLlamaContext( model: LlamaModel, inputs: LlamaBaseCppInputs -): LlamaContext { - const options = { +): Promise { + const options: LlamaContextOptions = { batchSize: inputs?.batchSize, contextSize: inputs?.contextSize, - embedding: inputs?.embedding, - f16Kv: inputs?.f16Kv, - logitsAll: inputs?.logitsAll, - model, - prependBos: inputs?.prependBos, - seed: inputs?.seed, threads: inputs?.threads, }; - return new LlamaContext(options); + return model.createContext(options); } export function createLlamaSession(context: LlamaContext): LlamaChatSession { - return new LlamaChatSession({ context }); + return new LlamaChatSession({ contextSequence: context.getSequence() }); } -export function createLlamaJsonSchemaGrammar( - schemaString: object | undefined -): LlamaJsonSchemaGrammar | undefined { +export async function createLlamaJsonSchemaGrammar( + schemaString: object | undefined, + llama: Llama +): Promise | undefined> { if (schemaString === undefined) { return undefined; } const schemaJSON = schemaString as GbnfJsonSchema; - return new LlamaJsonSchemaGrammar(schemaJSON); + return await llama.createGrammarForJsonSchema(schemaJSON); } -export function 
createCustomGrammar( - filePath: string | undefined -): LlamaGrammar | undefined { - return filePath === undefined - ? undefined - : new LlamaGrammar({ grammar: filePath }); +export async function createCustomGrammar( + filePath: string | undefined, + llama: Llama +): Promise { + if (filePath === undefined) { + return undefined; + } + + return llama.createGrammar({ + grammar: filePath, + }); } diff --git a/yarn.lock b/yarn.lock index 543c3e5940ee..04aa2e456470 100644 --- a/yarn.lock +++ b/yarn.lock @@ -10441,6 +10441,13 @@ __metadata: languageName: node linkType: hard +"@huggingface/jinja@npm:^0.3.1": + version: 0.3.1 + resolution: "@huggingface/jinja@npm:0.3.1" + checksum: cd5dcc81b3690f9e4de7a6e4a236c4112b3a0e9e86e59d9b1fe49f634c459854f23492f7b66537978fd62fc125f5f3f8c8e56e299e61c633f5b3b429bb45494d + languageName: node + linkType: hard + "@humanwhocodes/config-array@npm:^0.11.11": version: 0.11.11 resolution: "@humanwhocodes/config-array@npm:0.11.11" @@ -11598,7 +11605,7 @@ __metadata: mongodb: ^5.2.0 mysql2: ^3.9.8 neo4j-driver: ^5.17.0 - node-llama-cpp: ^2 + node-llama-cpp: 3.1.1 notion-to-md: ^3.1.0 officeparser: ^4.0.4 pdf-parse: 1.1.1 @@ -13340,6 +13347,83 @@ __metadata: languageName: node linkType: hard +"@node-llama-cpp/linux-arm64@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/linux-arm64@npm:3.1.1" + conditions: os=linux & (cpu=arm64 | cpu=x64) & libc=glibc + languageName: node + linkType: hard + +"@node-llama-cpp/linux-armv7l@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/linux-armv7l@npm:3.1.1" + conditions: os=linux & (cpu=arm | cpu=x64) & libc=glibc + languageName: node + linkType: hard + +"@node-llama-cpp/linux-x64-cuda@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/linux-x64-cuda@npm:3.1.1" + conditions: os=linux & cpu=x64 & libc=glibc + languageName: node + linkType: hard + +"@node-llama-cpp/linux-x64-vulkan@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/linux-x64-vulkan@npm:3.1.1" + conditions: os=linux & cpu=x64 & libc=glibc + languageName: node + linkType: hard + +"@node-llama-cpp/linux-x64@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/linux-x64@npm:3.1.1" + conditions: os=linux & cpu=x64 & libc=glibc + languageName: node + linkType: hard + +"@node-llama-cpp/mac-arm64-metal@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/mac-arm64-metal@npm:3.1.1" + conditions: os=darwin & (cpu=arm64 | cpu=x64) + languageName: node + linkType: hard + +"@node-llama-cpp/mac-x64@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/mac-x64@npm:3.1.1" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + +"@node-llama-cpp/win-arm64@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/win-arm64@npm:3.1.1" + conditions: os=win32 & (cpu=arm64 | cpu=x64) + languageName: node + linkType: hard + +"@node-llama-cpp/win-x64-cuda@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/win-x64-cuda@npm:3.1.1" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + +"@node-llama-cpp/win-x64-vulkan@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/win-x64-vulkan@npm:3.1.1" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + +"@node-llama-cpp/win-x64@npm:3.1.1": + version: 3.1.1 + resolution: "@node-llama-cpp/win-x64@npm:3.1.1" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + "@nodelib/fs.scandir@npm:2.1.5": version: 2.1.5 resolution: "@nodelib/fs.scandir@npm:2.1.5" @@ -13431,76 +13515,72 @@ __metadata: languageName: 
node linkType: hard -"@octokit/app@npm:^14.0.0": - version: 14.0.0 - resolution: "@octokit/app@npm:14.0.0" +"@octokit/app@npm:^15.0.0": + version: 15.1.0 + resolution: "@octokit/app@npm:15.1.0" dependencies: - "@octokit/auth-app": ^6.0.0 - "@octokit/auth-unauthenticated": ^5.0.0 - "@octokit/core": ^5.0.0 - "@octokit/oauth-app": ^6.0.0 - "@octokit/plugin-paginate-rest": ^8.0.0 - "@octokit/types": ^11.1.0 - "@octokit/webhooks": ^12.0.1 - checksum: 6d1923f5e213ed52a656bd250a3d0504be67e51e95ba5f7ac9658bd19232a67c274267f61b5137905104ea98736d7e00b438dabf87f54596429f34836f14d7bf + "@octokit/auth-app": ^7.0.0 + "@octokit/auth-unauthenticated": ^6.0.0 + "@octokit/core": ^6.1.2 + "@octokit/oauth-app": ^7.0.0 + "@octokit/plugin-paginate-rest": ^11.0.0 + "@octokit/types": ^13.0.0 + "@octokit/webhooks": ^13.0.0 + checksum: 133c1b55646c85161f5fe09266aecdb877da298fe88daa0cc0b517dd45233f07462cfd4df2dc4a77452cdf4a7551bed9e8a36f2e172928519a9a2575e77aa7ad languageName: node linkType: hard -"@octokit/auth-app@npm:^6.0.0": - version: 6.0.0 - resolution: "@octokit/auth-app@npm:6.0.0" - dependencies: - "@octokit/auth-oauth-app": ^7.0.0 - "@octokit/auth-oauth-user": ^4.0.0 - "@octokit/request": ^8.0.2 - "@octokit/request-error": ^5.0.0 - "@octokit/types": ^11.0.0 - deprecation: ^2.3.1 +"@octokit/auth-app@npm:^7.0.0": + version: 7.1.1 + resolution: "@octokit/auth-app@npm:7.1.1" + dependencies: + "@octokit/auth-oauth-app": ^8.1.0 + "@octokit/auth-oauth-user": ^5.1.0 + "@octokit/request": ^9.1.1 + "@octokit/request-error": ^6.1.1 + "@octokit/types": ^13.4.1 lru-cache: ^10.0.0 - universal-github-app-jwt: ^1.1.1 - universal-user-agent: ^6.0.0 - checksum: e3eb3ff7be50557634c5712cfa1413522cfc167f87e096d4d344961fecdc4f8ec91cfb0a025705e48d2c1b663203d90a50bd5b99a6e0d667433d043f27fe3716 + universal-github-app-jwt: ^2.2.0 + universal-user-agent: ^7.0.0 + checksum: 2cad1a5eef4e458caacb16271284743c7c1d9b34a2617f6b17135b3910ddb33efe16e6a6b5b36407f1b0065324f7b7b1bfa7c2f7d338f6c59f312762e0c57a5c languageName: node linkType: hard -"@octokit/auth-oauth-app@npm:^7.0.0": - version: 7.0.0 - resolution: "@octokit/auth-oauth-app@npm:7.0.0" - dependencies: - "@octokit/auth-oauth-device": ^6.0.0 - "@octokit/auth-oauth-user": ^4.0.0 - "@octokit/request": ^8.0.2 - "@octokit/types": ^11.0.0 - "@types/btoa-lite": ^1.0.0 - btoa-lite: ^1.0.0 - universal-user-agent: ^6.0.0 - checksum: fa050fbd05cb8da888672f029d65bf6526a8b0c3da43b4c1efac396f48d423adedd0e1d80624de1c95c0e9f5a2e47ad42c71ddca39f9371d8462331905c5d452 +"@octokit/auth-oauth-app@npm:^8.0.0, @octokit/auth-oauth-app@npm:^8.1.0": + version: 8.1.1 + resolution: "@octokit/auth-oauth-app@npm:8.1.1" + dependencies: + "@octokit/auth-oauth-device": ^7.0.0 + "@octokit/auth-oauth-user": ^5.0.1 + "@octokit/request": ^9.0.0 + "@octokit/types": ^13.0.0 + universal-user-agent: ^7.0.0 + checksum: e61160a6cc6aefff7b8cb3c73c2fc26e327308800b85bf6bfcfb39009ee2cb813bc2034ce3ea29b240aca920515b2199466cf842bbef4905c5da7796aa813eb4 languageName: node linkType: hard -"@octokit/auth-oauth-device@npm:^6.0.0": - version: 6.0.0 - resolution: "@octokit/auth-oauth-device@npm:6.0.0" +"@octokit/auth-oauth-device@npm:^7.0.0, @octokit/auth-oauth-device@npm:^7.0.1": + version: 7.1.1 + resolution: "@octokit/auth-oauth-device@npm:7.1.1" dependencies: - "@octokit/oauth-methods": ^4.0.0 - "@octokit/request": ^8.0.0 - "@octokit/types": ^11.0.0 - universal-user-agent: ^6.0.0 - checksum: ba6e46a42a68ca0e6d345b10de257b4e0764e6abeedfa3b7bd27bbe4336cbabf3dc081c97eb860939e4fbcb9827c62be5c4396023342db4717f254f06cdbba43 + 
"@octokit/oauth-methods": ^5.0.0 + "@octokit/request": ^9.0.0 + "@octokit/types": ^13.0.0 + universal-user-agent: ^7.0.0 + checksum: 5338ae5a5ca1d03c03c3ceba21635b6e2d8d8fe9c1f9f746651ebea5a130e65388e418e730eefb394bbceba092b712181ce9a603eec761f4c8fd6f8790d7cd45 languageName: node linkType: hard -"@octokit/auth-oauth-user@npm:^4.0.0": - version: 4.0.0 - resolution: "@octokit/auth-oauth-user@npm:4.0.0" +"@octokit/auth-oauth-user@npm:^5.0.1, @octokit/auth-oauth-user@npm:^5.1.0": + version: 5.1.1 + resolution: "@octokit/auth-oauth-user@npm:5.1.1" dependencies: - "@octokit/auth-oauth-device": ^6.0.0 - "@octokit/oauth-methods": ^4.0.0 - "@octokit/request": ^8.0.2 - "@octokit/types": ^11.0.0 - btoa-lite: ^1.0.0 - universal-user-agent: ^6.0.0 - checksum: d4382d5fa3b52e5a7595da92e2bea921793bbce3f03c0c047720db10b1443f42d27c21453d00f4ee30383e2c4279175a53b613de25584b004200c201f63b5f45 + "@octokit/auth-oauth-device": ^7.0.1 + "@octokit/oauth-methods": ^5.0.0 + "@octokit/request": ^9.0.1 + "@octokit/types": ^13.0.0 + universal-user-agent: ^7.0.0 + checksum: fe2b2ec3f50a565efb37254c78be499d8fc1cf4d565f869b957037103296589c48c69cab26a0549311ed50b698dc9ae1fef5cc9a0cda2a11a519b053c30cb7fc languageName: node linkType: hard @@ -13525,13 +13605,13 @@ __metadata: languageName: node linkType: hard -"@octokit/auth-unauthenticated@npm:^5.0.0": - version: 5.0.0 - resolution: "@octokit/auth-unauthenticated@npm:5.0.0" +"@octokit/auth-unauthenticated@npm:^6.0.0, @octokit/auth-unauthenticated@npm:^6.0.0-beta.1": + version: 6.1.0 + resolution: "@octokit/auth-unauthenticated@npm:6.1.0" dependencies: - "@octokit/request-error": ^5.0.0 - "@octokit/types": ^11.0.0 - checksum: ec0715c2fbcfe5a4eaaf996c570e634d339ebbeaa00c1f336b3277c1477c96cdd09342fccdff63edc42ad80dff4ce0f02c3a5e0a6560a35188439330b6624690 + "@octokit/request-error": ^6.0.1 + "@octokit/types": ^13.0.0 + checksum: 0f8929cbca7fa34f2a4ebcdf92da7a0b5a4a2de7a5dd695b4308a827018db6cfa311e84fe326c19a0b8e66080aa152fa066ae434190e5a63eadbb1449b1d7105 languageName: node linkType: hard @@ -13550,21 +13630,6 @@ __metadata: languageName: node linkType: hard -"@octokit/core@npm:^5.0.0": - version: 5.0.0 - resolution: "@octokit/core@npm:5.0.0" - dependencies: - "@octokit/auth-token": ^4.0.0 - "@octokit/graphql": ^7.0.0 - "@octokit/request": ^8.0.2 - "@octokit/request-error": ^5.0.0 - "@octokit/types": ^11.0.0 - before-after-hook: ^2.2.0 - universal-user-agent: ^6.0.0 - checksum: 1a5d1112a2403d146aa1db7aaf81a31192ef6b0310a1e6f68c3e439fded22bd4b3a930f5071585e6ca0f2f5e7fc4a1aac68910525b71b03732c140e362d26a33 - languageName: node - linkType: hard - "@octokit/core@npm:^5.0.2": version: 5.2.0 resolution: "@octokit/core@npm:5.2.0" @@ -13580,7 +13645,7 @@ __metadata: languageName: node linkType: hard -"@octokit/core@npm:^6.1.2": +"@octokit/core@npm:^6.0.0, @octokit/core@npm:^6.1.2": version: 6.1.2 resolution: "@octokit/core@npm:6.1.2" dependencies: @@ -13616,17 +13681,6 @@ __metadata: languageName: node linkType: hard -"@octokit/endpoint@npm:^9.0.0": - version: 9.0.0 - resolution: "@octokit/endpoint@npm:9.0.0" - dependencies: - "@octokit/types": ^11.0.0 - is-plain-object: ^5.0.0 - universal-user-agent: ^6.0.0 - checksum: 0e402c4d0fbe5b8053630cedb30dde5074bb6410828a05dc93d7e0fdd6c17f9a44b66586ef1a4e4ee0baa8d34ef7d6f535e2f04d9ea42909b7fc7ff55ce56a48 - languageName: node - linkType: hard - "@octokit/endpoint@npm:^9.0.1": version: 9.0.5 resolution: "@octokit/endpoint@npm:9.0.5" @@ -13648,17 +13702,6 @@ __metadata: languageName: node linkType: hard -"@octokit/graphql@npm:^7.0.0": - version: 
7.0.1 - resolution: "@octokit/graphql@npm:7.0.1" - dependencies: - "@octokit/request": ^8.0.1 - "@octokit/types": ^11.0.0 - universal-user-agent: ^6.0.0 - checksum: 7ee907987b1b8312c6f870c44455cbd3eed805bb1a4095038f4e7e62ee2e006bd766f2a71dfbe56b870cd8f7558309c602f00d3e252fe59578f4acf6249a4f17 - languageName: node - linkType: hard - "@octokit/graphql@npm:^7.1.0": version: 7.1.0 resolution: "@octokit/graphql@npm:7.1.0" @@ -13681,39 +13724,38 @@ __metadata: languageName: node linkType: hard -"@octokit/oauth-app@npm:^6.0.0": - version: 6.0.0 - resolution: "@octokit/oauth-app@npm:6.0.0" - dependencies: - "@octokit/auth-oauth-app": ^7.0.0 - "@octokit/auth-oauth-user": ^4.0.0 - "@octokit/auth-unauthenticated": ^5.0.0 - "@octokit/core": ^5.0.0 - "@octokit/oauth-authorization-url": ^6.0.2 - "@octokit/oauth-methods": ^4.0.0 +"@octokit/oauth-app@npm:^7.0.0": + version: 7.1.3 + resolution: "@octokit/oauth-app@npm:7.1.3" + dependencies: + "@octokit/auth-oauth-app": ^8.0.0 + "@octokit/auth-oauth-user": ^5.0.1 + "@octokit/auth-unauthenticated": ^6.0.0-beta.1 + "@octokit/core": ^6.0.0 + "@octokit/oauth-authorization-url": ^7.0.0 + "@octokit/oauth-methods": ^5.0.0 "@types/aws-lambda": ^8.10.83 - universal-user-agent: ^6.0.0 - checksum: 5d07c6fe15d4a670a3ca0c7c0d37c973912b3ac993375966a6bed0e084edbda972f575e2ab2dc17aa9718e5aeefbec7489f5aeb2dbc6e47768ad9633f27f842d + universal-user-agent: ^7.0.0 + checksum: 13582d8d6e2ec1be144b5ec2c559d93de2cafcdfebde5e17c2d87906148c66edf00e8fb99c06852c8f4e51c6bbccd4a053b60796eadd848703389c0418eaa7fd languageName: node linkType: hard -"@octokit/oauth-authorization-url@npm:^6.0.2": - version: 6.0.2 - resolution: "@octokit/oauth-authorization-url@npm:6.0.2" - checksum: 0f11169a3eeb782cc08312c923de1a702b25ae033b972ba40380b6d72cb3f684543c8b6a5cf6f05936fdc6b8892070d4f7581138d8efc1b4c4a55ae6d7762327 +"@octokit/oauth-authorization-url@npm:^7.0.0": + version: 7.1.1 + resolution: "@octokit/oauth-authorization-url@npm:7.1.1" + checksum: 02ad29fa4540c6b4b3a1e9f6936d40057174be91e9c7cad1afcd09d027fa2a50598dad5857699d1be25568bf70d86123dc9cd3874afe044ce6791e6805e97542 languageName: node linkType: hard -"@octokit/oauth-methods@npm:^4.0.0": - version: 4.0.0 - resolution: "@octokit/oauth-methods@npm:4.0.0" +"@octokit/oauth-methods@npm:^5.0.0": + version: 5.1.2 + resolution: "@octokit/oauth-methods@npm:5.1.2" dependencies: - "@octokit/oauth-authorization-url": ^6.0.2 - "@octokit/request": ^8.0.2 - "@octokit/request-error": ^5.0.0 - "@octokit/types": ^11.0.0 - btoa-lite: ^1.0.0 - checksum: 623f3031f56f5bfd6f142f9215bf47ba418b2f7fd9eb72d3e68cdd2b2c81345021b692edd18ad055df85e71e35497002e1fc4816235ed520dd71e677885c99cc + "@octokit/oauth-authorization-url": ^7.0.0 + "@octokit/request": ^9.1.0 + "@octokit/request-error": ^6.1.0 + "@octokit/types": ^13.0.0 + checksum: 64317d0fae0f2383ef0194bab7ed6521a1e2d698f2f0730b22dd4ffa2f103541be6e5ef4380e073d8086008ad5d311a66901e0cc6bc0f57b66dc64db6ed79922 languageName: node linkType: hard @@ -13731,12 +13773,19 @@ __metadata: languageName: node linkType: hard -"@octokit/plugin-paginate-graphql@npm:^4.0.0": - version: 4.0.0 - resolution: "@octokit/plugin-paginate-graphql@npm:4.0.0" +"@octokit/openapi-webhooks-types@npm:8.3.0": + version: 8.3.0 + resolution: "@octokit/openapi-webhooks-types@npm:8.3.0" + checksum: bc97f53a93ed11a65ccf06cc67d4fcd9987112fbedd62335bf55debe475fedffe45c100e9fd2df98833c5da7b5a2391c75e22d70354f3f6790f8c87213325b42 + languageName: node + linkType: hard + +"@octokit/plugin-paginate-graphql@npm:^5.0.0": + version: 5.2.4 + resolution: 
"@octokit/plugin-paginate-graphql@npm:5.2.4" peerDependencies: - "@octokit/core": ">=5" - checksum: 368121d74fc40a4cee96f2febc29ae43abd8f6b7d0b06d3520847827675128028c4fa10d0534c5f0466658e81257d103092154778625c886a9fcdd01c302e50e + "@octokit/core": ">=6" + checksum: f119999c8872f8c24eff653c3af53dea9d06b6863491ea52b888c1a9489019fcaa47423321b857073c609baaaf43fecf97ef335d780042334217abfe24b68bed languageName: node linkType: hard @@ -13774,17 +13823,6 @@ __metadata: languageName: node linkType: hard -"@octokit/plugin-paginate-rest@npm:^8.0.0": - version: 8.0.0 - resolution: "@octokit/plugin-paginate-rest@npm:8.0.0" - dependencies: - "@octokit/types": ^11.0.0 - peerDependencies: - "@octokit/core": ">=5" - checksum: b5d7cee50523862c6ce7be057f7200e14ee4dcded462f27304c822c960a37efa23ed51080ea879f5d1e56e78f74baa17d2ce32eed5d726794abc35755777e32c - languageName: node - linkType: hard - "@octokit/plugin-request-log@npm:^1.0.4": version: 1.0.4 resolution: "@octokit/plugin-request-log@npm:1.0.4" @@ -13845,39 +13883,28 @@ __metadata: languageName: node linkType: hard -"@octokit/plugin-rest-endpoint-methods@npm:^9.0.0": - version: 9.0.0 - resolution: "@octokit/plugin-rest-endpoint-methods@npm:9.0.0" - dependencies: - "@octokit/types": ^11.0.0 - peerDependencies: - "@octokit/core": ">=5" - checksum: 8795cb29be042c839098886a03c2ec6051e3fd7a29f16f4f8a487aa2d85ceb00df8a4432499a43af550369bd730ce9b1b9d7eeff768745b80a3e67698ca9a5dd - languageName: node - linkType: hard - -"@octokit/plugin-retry@npm:^6.0.0": - version: 6.0.0 - resolution: "@octokit/plugin-retry@npm:6.0.0" +"@octokit/plugin-retry@npm:^7.0.0": + version: 7.1.2 + resolution: "@octokit/plugin-retry@npm:7.1.2" dependencies: - "@octokit/request-error": ^5.0.0 - "@octokit/types": ^11.0.0 + "@octokit/request-error": ^6.0.0 + "@octokit/types": ^13.0.0 bottleneck: ^2.15.3 peerDependencies: - "@octokit/core": ">=5" - checksum: 84c047309d6b3ad8d796cd6aca9a73c61ebea3894a01067ec6bd40d6ba9aaab779a1085749c04f90b25c0fc3a100c6553474d830e5c2e0dde4ffc42b5e0a2e89 + "@octokit/core": ">=6" + checksum: 484da4d0deffb5612d9ad918e82158c7c0e98e0be76ffe9046fe48c3f11ed4b7ff2d6807d9704c470dbc7d017bfa6e89cd89346ccdad788ac4fa5d02ccc99f94 languageName: node linkType: hard -"@octokit/plugin-throttling@npm:^7.0.0": - version: 7.0.0 - resolution: "@octokit/plugin-throttling@npm:7.0.0" +"@octokit/plugin-throttling@npm:^9.0.0": + version: 9.3.2 + resolution: "@octokit/plugin-throttling@npm:9.3.2" dependencies: - "@octokit/types": ^11.0.0 + "@octokit/types": ^13.0.0 bottleneck: ^2.15.3 peerDependencies: - "@octokit/core": ^5.0.0 - checksum: 772dd3405cb89ac8e4f6e81cee4e1cbf61010461c6c88ebc9f3a557eefc8a039b2368e615b2bf5d97352f5faf0dc133d70ad8eb568fd429f58332292d29113c1 + "@octokit/core": ^6.0.0 + checksum: d3e11bd4bbee7df0885789c018f9e0cc48b2226fa4c23f9f68b53acd670eb30303762fb56034650620dbf4e72497e27620140bc9cad5355207b4b5f0e1129e90 languageName: node linkType: hard @@ -13892,17 +13919,6 @@ __metadata: languageName: node linkType: hard -"@octokit/request-error@npm:^5.0.0": - version: 5.0.0 - resolution: "@octokit/request-error@npm:5.0.0" - dependencies: - "@octokit/types": ^11.0.0 - deprecation: ^2.0.0 - once: ^1.4.0 - checksum: 2012eca66f6b8fa4038b3bfe81d65a7134ec58e2caf45d229aca13b9653ab260abd95229bd1a8c11180ee0bcf738e2556831a85de28f39b175175653c3b79fdd - languageName: node - linkType: hard - "@octokit/request-error@npm:^5.1.0": version: 5.1.0 resolution: "@octokit/request-error@npm:5.1.0" @@ -13914,6 +13930,15 @@ __metadata: languageName: node linkType: hard 
+"@octokit/request-error@npm:^6.0.0, @octokit/request-error@npm:^6.1.0, @octokit/request-error@npm:^6.1.1": + version: 6.1.5 + resolution: "@octokit/request-error@npm:6.1.5" + dependencies: + "@octokit/types": ^13.0.0 + checksum: a0891df29957d9911ef34281fefffac4a98baa96ffffeb1a2b8f0c8e229911ca3da2be42e5bbe6a4b994a12fd100f4d0d86be095fada60384cd6728705eae859 + languageName: node + linkType: hard + "@octokit/request-error@npm:^6.0.1": version: 6.1.4 resolution: "@octokit/request-error@npm:6.1.4" @@ -13937,19 +13962,6 @@ __metadata: languageName: node linkType: hard -"@octokit/request@npm:^8.0.0, @octokit/request@npm:^8.0.1, @octokit/request@npm:^8.0.2": - version: 8.1.1 - resolution: "@octokit/request@npm:8.1.1" - dependencies: - "@octokit/endpoint": ^9.0.0 - "@octokit/request-error": ^5.0.0 - "@octokit/types": ^11.1.0 - is-plain-object: ^5.0.0 - universal-user-agent: ^6.0.0 - checksum: dec3ba2cba14739159cd8d1653ad8ac6d58095e4ac294d312d20ce2c63c60c3cad2e5499137244dba3d681fd5cd7f74b4b5d4df024a19c0ee1831204e5a3a894 - languageName: node - linkType: hard - "@octokit/request@npm:^8.3.0, @octokit/request@npm:^8.3.1": version: 8.4.0 resolution: "@octokit/request@npm:8.4.0" @@ -13962,7 +13974,7 @@ __metadata: languageName: node linkType: hard -"@octokit/request@npm:^9.0.0": +"@octokit/request@npm:^9.0.0, @octokit/request@npm:^9.0.1, @octokit/request@npm:^9.1.0, @octokit/request@npm:^9.1.1": version: 9.1.3 resolution: "@octokit/request@npm:9.1.3" dependencies: @@ -14026,15 +14038,6 @@ __metadata: languageName: node linkType: hard -"@octokit/types@npm:^11.0.0, @octokit/types@npm:^11.1.0": - version: 11.1.0 - resolution: "@octokit/types@npm:11.1.0" - dependencies: - "@octokit/openapi-types": ^18.0.0 - checksum: 72627a94ddaf7bc14db06572bcde67649aad608cd86548818380db9305f4c0ca9ca078a62dd883858a267e8ec8fd596a0fce416aa04197c439b9548efef609a7 - languageName: node - linkType: hard - "@octokit/types@npm:^13.0.0, @octokit/types@npm:^13.1.0, @octokit/types@npm:^13.5.0": version: 13.5.0 resolution: "@octokit/types@npm:13.5.0" @@ -14044,6 +14047,15 @@ __metadata: languageName: node linkType: hard +"@octokit/types@npm:^13.4.1": + version: 13.6.1 + resolution: "@octokit/types@npm:13.6.1" + dependencies: + "@octokit/openapi-types": ^22.2.0 + checksum: 05bb427bc3c84088e2367b8d1b7a9834732116bb3d35ef51d1aae34b3919027159dd496b9362dab1cb047918da15be1dc1cafc512c97f9b77458bd273b5a2ba9 + languageName: node + linkType: hard + "@octokit/types@npm:^9.0.0, @octokit/types@npm:^9.2.3": version: 9.3.2 resolution: "@octokit/types@npm:9.3.2" @@ -14053,29 +14065,21 @@ __metadata: languageName: node linkType: hard -"@octokit/webhooks-methods@npm:^4.0.0": - version: 4.0.0 - resolution: "@octokit/webhooks-methods@npm:4.0.0" - checksum: 07010438e53a6a659f0d7d3596bf89e6795776165066553e76384d90cef077a1e259122733913468299a1a76c71536914eb871d0508fcbbd453468b21eeb30c7 - languageName: node - linkType: hard - -"@octokit/webhooks-types@npm:7.1.0": - version: 7.1.0 - resolution: "@octokit/webhooks-types@npm:7.1.0" - checksum: 5aea38c38e97cb1b8d54c805c17c4015ee937d0b1ad550adc64eaf2e90bfbaf1e00c878490c10b43e31a11563e8d02183b86268ed588b04e39b22d5fd27807cf +"@octokit/webhooks-methods@npm:^5.0.0": + version: 5.1.0 + resolution: "@octokit/webhooks-methods@npm:5.1.0" + checksum: 6b0185f62b30b1d267456c449732d1c381e22533bcfeea3002bb88bc9f50a6ec5e4863be092473e7c47bee8c01b863ebd93980dd378495860dfd8d762044a212 languageName: node linkType: hard -"@octokit/webhooks@npm:^12.0.1": - version: 12.0.3 - resolution: "@octokit/webhooks@npm:12.0.3" 
+"@octokit/webhooks@npm:^13.0.0": + version: 13.3.0 + resolution: "@octokit/webhooks@npm:13.3.0" dependencies: - "@octokit/request-error": ^5.0.0 - "@octokit/webhooks-methods": ^4.0.0 - "@octokit/webhooks-types": 7.1.0 - aggregate-error: ^3.1.0 - checksum: 2db63122eab1852047379f9cb0024cdb22a2bf16356907b66bc71d5a01814b069178aee07fe61faeb80cde3dcd513a39526974f8bae5ec6e26b63fe87877ace1 + "@octokit/openapi-webhooks-types": 8.3.0 + "@octokit/request-error": ^6.0.1 + "@octokit/webhooks-methods": ^5.0.0 + checksum: 4a790e7a0551f057a14cf3b5df8e20cec43c10a8f331e19db7b0e5f6bfbc7577e817ad8543c7a99fb6dd7c713d93f0bbaf2fedc3c88f858693da084e9ef1463d languageName: node linkType: hard @@ -14453,6 +14457,95 @@ __metadata: languageName: node linkType: hard +"@reflink/reflink-darwin-arm64@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-darwin-arm64@npm:0.1.16" + conditions: os=darwin & cpu=arm64 + languageName: node + linkType: hard + +"@reflink/reflink-darwin-x64@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-darwin-x64@npm:0.1.16" + conditions: os=darwin & cpu=x64 + languageName: node + linkType: hard + +"@reflink/reflink-linux-arm64-gnu@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-linux-arm64-gnu@npm:0.1.16" + conditions: os=linux & cpu=arm64 & libc=glibc + languageName: node + linkType: hard + +"@reflink/reflink-linux-arm64-musl@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-linux-arm64-musl@npm:0.1.16" + conditions: os=linux & cpu=arm64 & libc=musl + languageName: node + linkType: hard + +"@reflink/reflink-linux-x64-gnu@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-linux-x64-gnu@npm:0.1.16" + conditions: os=linux & cpu=x64 & libc=glibc + languageName: node + linkType: hard + +"@reflink/reflink-linux-x64-musl@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-linux-x64-musl@npm:0.1.16" + conditions: os=linux & cpu=x64 & libc=musl + languageName: node + linkType: hard + +"@reflink/reflink-win32-arm64-msvc@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-win32-arm64-msvc@npm:0.1.16" + conditions: os=win32 & cpu=arm64 + languageName: node + linkType: hard + +"@reflink/reflink-win32-x64-msvc@npm:0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink-win32-x64-msvc@npm:0.1.16" + conditions: os=win32 & cpu=x64 + languageName: node + linkType: hard + +"@reflink/reflink@npm:^0.1.16": + version: 0.1.16 + resolution: "@reflink/reflink@npm:0.1.16" + dependencies: + "@reflink/reflink-darwin-arm64": 0.1.16 + "@reflink/reflink-darwin-x64": 0.1.16 + "@reflink/reflink-linux-arm64-gnu": 0.1.16 + "@reflink/reflink-linux-arm64-musl": 0.1.16 + "@reflink/reflink-linux-x64-gnu": 0.1.16 + "@reflink/reflink-linux-x64-musl": 0.1.16 + "@reflink/reflink-win32-arm64-msvc": 0.1.16 + "@reflink/reflink-win32-x64-msvc": 0.1.16 + dependenciesMeta: + "@reflink/reflink-darwin-arm64": + optional: true + "@reflink/reflink-darwin-x64": + optional: true + "@reflink/reflink-linux-arm64-gnu": + optional: true + "@reflink/reflink-linux-arm64-musl": + optional: true + "@reflink/reflink-linux-x64-gnu": + optional: true + "@reflink/reflink-linux-x64-musl": + optional: true + "@reflink/reflink-win32-arm64-msvc": + optional: true + "@reflink/reflink-win32-x64-msvc": + optional: true + checksum: dcc35c4a63d79a5126c5405d6e034e022c6639372486ed98ab93c1d5dd5620660adedf8f075356f9f7fbb15719b646765404d859cc188b5d124fa16b90bcf750 + languageName: node + linkType: hard + "@rockset/client@npm:^0.9.1": version: 0.9.1 resolution: 
"@rockset/client@npm:0.9.1" @@ -18524,6 +18617,13 @@ __metadata: languageName: node linkType: hard +"@tinyhttp/content-disposition@npm:^2.2.0": + version: 2.2.2 + resolution: "@tinyhttp/content-disposition@npm:2.2.2" + checksum: ec36962a263742b5d1303e9eb52955b616d83edee71d559a9ed051568ace9206d8bfa625de406c10694765b3fa117eaca65f1c0c5330d91282170ba9f3654dfb + languageName: node + linkType: hard + "@tokenizer/token@npm:^0.3.0": version: 0.3.0 resolution: "@tokenizer/token@npm:0.3.0" @@ -18673,13 +18773,6 @@ __metadata: languageName: node linkType: hard -"@types/btoa-lite@npm:^1.0.0": - version: 1.0.0 - resolution: "@types/btoa-lite@npm:1.0.0" - checksum: 4d0c3c36cc8aa5669d286d62ca45d925e3ea0db75222ebacb0d9f4fd7822b8e162da8773887e045c11d64c42373807d2ab2ad97a5d8a683d2e1c981e6a05ce33 - languageName: node - linkType: hard - "@types/caseless@npm:*": version: 0.12.5 resolution: "@types/caseless@npm:0.12.5" @@ -20789,7 +20882,7 @@ __metadata: languageName: node linkType: hard -"aggregate-error@npm:^3.0.0, aggregate-error@npm:^3.1.0": +"aggregate-error@npm:^3.0.0": version: 3.1.0 resolution: "aggregate-error@npm:3.1.0" dependencies: @@ -20922,6 +21015,13 @@ __metadata: languageName: node linkType: hard +"ansi-escapes@npm:^6.2.0": + version: 6.2.1 + resolution: "ansi-escapes@npm:6.2.1" + checksum: 4bdbabe0782a1d4007157798f8acab745d1d5e440c872e6792880d08025e0baababa6b85b36846e955fde7d1e4bf572cdb1fddf109de196e9388d7a1c55ce30d + languageName: node + linkType: hard + "ansi-html-community@npm:^0.0.8": version: 0.0.8 resolution: "ansi-html-community@npm:0.0.8" @@ -20977,7 +21077,7 @@ __metadata: languageName: node linkType: hard -"ansi-styles@npm:^6.0.0, ansi-styles@npm:^6.1.0": +"ansi-styles@npm:^6.0.0, ansi-styles@npm:^6.1.0, ansi-styles@npm:^6.2.1": version: 6.2.1 resolution: "ansi-styles@npm:6.2.1" checksum: ef940f2f0ced1a6347398da88a91da7930c33ecac3c77b72c5905f8b8fe402c52e6fde304ff5347f616e27a742da3f1dc76de98f6866c69251ad0b07a66776d9 @@ -22242,13 +22342,6 @@ __metadata: languageName: node linkType: hard -"btoa-lite@npm:^1.0.0": - version: 1.0.0 - resolution: "btoa-lite@npm:1.0.0" - checksum: c2d61993b801f8e35a96f20692a45459c753d9baa29d86d1343e714f8d6bbe7069f1a20a5ae868488f3fb137d5bd0c560f6fbbc90b5a71050919d2d2c97c0475 - languageName: node - linkType: hard - "buffer-alloc-unsafe@npm:^1.1.0": version: 1.1.0 resolution: "buffer-alloc-unsafe@npm:1.1.0" @@ -22374,7 +22467,7 @@ __metadata: languageName: node linkType: hard -"bytes@npm:3.1.2": +"bytes@npm:3.1.2, bytes@npm:^3.1.2": version: 3.1.2 resolution: "bytes@npm:3.1.2" checksum: e4bcd3948d289c5127591fbedf10c0b639ccbf00243504e4e127374a15c3bc8eed0d28d4aaab08ff6f1cf2abc0cce6ba3085ed32f4f90e82a5683ce0014e1b6e @@ -22829,6 +22922,13 @@ __metadata: languageName: node linkType: hard +"ci-info@npm:^4.0.0": + version: 4.0.0 + resolution: "ci-info@npm:4.0.0" + checksum: 122fe41c5eb8d0b5fa0ab6fd674c5ddcf2dc59766528b062a0144ff0d913cfb210ef925ec52110e7c2a7f4e603d5f0e8b91cfe68867e196e9212fa0b94d0a08a + languageName: node + linkType: hard + "cjs-module-lexer@npm:^1.0.0": version: 1.2.2 resolution: "cjs-module-lexer@npm:1.2.2" @@ -22904,6 +23004,15 @@ __metadata: languageName: node linkType: hard +"cli-cursor@npm:^5.0.0": + version: 5.0.0 + resolution: "cli-cursor@npm:5.0.0" + dependencies: + restore-cursor: ^5.0.0 + checksum: 1eb9a3f878b31addfe8d82c6d915ec2330cec8447ab1f117f4aa34f0137fbb3137ec3466e1c9a65bcb7557f6e486d343f2da57f253a2f668d691372dfa15c090 + languageName: node + linkType: hard + "cli-highlight@npm:^2.1.11": version: 2.1.11 resolution: 
"cli-highlight@npm:2.1.11" @@ -22920,15 +23029,6 @@ __metadata: languageName: node linkType: hard -"cli-progress@npm:^3.12.0": - version: 3.12.0 - resolution: "cli-progress@npm:3.12.0" - dependencies: - string-width: ^4.2.3 - checksum: e8390dc3cdf3c72ecfda0a1e8997bfed63a0d837f97366bbce0ca2ff1b452da386caed007b389f0fe972625037b6c8e7ab087c69d6184cc4dfc8595c4c1d3e6e - languageName: node - linkType: hard - "cli-spinners@npm:^2.5.0": version: 2.7.0 resolution: "cli-spinners@npm:2.7.0" @@ -22943,13 +23043,6 @@ __metadata: languageName: node linkType: hard -"cli-spinners@npm:^2.9.0": - version: 2.9.0 - resolution: "cli-spinners@npm:2.9.0" - checksum: a9c56e1f44457d4a9f4f535364e729cb8726198efa9e98990cfd9eda9e220dfa4ba12f92808d1be5e29029cdfead781db82dc8549b97b31c907d55f96aa9b0e2 - languageName: node - linkType: hard - "cli-table3@npm:^0.6.2": version: 0.6.3 resolution: "cli-table3@npm:0.6.3" @@ -23162,29 +23255,6 @@ __metadata: languageName: node linkType: hard -"cmake-js@npm:^7.2.1": - version: 7.2.1 - resolution: "cmake-js@npm:7.2.1" - dependencies: - axios: ^1.3.2 - debug: ^4 - fs-extra: ^10.1.0 - lodash.isplainobject: ^4.0.6 - memory-stream: ^1.0.0 - node-api-headers: ^0.0.2 - npmlog: ^6.0.2 - rc: ^1.2.7 - semver: ^7.3.8 - tar: ^6.1.11 - url-join: ^4.0.1 - which: ^2.0.2 - yargs: ^17.6.0 - bin: - cmake-js: bin/cmake-js - checksum: 567d83f2718b0a66d5207905214792ccaa24ed7aa2f3661f214144e91f49480f3e9464e52f609b494a57ed901db789981e2daeae493f8d0e2d4aaed5cee71c17 - languageName: node - linkType: hard - "cmake-js@npm:^7.3.0": version: 7.3.0 resolution: "cmake-js@npm:7.3.0" @@ -25172,7 +25242,7 @@ __metadata: languageName: node linkType: hard -"deprecation@npm:^2.0.0, deprecation@npm:^2.3.1": +"deprecation@npm:^2.0.0": version: 2.3.1 resolution: "deprecation@npm:2.3.1" checksum: f56a05e182c2c195071385455956b0c4106fe14e36245b00c689ceef8e8ab639235176a96977ba7c74afb173317fac2e0ec6ec7a1c6d1e6eaa401c586c714132 @@ -25708,13 +25778,6 @@ __metadata: languageName: node linkType: hard -"emoji-regex@npm:^10.2.1": - version: 10.2.1 - resolution: "emoji-regex@npm:10.2.1" - checksum: 1aa2d16881c56531fdfc03d0b36f5c2b6221cc4097499a5665b88b711dc3fb4d5b8804f0ca6f00c56e5dcf89bac75f0487eee85da1da77df3a33accc6ecbe426 - languageName: node - linkType: hard - "emoji-regex@npm:^10.3.0": version: 10.3.0 resolution: "emoji-regex@npm:10.3.0" @@ -25820,10 +25883,10 @@ __metadata: languageName: node linkType: hard -"env-var@npm:^7.3.1": - version: 7.4.1 - resolution: "env-var@npm:7.4.1" - checksum: 35cbb504d6b803d837a34b84bf9913905d5e627400aeef9529d3a0acd6af9e0b1f9d87b833ac30c76323bd97314e1890514e4ce795643f8a432f39990796e97d +"env-var@npm:^7.5.0": + version: 7.5.0 + resolution: "env-var@npm:7.5.0" + checksum: 7be2a834693cc1d03f3b86ca2d5899fa08cbdcdec3468368ada85c60f6dcd83dc166db3e5dd59f6a85a5e5995a9bdc648082a62dc6f33d8a2351f0ab7d9cab60 languageName: node linkType: hard @@ -28045,6 +28108,22 @@ __metadata: languageName: node linkType: hard +"filename-reserved-regex@npm:^3.0.0": + version: 3.0.0 + resolution: "filename-reserved-regex@npm:3.0.0" + checksum: 1803e19ce64d7cb88ee5a1bd3ce282470a5c263987269222426d889049fc857e302284fa71937de9582eba7a9f39539557d45e0562f2fa51cade8efc68c65dd9 + languageName: node + linkType: hard + +"filenamify@npm:^6.0.0": + version: 6.0.0 + resolution: "filenamify@npm:6.0.0" + dependencies: + filename-reserved-regex: ^3.0.0 + checksum: 5914b64a760d49323d0454efb1f5e33338d3840df447f40556fc68730c4649797451931d60035c66068dacf326f045a912287ce8b63e15a5fba311a961f8f4b1 + languageName: node + linkType: hard + 
"filesize@npm:^8.0.6": version: 8.0.7 resolution: "filesize@npm:8.0.7" @@ -30259,6 +30338,13 @@ __metadata: languageName: node linkType: hard +"ignore@npm:^5.3.2": + version: 5.3.2 + resolution: "ignore@npm:5.3.2" + checksum: 2acfd32a573260ea522ea0bfeff880af426d68f6831f973129e2ba7363f422923cf53aab62f8369cbf4667c7b25b6f8a3761b34ecdb284ea18e87a5262a865be + languageName: node + linkType: hard + "image-size@npm:^1.0.1": version: 1.0.2 resolution: "image-size@npm:1.0.2" @@ -30569,6 +30655,39 @@ __metadata: languageName: node linkType: hard +"ipull@npm:^3.7.4": + version: 3.9.0 + resolution: "ipull@npm:3.9.0" + dependencies: + "@reflink/reflink": ^0.1.16 + "@tinyhttp/content-disposition": ^2.2.0 + async-retry: ^1.3.3 + chalk: ^5.3.0 + ci-info: ^4.0.0 + cli-spinners: ^2.9.2 + commander: ^10.0.0 + eventemitter3: ^5.0.1 + filenamify: ^6.0.0 + fs-extra: ^11.1.1 + is-unicode-supported: ^2.0.0 + lifecycle-utils: ^1.3.1 + lodash.debounce: ^4.0.8 + lowdb: ^7.0.1 + pretty-bytes: ^6.1.0 + pretty-ms: ^8.0.0 + sleep-promise: ^9.1.0 + slice-ansi: ^7.1.0 + stdout-update: ^4.0.1 + strip-ansi: ^7.1.0 + dependenciesMeta: + "@reflink/reflink": + optional: true + bin: + ipull: dist/cli/cli.js + checksum: 7a9f5a0e1715e6f6a73bd4edb8916fb44640d589617a39f233601f9fe2f93709ba5652ee0603938fd7ba691870567d26ac0ccc2388ca5f327339d7daef806376 + languageName: node + linkType: hard + "is-alphabetical@npm:1.0.4, is-alphabetical@npm:^1.0.0": version: 1.0.4 resolution: "is-alphabetical@npm:1.0.4" @@ -30825,6 +30944,15 @@ __metadata: languageName: node linkType: hard +"is-fullwidth-code-point@npm:^5.0.0": + version: 5.0.0 + resolution: "is-fullwidth-code-point@npm:5.0.0" + dependencies: + get-east-asian-width: ^1.0.0 + checksum: 8dfb2d2831b9e87983c136f5c335cd9d14c1402973e357a8ff057904612ed84b8cba196319fabedf9aefe4639e14fe3afe9d9966d1d006ebeb40fe1fed4babe5 + languageName: node + linkType: hard + "is-generator-fn@npm:^2.0.0": version: 2.1.0 resolution: "is-generator-fn@npm:2.1.0" @@ -31239,6 +31367,13 @@ __metadata: languageName: node linkType: hard +"is-unicode-supported@npm:^2.1.0": + version: 2.1.0 + resolution: "is-unicode-supported@npm:2.1.0" + checksum: f254e3da6b0ab1a57a94f7273a7798dd35d1d45b227759f600d0fa9d5649f9c07fa8d3c8a6360b0e376adf916d151ec24fc9a50c5295c58bae7ca54a76a063f9 + languageName: node + linkType: hard + "is-weakmap@npm:^2.0.1": version: 2.0.1 resolution: "is-weakmap@npm:2.0.1" @@ -33213,6 +33348,13 @@ __metadata: languageName: node linkType: hard +"lifecycle-utils@npm:^1.3.1, lifecycle-utils@npm:^1.7.0": + version: 1.7.0 + resolution: "lifecycle-utils@npm:1.7.0" + checksum: 083b48cf370ff7d9284a0551bea53a104d844ee4b6b57b7b84d04dba4e7c0b157543770c4f582b9fe94aa6ceea4eaa64bf587919a5ab80479a1c5267cde44120 + languageName: node + linkType: hard + "lilconfig@npm:2.1.0, lilconfig@npm:^2.0.3, lilconfig@npm:^2.0.5, lilconfig@npm:^2.1.0": version: 2.1.0 resolution: "lilconfig@npm:2.1.0" @@ -33584,6 +33726,16 @@ __metadata: languageName: node linkType: hard +"log-symbols@npm:^7.0.0": + version: 7.0.0 + resolution: "log-symbols@npm:7.0.0" + dependencies: + is-unicode-supported: ^2.0.0 + yoctocolors: ^2.1.1 + checksum: a6cb6e90bfe9f0774a09ff783e2035cd7e375a42757d7e401b391916a67f6da382f4966b57dda89430faaebe2ed13803ea867e104f8d67caf66082943a7153f0 + languageName: node + linkType: hard + "log-update@npm:^4.0.0": version: 4.0.0 resolution: "log-update@npm:4.0.0" @@ -33667,6 +33819,15 @@ __metadata: languageName: node linkType: hard +"lowdb@npm:^7.0.1": + version: 7.0.1 + resolution: "lowdb@npm:7.0.1" + dependencies: + steno: 
^4.0.2 + checksum: 124cbc41d6095ed3765fd50749815ab9d80ee9b9ace87575aaad43979c949604fd6ad055c5e33815e1f03de59940d0e37836a156166521dc16c711ea1f6e027c + languageName: node + linkType: hard + "lower-case@npm:^2.0.2": version: 2.0.2 resolution: "lower-case@npm:2.0.2" @@ -34225,6 +34386,13 @@ __metadata: languageName: node linkType: hard +"mimic-function@npm:^5.0.0": + version: 5.0.1 + resolution: "mimic-function@npm:5.0.1" + checksum: eb5893c99e902ccebbc267c6c6b83092966af84682957f79313311edb95e8bb5f39fb048d77132b700474d1c86d90ccc211e99bae0935447a4834eb4c882982c + languageName: node + linkType: hard + "mimic-response@npm:^1.0.0, mimic-response@npm:^1.0.1": version: 1.0.1 resolution: "mimic-response@npm:1.0.1" @@ -34750,6 +34918,15 @@ __metadata: languageName: node linkType: hard +"nanoid@npm:^5.0.7": + version: 5.0.8 + resolution: "nanoid@npm:5.0.8" + bin: + nanoid: bin/nanoid.js + checksum: df131a515465053ff25c8cf0450ef191e1db83b45fe125af43f50d39feddf1f161d3b2abb34cb993df35a76b427f8d6d982e16e47d67b2fbe843664af025b5e2 + languageName: node + linkType: hard + "napi-build-utils@npm:^1.0.1": version: 1.0.2 resolution: "napi-build-utils@npm:1.0.2" @@ -34984,10 +35161,12 @@ __metadata: languageName: node linkType: hard -"node-api-headers@npm:^0.0.2": - version: 0.0.2 - resolution: "node-api-headers@npm:0.0.2" - checksum: 6b0960d5a9d6bc6640329dd6398d3ee37bcdb5fb649ba8964f1104c9e9f95d96c746cdc13664aa24fd1d7cc5c7b60dd865f7aa02f4d12c7266df151eaf6934db +"node-addon-api@npm:^8.1.0": + version: 8.2.1 + resolution: "node-addon-api@npm:8.2.1" + dependencies: + node-gyp: latest + checksum: ee5e65f387d2a8c137b6f5d733633caa9ba73ee6e781fa5ed4836060fd6e8c1e610af0095fe8d01748c39ecc8eb5f872225b6153e83197bf79253cabd9c97d46 languageName: node linkType: hard @@ -35164,34 +35343,83 @@ __metadata: languageName: node linkType: hard -"node-llama-cpp@npm:^2": - version: 2.8.16 - resolution: "node-llama-cpp@npm:2.8.16" - dependencies: +"node-llama-cpp@npm:3.1.1": + version: 3.1.1 + resolution: "node-llama-cpp@npm:3.1.1" + dependencies: + "@huggingface/jinja": ^0.3.1 + "@node-llama-cpp/linux-arm64": 3.1.1 + "@node-llama-cpp/linux-armv7l": 3.1.1 + "@node-llama-cpp/linux-x64": 3.1.1 + "@node-llama-cpp/linux-x64-cuda": 3.1.1 + "@node-llama-cpp/linux-x64-vulkan": 3.1.1 + "@node-llama-cpp/mac-arm64-metal": 3.1.1 + "@node-llama-cpp/mac-x64": 3.1.1 + "@node-llama-cpp/win-arm64": 3.1.1 + "@node-llama-cpp/win-x64": 3.1.1 + "@node-llama-cpp/win-x64-cuda": 3.1.1 + "@node-llama-cpp/win-x64-vulkan": 3.1.1 + async-retry: ^1.3.3 + bytes: ^3.1.2 chalk: ^5.3.0 chmodrp: ^1.0.2 - cli-progress: ^3.12.0 - cmake-js: ^7.2.1 + cmake-js: ^7.3.0 cross-env: ^7.0.3 cross-spawn: ^7.0.3 - env-var: ^7.3.1 - fs-extra: ^11.1.1 - log-symbols: ^5.1.0 - node-addon-api: ^7.0.0 - octokit: ^3.1.0 - ora: ^7.0.1 - simple-git: ^3.19.1 - uuid: ^9.0.0 + env-var: ^7.5.0 + filenamify: ^6.0.0 + fs-extra: ^11.2.0 + ignore: ^5.3.2 + ipull: ^3.7.4 + is-unicode-supported: ^2.1.0 + lifecycle-utils: ^1.7.0 + log-symbols: ^7.0.0 + nanoid: ^5.0.7 + node-addon-api: ^8.1.0 + octokit: ^4.0.2 + ora: ^8.1.0 + pretty-ms: ^9.1.0 + proper-lockfile: ^4.1.2 + semver: ^7.6.3 + simple-git: ^3.27.0 + slice-ansi: ^7.1.0 + stdout-update: ^4.0.1 + strip-ansi: ^7.1.0 + validate-npm-package-name: ^5.0.1 which: ^4.0.0 yargs: ^17.7.2 peerDependencies: typescript: ">=5.0.0" + dependenciesMeta: + "@node-llama-cpp/linux-arm64": + optional: true + "@node-llama-cpp/linux-armv7l": + optional: true + "@node-llama-cpp/linux-x64": + optional: true + "@node-llama-cpp/linux-x64-cuda": + optional: true + 
"@node-llama-cpp/linux-x64-vulkan": + optional: true + "@node-llama-cpp/mac-arm64-metal": + optional: true + "@node-llama-cpp/mac-x64": + optional: true + "@node-llama-cpp/win-arm64": + optional: true + "@node-llama-cpp/win-x64": + optional: true + "@node-llama-cpp/win-x64-cuda": + optional: true + "@node-llama-cpp/win-x64-vulkan": + optional: true peerDependenciesMeta: typescript: optional: true bin: + nlc: dist/cli/cli.js node-llama-cpp: dist/cli/cli.js - checksum: 51607ec21f02a702a440fae6a8c06b00b039cbfcb09a7d92eaa16e97972ff71874285d9d2561429bae03b52da81e0d12d2d22356dd162f30ce92d37c928014c4 + checksum: 25c644a64d3f42b766b6d8ef1d84e5be84c1fa664cad961efa41001e68b4de65b0d926e25fbbc745d2702579567d9ea59cbd69df01158c2516ab68907ee0926f languageName: node linkType: hard @@ -35526,21 +35754,21 @@ __metadata: languageName: node linkType: hard -"octokit@npm:^3.1.0": - version: 3.1.0 - resolution: "octokit@npm:3.1.0" +"octokit@npm:^4.0.2": + version: 4.0.2 + resolution: "octokit@npm:4.0.2" dependencies: - "@octokit/app": ^14.0.0 - "@octokit/core": ^5.0.0 - "@octokit/oauth-app": ^6.0.0 - "@octokit/plugin-paginate-graphql": ^4.0.0 - "@octokit/plugin-paginate-rest": ^8.0.0 - "@octokit/plugin-rest-endpoint-methods": ^9.0.0 - "@octokit/plugin-retry": ^6.0.0 - "@octokit/plugin-throttling": ^7.0.0 - "@octokit/request-error": ^5.0.0 - "@octokit/types": ^11.1.0 - checksum: 148c9dcf3a65a824f69802a950bdc5dae6fd70e16024d08b1fdd65a7b080bccc037326e0145a8a4aa6b041f1ceefe1d04a7228750de8256d55e82beaa1f2df37 + "@octokit/app": ^15.0.0 + "@octokit/core": ^6.0.0 + "@octokit/oauth-app": ^7.0.0 + "@octokit/plugin-paginate-graphql": ^5.0.0 + "@octokit/plugin-paginate-rest": ^11.0.0 + "@octokit/plugin-rest-endpoint-methods": ^13.0.0 + "@octokit/plugin-retry": ^7.0.0 + "@octokit/plugin-throttling": ^9.0.0 + "@octokit/request-error": ^6.0.0 + "@octokit/types": ^13.0.0 + checksum: 6e3d6ccd67738d05b8b1a101798b9fcd5cb57560ba761761918d9d7e93e551b846b51f31125ee642fca443e37babd81b42c09a69fed79e33de0f97d8b694a8f7 languageName: node linkType: hard @@ -35620,6 +35848,15 @@ __metadata: languageName: node linkType: hard +"onetime@npm:^7.0.0": + version: 7.0.0 + resolution: "onetime@npm:7.0.0" + dependencies: + mimic-function: ^5.0.0 + checksum: eb08d2da9339819e2f9d52cab9caf2557d80e9af8c7d1ae86e1a0fef027d00a88e9f5bd67494d350df360f7c559fbb44e800b32f310fb989c860214eacbb561c + languageName: node + linkType: hard + "onnx-proto@npm:^4.0.4": version: 4.0.4 resolution: "onnx-proto@npm:4.0.4" @@ -35868,20 +36105,20 @@ __metadata: languageName: node linkType: hard -"ora@npm:^7.0.1": - version: 7.0.1 - resolution: "ora@npm:7.0.1" +"ora@npm:^8.1.0": + version: 8.1.0 + resolution: "ora@npm:8.1.0" dependencies: chalk: ^5.3.0 - cli-cursor: ^4.0.0 - cli-spinners: ^2.9.0 + cli-cursor: ^5.0.0 + cli-spinners: ^2.9.2 is-interactive: ^2.0.0 - is-unicode-supported: ^1.3.0 - log-symbols: ^5.1.0 - stdin-discarder: ^0.1.0 - string-width: ^6.1.0 + is-unicode-supported: ^2.0.0 + log-symbols: ^6.0.0 + stdin-discarder: ^0.2.2 + string-width: ^7.2.0 strip-ansi: ^7.1.0 - checksum: 0842b8b9a96a8586085cafdc25077c76fed8ade072c52c53e748cf40a214731d2215a4d6081d8fbd6203d2b897e834332bda53eb64afd1a5968da17daf020bff + checksum: 81b9a2627a687c2b16fa08b0ae0b3641b320bdbeca831eb323df0cbb1e5ddc096b94391ff342839a1db47f5a895cebb2a8d06c319a5d935fc48628f35a036107 languageName: node linkType: hard @@ -36213,6 +36450,20 @@ __metadata: languageName: node linkType: hard +"parse-ms@npm:^3.0.0": + version: 3.0.0 + resolution: "parse-ms@npm:3.0.0" + checksum: 
fc602bba093835562321a67a9d6c8c9687ca4f26a09459a77e07ebd7efddd1a5766725ec60eb0c83a2abe67f7a23808f7deb1c1226727776eaf7f9607ae09db2 + languageName: node + linkType: hard + +"parse-ms@npm:^4.0.0": + version: 4.0.0 + resolution: "parse-ms@npm:4.0.0" + checksum: 673c801d9f957ff79962d71ed5a24850163f4181a90dd30c4e3666b3a804f53b77f1f0556792e8b2adbb5d58757907d1aa51d7d7dc75997c2a56d72937cbc8b7 + languageName: node + linkType: hard + "parse-numeric-range@npm:^1.3.0": version: 1.3.0 resolution: "parse-numeric-range@npm:1.3.0" @@ -37411,6 +37662,13 @@ __metadata: languageName: node linkType: hard +"pretty-bytes@npm:^6.1.0": + version: 6.1.1 + resolution: "pretty-bytes@npm:6.1.1" + checksum: 43d29d909d2d88072da2c3d72f8fd0f2d2523c516bfa640aff6e31f596ea1004b6601f4cabc50d14b2cf10e82635ebe5b7d9378f3d5bae1c0067131829421b8a + languageName: node + linkType: hard + "pretty-error@npm:^4.0.0": version: 4.0.0 resolution: "pretty-error@npm:4.0.0" @@ -37476,6 +37734,24 @@ __metadata: languageName: node linkType: hard +"pretty-ms@npm:^8.0.0": + version: 8.0.0 + resolution: "pretty-ms@npm:8.0.0" + dependencies: + parse-ms: ^3.0.0 + checksum: b7d2a8182887af0e5ab93f9df331f10db9b8eda86855e2de115eb01a6c501bde5631a8813b1b0abdd7d045e79b08ae875369a8fd279a3dacd6d9e572bdd3bfa6 + languageName: node + linkType: hard + +"pretty-ms@npm:^9.1.0": + version: 9.1.0 + resolution: "pretty-ms@npm:9.1.0" + dependencies: + parse-ms: ^4.0.0 + checksum: 0f66507467f2005040cccdcb36f35b82674d7809f41c4432009235ed6c920787afa17f621c25b7ccb8ccd80b0840c7b71f7f4a3addb8f0eeef3a033ff1e5cf71 + languageName: node + linkType: hard + "pretty-time@npm:^1.1.0": version: 1.1.0 resolution: "pretty-time@npm:1.1.0" @@ -37621,6 +37897,17 @@ __metadata: languageName: node linkType: hard +"proper-lockfile@npm:^4.1.2": + version: 4.1.2 + resolution: "proper-lockfile@npm:4.1.2" + dependencies: + graceful-fs: ^4.2.4 + retry: ^0.12.0 + signal-exit: ^3.0.2 + checksum: 00078ee6a61c216a56a6140c7d2a98c6c733b3678503002dc073ab8beca5d50ca271de4c85fca13b9b8ee2ff546c36674d1850509b84a04a5d0363bcb8638939 + languageName: node + linkType: hard + "property-information@npm:^5.0.0, property-information@npm:^5.3.0": version: 5.6.0 resolution: "property-information@npm:5.6.0" @@ -38962,6 +39249,16 @@ __metadata: languageName: node linkType: hard +"restore-cursor@npm:^5.0.0": + version: 5.1.0 + resolution: "restore-cursor@npm:5.1.0" + dependencies: + onetime: ^7.0.0 + signal-exit: ^4.1.0 + checksum: 838dd54e458d89cfbc1a923b343c1b0f170a04100b4ce1733e97531842d7b440463967e521216e8ab6c6f8e89df877acc7b7f4c18ec76e99fb9bf5a60d358d2c + languageName: node + linkType: hard + "retry-axios@npm:^2.6.0": version: 2.6.0 resolution: "retry-axios@npm:2.6.0" @@ -39854,14 +40151,14 @@ __metadata: languageName: node linkType: hard -"simple-git@npm:^3.19.1": - version: 3.19.1 - resolution: "simple-git@npm:3.19.1" +"simple-git@npm:^3.27.0": + version: 3.27.0 + resolution: "simple-git@npm:3.27.0" dependencies: "@kwsites/file-exists": ^1.1.1 "@kwsites/promise-deferred": ^1.1.1 - debug: ^4.3.4 - checksum: ab7c6901130eadd5758c5f1b0d957573b13cd981549cb5853e757cf13f8cb53b79a50e1acde8eaceba88da814d252f90cf7feab9bd67c64db903250adc51b02f + debug: ^4.3.5 + checksum: bc602d67317a5421363f4cbe446bc71336387a7ea9864b23993dcbbd7e4847e346a234aa5b46bf9d80130d2448cbaeb21cf8f7b62572dce093fb4643ff7ffafd languageName: node linkType: hard @@ -39927,6 +40224,13 @@ __metadata: languageName: node linkType: hard +"sleep-promise@npm:^9.1.0": + version: 9.1.0 + resolution: "sleep-promise@npm:9.1.0" + checksum: 
a93359c18d2e4d586f2ecd4575cbdbc92bcbd9d387bd4bca249892abb512ca4e80075f67e53ec1279ea4555e8a1f6dc9df9e90f8aa71178ad943e9ba1f27debb + languageName: node + linkType: hard + "slice-ansi@npm:^3.0.0": version: 3.0.0 resolution: "slice-ansi@npm:3.0.0" @@ -39959,6 +40263,16 @@ __metadata: languageName: node linkType: hard +"slice-ansi@npm:^7.1.0": + version: 7.1.0 + resolution: "slice-ansi@npm:7.1.0" + dependencies: + ansi-styles: ^6.2.1 + is-fullwidth-code-point: ^5.0.0 + checksum: 10313dd3cf7a2e4b265f527b1684c7c568210b09743fd1bd74f2194715ed13ffba653dc93a5fa79e3b1711518b8990a732cb7143aa01ddafe626e99dfa6474b2 + languageName: node + linkType: hard + "smart-buffer@npm:^4.2.0": version: 4.2.0 resolution: "smart-buffer@npm:4.2.0" @@ -40289,13 +40603,32 @@ __metadata: languageName: node linkType: hard -"stdin-discarder@npm:^0.2.1": +"stdin-discarder@npm:^0.2.1, stdin-discarder@npm:^0.2.2": version: 0.2.2 resolution: "stdin-discarder@npm:0.2.2" checksum: 642ffd05bd5b100819d6b24a613d83c6e3857c6de74eb02fc51506fa61dc1b0034665163831873868157c4538d71e31762bcf319be86cea04c3aba5336470478 languageName: node linkType: hard +"stdout-update@npm:^4.0.1": + version: 4.0.1 + resolution: "stdout-update@npm:4.0.1" + dependencies: + ansi-escapes: ^6.2.0 + ansi-styles: ^6.2.1 + string-width: ^7.1.0 + strip-ansi: ^7.1.0 + checksum: 3b5207b3a747bb917ca3b4a4894f163afae83a292ab151fad759cab736d240098ecc54ccedc7266fa3fd85eb61bb0f0f73acd562f6b02da8fa42e1c5ba2fddaf + languageName: node + linkType: hard + +"steno@npm:^4.0.2": + version: 4.0.2 + resolution: "steno@npm:4.0.2" + checksum: 1d135aac18058e4629993d036e46b8a9af155e2a49260ece7047df8eb916f2c04f396b364db8543ad5b8c16d89c8747a6bdc43f3d85c79dc1728ffc9a2239a32 + languageName: node + linkType: hard + "stop-iteration-iterator@npm:^1.0.0": version: 1.0.0 resolution: "stop-iteration-iterator@npm:1.0.0" @@ -40399,18 +40732,7 @@ __metadata: languageName: node linkType: hard -"string-width@npm:^6.1.0": - version: 6.1.0 - resolution: "string-width@npm:6.1.0" - dependencies: - eastasianwidth: ^0.2.0 - emoji-regex: ^10.2.1 - strip-ansi: ^7.0.1 - checksum: 8aefb456a230c8d7fe254049b1b2d62603da1a3b6c7fc9f3332f6779583cc1c72653f9b6e4cd0c1c92befee1565d4a0a7542d09ba4ceb6d96af02fbd8425bb03 - languageName: node - linkType: hard - -"string-width@npm:^7.0.0": +"string-width@npm:^7.0.0, string-width@npm:^7.1.0, string-width@npm:^7.2.0": version: 7.2.0 resolution: "string-width@npm:7.2.0" dependencies: @@ -42460,13 +42782,10 @@ __metadata: languageName: node linkType: hard -"universal-github-app-jwt@npm:^1.1.1": - version: 1.1.1 - resolution: "universal-github-app-jwt@npm:1.1.1" - dependencies: - "@types/jsonwebtoken": ^9.0.0 - jsonwebtoken: ^9.0.0 - checksum: 31d30150b9eafa9fa8bf57bd6f97d7d91d4509ad24fa673a6e29ac2295b8f1fc293a70cb44fa807af7cfd787db1cf6edd3876fc0cae31230c5292b76677159fc +"universal-github-app-jwt@npm:^2.2.0": + version: 2.2.0 + resolution: "universal-github-app-jwt@npm:2.2.0" + checksum: 09f8e9710453749bd669fb6511157f03683674066f04696b10d42c18d87cb40d77a5b7504b5bd6f4e329229fff8715e01958217560accd941381c6b4cb7a46fe languageName: node linkType: hard @@ -42889,6 +43208,13 @@ __metadata: languageName: node linkType: hard +"validate-npm-package-name@npm:^5.0.1": + version: 5.0.1 + resolution: "validate-npm-package-name@npm:5.0.1" + checksum: 0d583a1af23aeffea7748742cf22b6802458736fb8b60323ba5949763824d46f796474b0e1b9206beb716f9d75269e19dbd7795d6b038b29d561be95dd827381 + languageName: node + linkType: hard + "value-equal@npm:^1.0.1": version: 1.0.1 resolution: "value-equal@npm:1.0.1" @@ 
-43986,7 +44312,7 @@ __metadata: languageName: node linkType: hard -"yargs@npm:^17.6.0, yargs@npm:^17.7.2": +"yargs@npm:^17.7.2": version: 17.7.2 resolution: "yargs@npm:17.7.2" dependencies: @@ -44025,6 +44351,13 @@ __metadata: languageName: node linkType: hard +"yoctocolors@npm:^2.1.1": + version: 2.1.1 + resolution: "yoctocolors@npm:2.1.1" + checksum: 563fbec88bce9716d1044bc98c96c329e1d7a7c503e6f1af68f1ff914adc3ba55ce953c871395e2efecad329f85f1632f51a99c362032940321ff80c42a6f74d + languageName: node + linkType: hard + "youtube-transcript@npm:^1.0.6": version: 1.0.6 resolution: "youtube-transcript@npm:1.0.6" From 306f31e61b6fa993bfb498cd671bb2ff4615777a Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 16:14:06 -0800 Subject: [PATCH 078/100] chore(vertexai): Release 0.1.2 (#7185) --- libs/langchain-google-common/package.json | 2 +- libs/langchain-google-gauth/package.json | 4 ++-- libs/langchain-google-vertexai-web/package.json | 4 ++-- libs/langchain-google-vertexai/package.json | 4 ++-- libs/langchain-google-webauth/package.json | 4 ++-- yarn.lock | 14 +++++++------- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/libs/langchain-google-common/package.json b/libs/langchain-google-common/package.json index 4209d3c70f66..1564f1cb9db7 100644 --- a/libs/langchain-google-common/package.json +++ b/libs/langchain-google-common/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-common", - "version": "0.1.1", + "version": "0.1.2", "description": "Core types and classes for Google services.", "type": "module", "engines": { diff --git a/libs/langchain-google-gauth/package.json b/libs/langchain-google-gauth/package.json index 2614af2df617..f1accfaf6a78 100644 --- a/libs/langchain-google-gauth/package.json +++ b/libs/langchain-google-gauth/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-gauth", - "version": "0.1.0", + "version": "0.1.2", "description": "Google auth based authentication support for Google services", "type": "module", "engines": { @@ -35,7 +35,7 @@ "author": "LangChain", "license": "MIT", "dependencies": { - "@langchain/google-common": "~0.1.0", + "@langchain/google-common": "~0.1.2", "google-auth-library": "^8.9.0" }, "peerDependencies": { diff --git a/libs/langchain-google-vertexai-web/package.json b/libs/langchain-google-vertexai-web/package.json index 5fdcb89f3314..363b3032a0a6 100644 --- a/libs/langchain-google-vertexai-web/package.json +++ b/libs/langchain-google-vertexai-web/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-vertexai-web", - "version": "0.1.0", + "version": "0.1.2", "description": "LangChain.js support for Google Vertex AI Web", "type": "module", "engines": { @@ -32,7 +32,7 @@ "author": "LangChain", "license": "MIT", "dependencies": { - "@langchain/google-webauth": "~0.1.0" + "@langchain/google-webauth": "~0.1.2" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" diff --git a/libs/langchain-google-vertexai/package.json b/libs/langchain-google-vertexai/package.json index 7274f14f58ae..9379589af442 100644 --- a/libs/langchain-google-vertexai/package.json +++ b/libs/langchain-google-vertexai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-vertexai", - "version": "0.1.0", + "version": "0.1.2", "description": "LangChain.js support for Google Vertex AI", "type": "module", "engines": { @@ -32,7 +32,7 @@ "author": "LangChain", "license": "MIT", "dependencies": { - "@langchain/google-gauth": "~0.1.0" + "@langchain/google-gauth": "~0.1.2" }, "peerDependencies": { "@langchain/core": ">=0.2.21 <0.4.0" 
diff --git a/libs/langchain-google-webauth/package.json b/libs/langchain-google-webauth/package.json index 149054defe14..014ae081f302 100644 --- a/libs/langchain-google-webauth/package.json +++ b/libs/langchain-google-webauth/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-webauth", - "version": "0.1.0", + "version": "0.1.2", "description": "Web-based authentication support for Google services", "type": "module", "engines": { @@ -32,7 +32,7 @@ "author": "LangChain", "license": "MIT", "dependencies": { - "@langchain/google-common": "~0.1.0", + "@langchain/google-common": "~0.1.2", "web-auth-library": "^1.0.3" }, "peerDependencies": { diff --git a/yarn.lock b/yarn.lock index 04aa2e456470..16cc8eb38d04 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12080,7 +12080,7 @@ __metadata: languageName: unknown linkType: soft -"@langchain/google-common@^0.1.0, @langchain/google-common@workspace:*, @langchain/google-common@workspace:libs/langchain-google-common, @langchain/google-common@~0.1.0": +"@langchain/google-common@^0.1.0, @langchain/google-common@workspace:*, @langchain/google-common@workspace:libs/langchain-google-common, @langchain/google-common@~0.1.2": version: 0.0.0-use.local resolution: "@langchain/google-common@workspace:libs/langchain-google-common" dependencies: @@ -12115,13 +12115,13 @@ __metadata: languageName: unknown linkType: soft -"@langchain/google-gauth@workspace:libs/langchain-google-gauth, @langchain/google-gauth@~0.1.0": +"@langchain/google-gauth@workspace:libs/langchain-google-gauth, @langchain/google-gauth@~0.1.2": version: 0.0.0-use.local resolution: "@langchain/google-gauth@workspace:libs/langchain-google-gauth" dependencies: "@jest/globals": ^29.5.0 "@langchain/core": "workspace:*" - "@langchain/google-common": ~0.1.0 + "@langchain/google-common": ~0.1.2 "@langchain/scripts": ">=0.1.0 <0.2.0" "@swc/core": ^1.3.90 "@swc/jest": ^0.2.29 @@ -12194,7 +12194,7 @@ __metadata: "@jest/globals": ^29.5.0 "@langchain/core": "workspace:*" "@langchain/google-common": ^0.1.0 - "@langchain/google-webauth": ~0.1.0 + "@langchain/google-webauth": ~0.1.2 "@langchain/scripts": ">=0.1.0 <0.2.0" "@langchain/standard-tests": 0.0.0 "@swc/core": ^1.3.90 @@ -12230,7 +12230,7 @@ __metadata: "@jest/globals": ^29.5.0 "@langchain/core": "workspace:*" "@langchain/google-common": ^0.1.0 - "@langchain/google-gauth": ~0.1.0 + "@langchain/google-gauth": ~0.1.2 "@langchain/scripts": ">=0.1.0 <0.2.0" "@langchain/standard-tests": 0.0.0 "@swc/core": ^1.3.90 @@ -12259,13 +12259,13 @@ __metadata: languageName: unknown linkType: soft -"@langchain/google-webauth@workspace:libs/langchain-google-webauth, @langchain/google-webauth@~0.1.0": +"@langchain/google-webauth@workspace:libs/langchain-google-webauth, @langchain/google-webauth@~0.1.2": version: 0.0.0-use.local resolution: "@langchain/google-webauth@workspace:libs/langchain-google-webauth" dependencies: "@jest/globals": ^29.5.0 "@langchain/core": "workspace:*" - "@langchain/google-common": ~0.1.0 + "@langchain/google-common": ~0.1.2 "@langchain/scripts": ">=0.1.0 <0.2.0" "@swc/core": ^1.3.90 "@swc/jest": ^0.2.29 From 18b1810656d07fd9a2f654fd2a0adc173e7323fa Mon Sep 17 00:00:00 2001 From: Pavlo Sobchuk Date: Tue, 12 Nov 2024 00:18:22 +0000 Subject: [PATCH 079/100] feat(community): Add AirtableLoader to load documents from Airtable with retry and pagination handling (#7106) Co-authored-by: Jacob Lee --- .../document_loaders/web_loaders/airtable.mdx | 25 +++ .../src/document_loaders/airtable_load.ts | 44 +++++ libs/langchain-community/.gitignore | 4 + 
libs/langchain-community/langchain.config.js | 14 +- libs/langchain-community/package.json | 13 ++ .../tests/airtable.int.test.ts | 59 ++++++ .../document_loaders/tests/airtable.test.ts | 177 ++++++++++++++++++ .../src/document_loaders/web/airtable.ts | 172 +++++++++++++++++ .../src/load/import_map.ts | 1 + 9 files changed, 504 insertions(+), 5 deletions(-) create mode 100644 docs/core_docs/docs/integrations/document_loaders/web_loaders/airtable.mdx create mode 100644 examples/src/document_loaders/airtable_load.ts create mode 100644 libs/langchain-community/src/document_loaders/tests/airtable.int.test.ts create mode 100644 libs/langchain-community/src/document_loaders/tests/airtable.test.ts create mode 100644 libs/langchain-community/src/document_loaders/web/airtable.ts diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/airtable.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/airtable.mdx new file mode 100644 index 000000000000..eb294c21aa1c --- /dev/null +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/airtable.mdx @@ -0,0 +1,25 @@ +--- +hide_table_of_contents: true +--- + +import loadExample from "@examples/document_loaders/airtable_load"; +import CodeBlock from "@theme/CodeBlock"; + +# AirtableLoader + +The `AirtableLoader` class provides functionality to load documents from Airtable tables. It supports two main methods: + +1. `load()`: Retrieves all records at once, ideal for small to moderate datasets. +2. `loadLazy()`: Fetches records one by one, which is more memory-efficient for large datasets. + +## Prerequisites + +Ensure that your Airtable API token is available as an environment variable: + +```typescript +process.env.AIRTABLE_API_TOKEN = "YOUR_AIRTABLE_API_TOKEN"; +``` + +## Usage + +{loadExample} diff --git a/examples/src/document_loaders/airtable_load.ts b/examples/src/document_loaders/airtable_load.ts new file mode 100644 index 000000000000..e05a7600b3e5 --- /dev/null +++ b/examples/src/document_loaders/airtable_load.ts @@ -0,0 +1,44 @@ +import { AirtableLoader } from "@langchain/community/document_loaders/web/airtable"; +import { Document } from "@langchain/core/documents"; + +// Default airtable loader +const loader = new AirtableLoader({ + tableId: "YOUR_TABLE_ID", + baseId: "YOUR_BASE_ID", +}); + +try { + const documents: Document[] = await loader.load(); + console.log("Loaded documents:", documents); +} catch (error) { + console.error("Error loading documents:", error); +} + +// Lazy airtable loader +const loaderLazy = new AirtableLoader({ + tableId: "YOUR_TABLE_ID", + baseId: "YOUR_BASE_ID", +}); + +try { + console.log("Lazily loading documents:"); + for await (const document of loader.loadLazy()) { + console.log("Loaded document:", document); + } +} catch (error) { + console.error("Error loading documents lazily:", error); +} + +// Airtable loader with specific view +const loaderView = new AirtableLoader({ + tableId: "YOUR_TABLE_ID", + baseId: "YOUR_BASE_ID", + kwargs: { view: "YOUR_VIEW_NAME" }, +}); + +try { + const documents: Document[] = await loader.load(); + console.log("Loaded documents with view:", documents); +} catch (error) { + console.error("Error loading documents with view:", error); +} diff --git a/libs/langchain-community/.gitignore b/libs/langchain-community/.gitignore index 890c93717dea..7efe23166245 100644 --- a/libs/langchain-community/.gitignore +++ b/libs/langchain-community/.gitignore @@ -998,6 +998,10 @@ document_loaders/fs/pptx.cjs document_loaders/fs/pptx.js 
document_loaders/fs/pptx.d.ts document_loaders/fs/pptx.d.cts +document_loaders/web/airtable.cjs +document_loaders/web/airtable.js +document_loaders/web/airtable.d.ts +document_loaders/web/airtable.d.cts utils/convex.cjs utils/convex.js utils/convex.d.ts diff --git a/libs/langchain-community/langchain.config.js b/libs/langchain-community/langchain.config.js index 63b495f92f2c..e4d9102124de 100644 --- a/libs/langchain-community/langchain.config.js +++ b/libs/langchain-community/langchain.config.js @@ -189,7 +189,8 @@ export const config = { // callbacks "callbacks/handlers/llmonitor": "callbacks/handlers/llmonitor", "callbacks/handlers/lunary": "callbacks/handlers/lunary", - "callbacks/handlers/upstash_ratelimit": "callbacks/handlers/upstash_ratelimit", + "callbacks/handlers/upstash_ratelimit": + "callbacks/handlers/upstash_ratelimit", // retrievers "retrievers/amazon_kendra": "retrievers/amazon_kendra", "retrievers/amazon_knowledge_base": "retrievers/amazon_knowledge_base", @@ -260,6 +261,7 @@ export const config = { "indexes/memory": "indexes/memory", "indexes/sqlite": "indexes/sqlite", // document_loaders + "document_loaders/web/airtable": "document_loaders/web/airtable", "document_loaders/web/apify_dataset": "document_loaders/web/apify_dataset", "document_loaders/web/assemblyai": "document_loaders/web/assemblyai", "document_loaders/web/azure_blob_storage_container": @@ -310,18 +312,20 @@ export const config = { "utils/event_source_parse": "utils/event_source_parse", "utils/cassandra": "utils/cassandra", // experimental - "experimental/callbacks/handlers/datadog": "experimental/callbacks/handlers/datadog", + "experimental/callbacks/handlers/datadog": + "experimental/callbacks/handlers/datadog", "experimental/graph_transformers/llm": "experimental/graph_transformers/llm", "experimental/multimodal_embeddings/googlevertexai": "experimental/multimodal_embeddings/googlevertexai", "experimental/hubs/makersuite/googlemakersuitehub": "experimental/hubs/makersuite/googlemakersuitehub", - "experimental/chat_models/ollama_functions": "experimental/chat_models/ollama_functions", + "experimental/chat_models/ollama_functions": + "experimental/chat_models/ollama_functions", "experimental/llms/chrome_ai": "experimental/llms/chrome_ai", "experimental/tools/pyinterpreter": "experimental/tools/pyinterpreter", // chains - "chains/graph_qa/cypher": "chains/graph_qa/cypher" + "chains/graph_qa/cypher": "chains/graph_qa/cypher", }, requiresOptionalDependency: [ "tools/aws_sfn", @@ -520,7 +524,7 @@ export const config = { // chains "chains/graph_qa/cypher", // langgraph checkpointers - "langgraph/checkpointers/vercel_kv" + "langgraph/checkpointers/vercel_kv", ], packageSuffix: "community", tsConfigPath: resolve("./tsconfig.json"), diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 23517ee984d2..2d8662b45bc4 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -2958,6 +2958,15 @@ "import": "./document_loaders/fs/pptx.js", "require": "./document_loaders/fs/pptx.cjs" }, + "./document_loaders/web/airtable": { + "types": { + "import": "./document_loaders/web/airtable.d.ts", + "require": "./document_loaders/web/airtable.d.cts", + "default": "./document_loaders/web/airtable.d.ts" + }, + "import": "./document_loaders/web/airtable.js", + "require": "./document_loaders/web/airtable.cjs" + }, "./utils/convex": { "types": { "import": "./utils/convex.d.ts", @@ -4061,6 +4070,10 @@ "document_loaders/fs/pptx.js", 
"document_loaders/fs/pptx.d.ts", "document_loaders/fs/pptx.d.cts", + "document_loaders/web/airtable.cjs", + "document_loaders/web/airtable.js", + "document_loaders/web/airtable.d.ts", + "document_loaders/web/airtable.d.cts", "utils/convex.cjs", "utils/convex.js", "utils/convex.d.ts", diff --git a/libs/langchain-community/src/document_loaders/tests/airtable.int.test.ts b/libs/langchain-community/src/document_loaders/tests/airtable.int.test.ts new file mode 100644 index 000000000000..c76dc860a745 --- /dev/null +++ b/libs/langchain-community/src/document_loaders/tests/airtable.int.test.ts @@ -0,0 +1,59 @@ +/** + * NOTE: AIRTABLE_API_TOKEN should be set in environment variables + */ +import { Document } from "@langchain/core/documents"; +import { AirtableLoader } from "../web/airtable.js"; + +describe("AirtableLoader Integration Tests", () => { + // Ensure that the environment variables are set + + const baseId = "BASE_ID"; + const tableId = "TABLE_ID"; + + // Integration tests for the load method + describe("load", () => { + it("should load documents from Airtable", async () => { + const loader = new AirtableLoader({ tableId, baseId }); + + const documents = await loader.load(); + + expect(documents).toBeDefined(); + expect(documents.length).toBeGreaterThan(0); + + documents.forEach((doc) => { + expect(doc).toBeInstanceOf(Document); + expect(doc.pageContent).toBeDefined(); + expect(doc.metadata).toMatchObject({ + source: `${baseId}_${tableId}`, + base_id: baseId, + table_id: tableId, + }); + }); + }, 20000); + }); + + // Integration tests for the loadLazy method + describe("loadLazy", () => { + it("should lazily load documents from Airtable", async () => { + const loader = new AirtableLoader({ tableId, baseId }); + + const documents: Document[] = []; + for await (const doc of loader.loadLazy()) { + documents.push(doc); + } + + expect(documents).toBeDefined(); + expect(documents.length).toBeGreaterThan(0); + + documents.forEach((doc) => { + expect(doc).toBeInstanceOf(Document); + expect(doc.pageContent).toBeDefined(); + expect(doc.metadata).toMatchObject({ + source: `${baseId}_${tableId}`, + base_id: baseId, + table_id: tableId, + }); + }); + }, 20000); + }); +}); diff --git a/libs/langchain-community/src/document_loaders/tests/airtable.test.ts b/libs/langchain-community/src/document_loaders/tests/airtable.test.ts new file mode 100644 index 000000000000..aa4b3f023058 --- /dev/null +++ b/libs/langchain-community/src/document_loaders/tests/airtable.test.ts @@ -0,0 +1,177 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +/* eslint-disable no-process-env */ +import { Document } from "@langchain/core/documents"; +import { expect, jest } from "@jest/globals"; +import { AirtableLoader } from "../web/airtable.js"; + +// Mock the global fetch function +(global as any).fetch = jest.fn(); + +describe("AirtableLoader", () => { + beforeEach(() => { + jest.clearAllMocks(); + process.env.AIRTABLE_API_TOKEN = "foobar"; + }); + + // Tests for the load method + describe("load", () => { + it("should load documents correctly", async () => { + const loader = new AirtableLoader({ + tableId: "tableId", + baseId: "baseId", + kwargs: { view: "test-view" }, + }); + + // Spy on the private fetchRecords method + const mockFetchRecords = jest.spyOn(loader as any, "fetchRecords"); + + // Mock data to be returned by fetchRecords + const mockRecords = [ + { + id: "rec1", + fields: { Name: "Record 1" }, + createdTime: "2021-01-01T00:00:00.000Z", + }, + { + id: "rec2", + fields: { Name: "Record 2" }, + createdTime: 
"2021-01-02T00:00:00.000Z", + }, + ]; + + // Mock the resolved value of fetchRecords + mockFetchRecords.mockResolvedValue({ records: mockRecords }); + + const documents = await loader.load(); + + expect(documents).toHaveLength(2); + expect(documents[0].pageContent).toBe(JSON.stringify(mockRecords[0])); + expect(documents[1].pageContent).toBe(JSON.stringify(mockRecords[1])); + expect(mockFetchRecords).toHaveBeenCalledTimes(1); + }); + + it("should handle pagination correctly", async () => { + const loader = new AirtableLoader({ + tableId: "tableId", + baseId: "baseId", + }); + + const mockFetchRecords = jest.spyOn(loader as any, "fetchRecords"); + const mockRecordsPage1 = [ + { + id: "rec1", + fields: { Name: "Record 1" }, + createdTime: "2021-01-01T00:00:00.000Z", + }, + ]; + const mockRecordsPage2 = [ + { + id: "rec2", + fields: { Name: "Record 2" }, + createdTime: "2021-01-02T00:00:00.000Z", + }, + ]; + + // Mock fetchRecords to simulate pagination + mockFetchRecords + .mockResolvedValueOnce({ + records: mockRecordsPage1, + offset: "next-page", + }) + .mockResolvedValueOnce({ records: mockRecordsPage2 }); + + const documents = await loader.load(); + + expect(documents).toHaveLength(2); + expect(documents[0].pageContent).toBe( + JSON.stringify(mockRecordsPage1[0]) + ); + expect(documents[1].pageContent).toBe( + JSON.stringify(mockRecordsPage2[0]) + ); + expect(mockFetchRecords).toHaveBeenCalledTimes(2); + }); + + it("should retry fetchRecords on failure", async () => { + const loader = new AirtableLoader({ + tableId: "tableId", + baseId: "baseId", + }); + + const mockFetchRecords = jest.spyOn(loader as any, "fetchRecords"); + const mockError = new Error("Network Error"); + const mockRecords = [ + { + id: "rec1", + fields: { Name: "Record 1" }, + createdTime: "2021-01-01T00:00:00.000Z", + }, + ]; + + // Simulate a failure on the first call and success on the second + mockFetchRecords + .mockRejectedValueOnce(mockError) + .mockResolvedValueOnce({ records: mockRecords }); + + const documents = await loader.load(); + + expect(documents).toHaveLength(1); + expect(documents[0].pageContent).toBe(JSON.stringify(mockRecords[0])); + expect(mockFetchRecords).toHaveBeenCalledTimes(2); + }); + }); + + // Tests for the loadLazy method + describe("loadLazy", () => { + it("should yield documents correctly", async () => { + const loader = new AirtableLoader({ + tableId: "tableId", + baseId: "baseId", + }); + + const mockFetchRecords = jest.spyOn(loader as any, "fetchRecords"); + const mockRecords = [ + { + id: "rec1", + fields: { Name: "Record 1" }, + createdTime: "2021-01-01T00:00:00.000Z", + }, + { + id: "rec2", + fields: { Name: "Record 2" }, + createdTime: "2021-01-02T00:00:00.000Z", + }, + ]; + + mockFetchRecords.mockResolvedValue({ records: mockRecords }); + + const documents: Document[] = []; + for await (const doc of loader.loadLazy()) { + documents.push(doc); + } + + expect(documents).toHaveLength(2); + expect(documents[0].pageContent).toBe(JSON.stringify(mockRecords[0])); + expect(documents[1].pageContent).toBe(JSON.stringify(mockRecords[1])); + expect(mockFetchRecords).toHaveBeenCalledTimes(1); + }); + + it("should handle errors in loadLazy", async () => { + const loader = new AirtableLoader({ + tableId: "tableId", + baseId: "baseId", + }); + + const mockFetchRecords = jest.spyOn(loader as any, "fetchRecords"); + const mockError = new Error("Network Error"); + + mockFetchRecords.mockRejectedValue(mockError); + + const iterator = loader.loadLazy(); + await 
expect(iterator.next()).rejects.toThrow( + "Failed to load Airtable records lazily" + ); + expect(mockFetchRecords).toHaveBeenCalled(); + }); + }); +}); diff --git a/libs/langchain-community/src/document_loaders/web/airtable.ts b/libs/langchain-community/src/document_loaders/web/airtable.ts new file mode 100644 index 000000000000..7d01bfa9ea64 --- /dev/null +++ b/libs/langchain-community/src/document_loaders/web/airtable.ts @@ -0,0 +1,172 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; +import { Document } from "@langchain/core/documents"; +import { getEnvironmentVariable } from "@langchain/core/utils/env"; +import { AsyncCaller } from "@langchain/core/utils/async_caller"; + +export interface AirtableLoaderOptions { + tableId: string; + baseId: string; + kwargs?: Record; +} + +interface AirtableRecord { + id: string; + fields: Record; + createdTime: string; +} + +interface AirtableResponse { + records: AirtableRecord[]; + offset?: string; +} + +export class AirtableLoader extends BaseDocumentLoader { + private readonly apiToken: string; + + private readonly tableId: string; + + private readonly baseId: string; + + private readonly kwargs: Record; + + private static readonly BASE_URL = "https://api.airtable.com/v0"; + + private asyncCaller: AsyncCaller; + + /** + * Initializes the AirtableLoader with configuration options. + * Retrieves the API token from environment variables and validates it. + * + * @param tableId - ID of the Airtable table. + * @param baseId - ID of the Airtable base. + * @param kwargs - Additional query parameters for Airtable requests. + * @param config - Loader configuration for retry options. + */ + constructor({ tableId, baseId, kwargs = {} }: AirtableLoaderOptions) { + super(); + this.apiToken = getEnvironmentVariable("AIRTABLE_API_TOKEN") || ""; + this.tableId = tableId; + this.baseId = baseId; + this.kwargs = kwargs; + + if (!this.apiToken) { + throw new Error( + "Missing Airtable API token. Please set AIRTABLE_API_TOKEN environment variable." + ); + } + + this.asyncCaller = new AsyncCaller({ maxRetries: 3, maxConcurrency: 5 }); + } + + /** + * Loads documents from Airtable, handling pagination and retries. + * + * @returns A promise that resolves to an array of Document objects. + */ + public async load(): Promise { + const documents: Document[] = []; + let offset: string | undefined; + + try { + do { + const url = this.constructUrl(offset); + const data = await this.asyncCaller.call(() => this.fetchRecords(url)); + data.records.forEach((record: AirtableRecord) => + documents.push(this.createDocument(record)) + ); + offset = data.offset; + } while (offset); + } catch (error) { + console.error("Error loading Airtable records:", error); + throw new Error("Failed to load Airtable records"); + } + + return documents; + } + + /** + * Asynchronous generator function for lazily loading documents from Airtable. + * This method yields each document individually, enabling memory-efficient + * handling of large datasets by fetching records in pages. + * + * @returns An asynchronous generator yielding Document objects one by one. 
+ */ + public async *loadLazy(): AsyncGenerator { + let offset: string | undefined; + try { + do { + const url = this.constructUrl(offset); + const data = await this.asyncCaller.call(() => this.fetchRecords(url)); + + for (const record of data.records) { + yield this.createDocument(record); + } + + offset = data.offset; + } while (offset); + } catch (error) { + console.error("Error loading Airtable records lazily:", error); + throw new Error("Failed to load Airtable records lazily"); + } + } + + /** + * Constructs the Airtable API request URL with pagination and query parameters. + * + * @param offset - The pagination offset returned by the previous request. + * @returns A fully constructed URL for the API request. + */ + private constructUrl(offset?: string): string { + const url = new URL( + `${AirtableLoader.BASE_URL}/${this.baseId}/${this.tableId}` + ); + if (offset) url.searchParams.append("offset", offset); + if (this.kwargs.view) url.searchParams.append("view", this.kwargs.view); + return url.toString(); + } + + /** + * Sends the API request to Airtable and handles the response. + * Includes a timeout to prevent hanging on unresponsive requests. + * + * @param url - The Airtable API request URL. + * @returns A promise that resolves to an AirtableResponse object. + */ + private async fetchRecords(url: string): Promise { + try { + const response = await fetch(url, { + headers: { + Authorization: `Bearer ${this.apiToken}`, + }, + }); + + if (!response.ok) { + throw new Error( + `Airtable API request failed with status ${response.status}: ${response.statusText}` + ); + } + + return (await response.json()) as AirtableResponse; + } catch (error) { + console.error("Error during fetch:", error); + throw error; + } + } + + /** + * Converts an Airtable record into a Document object with metadata. + * + * @param record - An Airtable record to convert. + * @returns A Document object with page content and metadata. 
+ */ + private createDocument(record: AirtableRecord): Document { + const metadata: Record = { + source: `${this.baseId}_${this.tableId}`, + base_id: this.baseId, + table_id: this.tableId, + ...(this.kwargs.view && { view: this.kwargs.view }), + }; + return new Document({ pageContent: JSON.stringify(record), metadata }); + } +} diff --git a/libs/langchain-community/src/load/import_map.ts b/libs/langchain-community/src/load/import_map.ts index 5bbd9e4d0a01..ef2edeb81889 100644 --- a/libs/langchain-community/src/load/import_map.ts +++ b/libs/langchain-community/src/load/import_map.ts @@ -74,6 +74,7 @@ export * as indexes__memory from "../indexes/memory.js"; export * as document_loaders__web__searchapi from "../document_loaders/web/searchapi.js"; export * as document_loaders__web__serpapi from "../document_loaders/web/serpapi.js"; export * as document_loaders__web__sort_xyz_blockchain from "../document_loaders/web/sort_xyz_blockchain.js"; +export * as document_loaders__web__airtable from "../document_loaders/web/airtable.js"; export * as utils__event_source_parse from "../utils/event_source_parse.js"; export * as experimental__callbacks__handlers__datadog from "../experimental/callbacks/handlers/datadog.js"; export * as experimental__graph_transformers__llm from "../experimental/graph_transformers/llm.js"; From 913732bf8e3edc856acea888f69e9327b5ef3066 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 16:29:29 -0800 Subject: [PATCH 080/100] fix(openai,core): Make OpenAI withStructuredOutput typing compatible with other models (#6957) --- langchain-core/src/language_models/base.ts | 3 +- .../src/language_models/chat_models.ts | 6 +++- .../chat_models/tests/chatbedrock.int.test.ts | 29 +++++++++++++++++++ libs/langchain-openai/src/chat_models.ts | 4 +-- 4 files changed, 37 insertions(+), 5 deletions(-) diff --git a/langchain-core/src/language_models/base.ts b/langchain-core/src/language_models/base.ts index 4f1233426724..4177721339cd 100644 --- a/langchain-core/src/language_models/base.ts +++ b/langchain-core/src/language_models/base.ts @@ -262,6 +262,8 @@ export type StructuredOutputMethodOptions = name?: string; method?: "functionCalling" | "jsonMode" | "jsonSchema" | string; includeRaw?: IncludeRaw; + /** Whether to use strict mode. Currently only supported by OpenAI models. 
*/ + strict?: boolean; }; /** @deprecated Use StructuredOutputMethodOptions instead */ @@ -514,7 +516,6 @@ export abstract class BaseLanguageModel< withStructuredOutput?< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record = Record - // eslint-disable-next-line @typescript-eslint/no-explicit-any >( schema: | z.ZodType diff --git a/langchain-core/src/language_models/chat_models.ts b/langchain-core/src/language_models/chat_models.ts index c3824a3cbd95..0878d67def8e 100644 --- a/langchain-core/src/language_models/chat_models.ts +++ b/langchain-core/src/language_models/chat_models.ts @@ -802,7 +802,6 @@ export abstract class BaseChatModel< withStructuredOutput< // eslint-disable-next-line @typescript-eslint/no-explicit-any RunOutput extends Record = Record - // eslint-disable-next-line @typescript-eslint/no-explicit-any >( outputSchema: | z.ZodType @@ -845,6 +844,11 @@ export abstract class BaseChatModel< `Chat model must implement ".bindTools()" to use withStructuredOutput.` ); } + if (config?.strict) { + throw new Error( + `"strict" mode is not supported for this model by default.` + ); + } // eslint-disable-next-line @typescript-eslint/no-explicit-any const schema: z.ZodType | Record = outputSchema; const name = config?.name; diff --git a/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts b/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts index 926a47e0fd2b..ebce2edee869 100644 --- a/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts +++ b/libs/langchain-community/src/chat_models/tests/chatbedrock.int.test.ts @@ -9,6 +9,7 @@ import { ChatPromptTemplate } from "@langchain/core/prompts"; import { concat } from "@langchain/core/utils/stream"; import { z } from "zod"; import { zodToJsonSchema } from "zod-to-json-schema"; +import { ChatOpenAI } from "@langchain/openai"; import { BedrockChat as BedrockChatWeb } from "../bedrock/web.js"; import { TavilySearchResults } from "../../tools/tavily_search.js"; @@ -531,3 +532,31 @@ test("Streaming tool calls with Anthropic", async () => { expect(finalChunk?.tool_calls?.[0].name).toBe("weather_tool"); expect(finalChunk?.tool_calls?.[0].args?.city).toBeDefined(); }); + +test("withStructuredOutput result should be compatible with OpenAI typing", async () => { + const testSchema = z.object({ + thinking_process: z + .string() + .describe( + "Think before generating variants and put your reasoning here." + ), + variants: z + .array( + z.object({ + name: z.string(), + value: z.string(), + }) + ) + .describe("Variants of the input"), + }); + + const _prepareClient = () => { + if (Math.random() > 0.5) { + return new ChatOpenAI(); + } + + return new BedrockChatWeb(); + }; + + _prepareClient().withStructuredOutput(testSchema); +}); diff --git a/libs/langchain-openai/src/chat_models.ts b/libs/langchain-openai/src/chat_models.ts index 803dd13ca1d7..1db33c8728ab 100644 --- a/libs/langchain-openai/src/chat_models.ts +++ b/libs/langchain-openai/src/chat_models.ts @@ -343,6 +343,7 @@ function _convertChatOpenAIToolTypeToOpenAITool( return _convertToOpenAITool(tool, fields); } +// TODO: Use the base structured output options param in next breaking release. 
export interface ChatOpenAIStructuredOutputMethodOptions< IncludeRaw extends boolean > extends StructuredOutputMethodOptions { @@ -1940,7 +1941,6 @@ export class ChatOpenAI< RunOutput extends Record = Record >( outputSchema: - | StructuredOutputMethodParams | z.ZodType // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record, @@ -1952,7 +1952,6 @@ export class ChatOpenAI< RunOutput extends Record = Record >( outputSchema: - | StructuredOutputMethodParams | z.ZodType // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record, @@ -1964,7 +1963,6 @@ export class ChatOpenAI< RunOutput extends Record = Record >( outputSchema: - | StructuredOutputMethodParams | z.ZodType // eslint-disable-next-line @typescript-eslint/no-explicit-any | Record, From 5d526bc64a0a710e6132ee97c1d0b5cb378b2240 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 16:44:06 -0800 Subject: [PATCH 081/100] chore(core): Release 0.3.18 (#7186) --- langchain-core/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain-core/package.json b/langchain-core/package.json index ec40b3e6419d..8b0650ded804 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/core", - "version": "0.3.17", + "version": "0.3.18", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { From c5f336be5f3b95cde35f4154add245d1e44ab8cc Mon Sep 17 00:00:00 2001 From: commenthol Date: Tue, 12 Nov 2024 01:44:24 +0100 Subject: [PATCH 082/100] fix(community): chroma search without filter (#7183) --- libs/langchain-community/src/vectorstores/chroma.ts | 3 ++- libs/langchain-community/src/vectorstores/tests/chroma.test.ts | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/libs/langchain-community/src/vectorstores/chroma.ts b/libs/langchain-community/src/vectorstores/chroma.ts index f7dd250872b2..fd9cc08eaa9f 100644 --- a/libs/langchain-community/src/vectorstores/chroma.ts +++ b/libs/langchain-community/src/vectorstores/chroma.ts @@ -354,6 +354,7 @@ export class Chroma extends VectorStore { throw new Error("cannot provide both `filter` and `this.filter`"); } const _filter = filter ?? this.filter; + const where = _filter === undefined ? 
undefined : { ..._filter }; const collection = await this.ensureCollection(); @@ -362,7 +363,7 @@ export class Chroma extends VectorStore { const result = await collection.query({ queryEmbeddings: query, nResults: k, - where: { ..._filter }, + where, }); const { ids, distances, documents, metadatas } = result; diff --git a/libs/langchain-community/src/vectorstores/tests/chroma.test.ts b/libs/langchain-community/src/vectorstores/tests/chroma.test.ts index 023c36560df1..3fc58c3bedcc 100644 --- a/libs/langchain-community/src/vectorstores/tests/chroma.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/chroma.test.ts @@ -128,7 +128,7 @@ describe("Chroma", () => { expect(mockCollection.query).toHaveBeenCalledWith({ queryEmbeddings: query, nResults: expectedResultCount, - where: {}, + where: undefined, }); expect(results).toHaveLength(5); }); From 8477618db8477f0fbbe9b527e4c341dd690af4d6 Mon Sep 17 00:00:00 2001 From: Felipe Martins Diel <41558831+felipediel@users.noreply.github.com> Date: Mon, 11 Nov 2024 21:44:35 -0300 Subject: [PATCH 083/100] feat(qdrant): Add a Function to Delete Points in Qdrant (#7176) Co-authored-by: jacoblee93 --- libs/langchain-qdrant/src/vectorstores.ts | 39 +++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/libs/langchain-qdrant/src/vectorstores.ts b/libs/langchain-qdrant/src/vectorstores.ts index 4e48421f71f2..c70e2e36656a 100644 --- a/libs/langchain-qdrant/src/vectorstores.ts +++ b/libs/langchain-qdrant/src/vectorstores.ts @@ -36,6 +36,14 @@ export type QdrantAddDocumentOptions = { customPayload: Record[]; }; +/** + * Type that defines the parameters for the delete operation in the + * QdrantStore class. It includes ids, filter and shard key. + */ +export type QdrantDeleteParams = + | { ids: string[]; shardKey?: string; filter?: never } + | { filter: object; shardKey?: string; ids?: never }; + export type QdrantFilter = QdrantSchemas["Filter"]; export type QdrantCondition = QdrantSchemas["FieldCondition"]; @@ -174,6 +182,37 @@ export class QdrantVectorStore extends VectorStore { } } + /** + * Method that deletes points from the Qdrant database. + * @param params Parameters for the delete operation. + * @returns Promise that resolves when the delete operation is complete. + */ + async delete(params: QdrantDeleteParams): Promise { + const { ids, filter, shardKey } = params; + + if (ids) { + const batchSize = 1000; + for (let i = 0; i < ids.length; i += batchSize) { + const batchIds = ids.slice(i, i + batchSize); + await this.client.delete(this.collectionName, { + wait: true, + ordering: "weak", + points: batchIds, + shard_key: shardKey, + }); + } + } else if (filter) { + await this.client.delete(this.collectionName, { + wait: true, + ordering: "weak", + filter, + shard_key: shardKey, + }); + } else { + throw new Error("Either ids or filter must be provided."); + } + } + /** * Method to search for vectors in the Qdrant database that are similar to * a given query vector. 
The search results include the score and payload From 53a1c1f3c60b71c48e079f8b175d560a638fe273 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 16:46:35 -0800 Subject: [PATCH 084/100] chore(openai): Release 0.3.13 (#7187) --- libs/langchain-openai/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-openai/package.json b/libs/langchain-openai/package.json index 94244ad02932..aeed4d5af302 100644 --- a/libs/langchain-openai/package.json +++ b/libs/langchain-openai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/openai", - "version": "0.3.12", + "version": "0.3.13", "description": "OpenAI integrations for LangChain.js", "type": "module", "engines": { From be5958689b04bacfaac236d4162c3a1c1c9d157f Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 16:51:13 -0800 Subject: [PATCH 085/100] fix(ci): Fix build artifact (#7188) --- libs/langchain-community/src/load/import_map.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/src/load/import_map.ts b/libs/langchain-community/src/load/import_map.ts index ef2edeb81889..7425c4331ded 100644 --- a/libs/langchain-community/src/load/import_map.ts +++ b/libs/langchain-community/src/load/import_map.ts @@ -71,10 +71,10 @@ export * as stores__message__in_memory from "../stores/message/in_memory.js"; export * as memory__chat_memory from "../memory/chat_memory.js"; export * as indexes__base from "../indexes/base.js"; export * as indexes__memory from "../indexes/memory.js"; +export * as document_loaders__web__airtable from "../document_loaders/web/airtable.js"; export * as document_loaders__web__searchapi from "../document_loaders/web/searchapi.js"; export * as document_loaders__web__serpapi from "../document_loaders/web/serpapi.js"; export * as document_loaders__web__sort_xyz_blockchain from "../document_loaders/web/sort_xyz_blockchain.js"; -export * as document_loaders__web__airtable from "../document_loaders/web/airtable.js"; export * as utils__event_source_parse from "../utils/event_source_parse.js"; export * as experimental__callbacks__handlers__datadog from "../experimental/callbacks/handlers/datadog.js"; export * as experimental__graph_transformers__llm from "../experimental/graph_transformers/llm.js"; From 5ac5d2f8c0e196c2c7a9bc5117347498b95140c2 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 16:55:58 -0800 Subject: [PATCH 086/100] fix(ci): Fix build artifacts (#7189) --- libs/langchain-community/.gitignore | 8 ++++---- libs/langchain-community/package.json | 26 +++++++++++++------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/libs/langchain-community/.gitignore b/libs/langchain-community/.gitignore index 7efe23166245..24abb6e79bc7 100644 --- a/libs/langchain-community/.gitignore +++ b/libs/langchain-community/.gitignore @@ -838,6 +838,10 @@ indexes/sqlite.cjs indexes/sqlite.js indexes/sqlite.d.ts indexes/sqlite.d.cts +document_loaders/web/airtable.cjs +document_loaders/web/airtable.js +document_loaders/web/airtable.d.ts +document_loaders/web/airtable.d.cts document_loaders/web/apify_dataset.cjs document_loaders/web/apify_dataset.js document_loaders/web/apify_dataset.d.ts @@ -998,10 +1002,6 @@ document_loaders/fs/pptx.cjs document_loaders/fs/pptx.js document_loaders/fs/pptx.d.ts document_loaders/fs/pptx.d.cts -document_loaders/web/airtable.cjs -document_loaders/web/airtable.js -document_loaders/web/airtable.d.ts -document_loaders/web/airtable.d.cts utils/convex.cjs utils/convex.js utils/convex.d.ts 
diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 2d8662b45bc4..f64337dda013 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -2598,6 +2598,15 @@ "import": "./indexes/sqlite.js", "require": "./indexes/sqlite.cjs" }, + "./document_loaders/web/airtable": { + "types": { + "import": "./document_loaders/web/airtable.d.ts", + "require": "./document_loaders/web/airtable.d.cts", + "default": "./document_loaders/web/airtable.d.ts" + }, + "import": "./document_loaders/web/airtable.js", + "require": "./document_loaders/web/airtable.cjs" + }, "./document_loaders/web/apify_dataset": { "types": { "import": "./document_loaders/web/apify_dataset.d.ts", @@ -2958,15 +2967,6 @@ "import": "./document_loaders/fs/pptx.js", "require": "./document_loaders/fs/pptx.cjs" }, - "./document_loaders/web/airtable": { - "types": { - "import": "./document_loaders/web/airtable.d.ts", - "require": "./document_loaders/web/airtable.d.cts", - "default": "./document_loaders/web/airtable.d.ts" - }, - "import": "./document_loaders/web/airtable.js", - "require": "./document_loaders/web/airtable.cjs" - }, "./utils/convex": { "types": { "import": "./utils/convex.d.ts", @@ -3910,6 +3910,10 @@ "indexes/sqlite.js", "indexes/sqlite.d.ts", "indexes/sqlite.d.cts", + "document_loaders/web/airtable.cjs", + "document_loaders/web/airtable.js", + "document_loaders/web/airtable.d.ts", + "document_loaders/web/airtable.d.cts", "document_loaders/web/apify_dataset.cjs", "document_loaders/web/apify_dataset.js", "document_loaders/web/apify_dataset.d.ts", @@ -4070,10 +4074,6 @@ "document_loaders/fs/pptx.js", "document_loaders/fs/pptx.d.ts", "document_loaders/fs/pptx.d.cts", - "document_loaders/web/airtable.cjs", - "document_loaders/web/airtable.js", - "document_loaders/web/airtable.d.ts", - "document_loaders/web/airtable.d.cts", "utils/convex.cjs", "utils/convex.js", "utils/convex.d.ts", From 3bcca4ed3430c9fb15a63cb02e2753d8e377b8e5 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 17:07:47 -0800 Subject: [PATCH 087/100] chore(community): Release 0.3.13 (#7190) --- libs/langchain-community/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index f64337dda013..1d8156ab3387 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.12", + "version": "0.3.13", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { From 60329e2f8cdfc8899ebd85d16e3526b0118a206f Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Mon, 11 Nov 2024 17:19:37 -0800 Subject: [PATCH 088/100] chore(qdrant): Release 0.1.1 (#7191) --- libs/langchain-qdrant/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-qdrant/package.json b/libs/langchain-qdrant/package.json index a0bba7310e9d..d8dd4d2ce46c 100644 --- a/libs/langchain-qdrant/package.json +++ b/libs/langchain-qdrant/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/qdrant", - "version": "0.1.0", + "version": "0.1.1", "description": "LangChain.js integration for the Qdrant vector database", "type": "module", "engines": { From 1a1b7a65bea8142ba0f98f856a585463df397950 Mon Sep 17 00:00:00 2001 From: FilipZmijewski Date: Tue, 12 Nov 2024 17:54:09 +0100 Subject: [PATCH 089/100] fix[community]: Fixed missing tool_choice in call options of chat IBM 
(#7192) --- libs/langchain-community/src/chat_models/ibm.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libs/langchain-community/src/chat_models/ibm.ts b/libs/langchain-community/src/chat_models/ibm.ts index d4dc6a64ba28..399e6d4d2909 100644 --- a/libs/langchain-community/src/chat_models/ibm.ts +++ b/libs/langchain-community/src/chat_models/ibm.ts @@ -41,6 +41,7 @@ import { TextChatResultChoice, TextChatResultMessage, TextChatToolCall, + TextChatToolChoiceTool, TextChatUsage, } from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; import { WatsonXAI } from "@ibm-cloud/watsonx-ai"; @@ -86,6 +87,7 @@ export interface WatsonxCallOptionsChat extends Omit, WatsonxCallParams { promptIndex?: number; + tool_choice?: TextChatToolChoiceTool; } type ChatWatsonxToolType = BindToolsInput | TextChatParameterTools; @@ -470,7 +472,7 @@ export class ChatWatsonx< tools: options.tools ? _convertToolToWatsonxTool(options.tools) : undefined, - toolChoice: options.toolChoice, + toolChoice: options.tool_choice, responseFormat: options.responseFormat, toolChoiceOption: options.toolChoiceOption, }; From 474994fa1c3f22eb9f3ebac7c6ea574b7baa188b Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Tue, 12 Nov 2024 17:48:22 -0800 Subject: [PATCH 090/100] fix(anthropic, bedrock): Remove message merging logic (#7196) --- .../src/utils/message_inputs.ts | 36 ++++----------- .../src/utils/bedrock/anthropic.ts | 46 ++++++++----------- 2 files changed, 26 insertions(+), 56 deletions(-) diff --git a/libs/langchain-anthropic/src/utils/message_inputs.ts b/libs/langchain-anthropic/src/utils/message_inputs.ts index 4082405de828..c8db15ba5b9e 100644 --- a/libs/langchain-anthropic/src/utils/message_inputs.ts +++ b/libs/langchain-anthropic/src/utils/message_inputs.ts @@ -35,15 +35,15 @@ function _formatImage(imageUrl: string) { } as any; } -function _mergeMessages( +function _ensureMessageContents( messages: BaseMessage[] ): (SystemMessage | HumanMessage | AIMessage)[] { // Merge runs of human/tool messages into single human messages with content blocks. - const merged = []; + const updatedMsgs = []; for (const message of messages) { if (message._getType() === "tool") { if (typeof message.content === "string") { - const previousMessage = merged[merged.length - 1]; + const previousMessage = updatedMsgs[updatedMsgs.length - 1]; if ( previousMessage?._getType() === "human" && Array.isArray(previousMessage.content) && @@ -58,7 +58,7 @@ function _mergeMessages( }); } else { // If not, we create a new human message with the tool result. 
- merged.push( + updatedMsgs.push( new HumanMessage({ content: [ { @@ -71,7 +71,7 @@ function _mergeMessages( ); } } else { - merged.push( + updatedMsgs.push( new HumanMessage({ content: [ { @@ -84,30 +84,10 @@ function _mergeMessages( ); } } else { - const previousMessage = merged[merged.length - 1]; - if ( - previousMessage?._getType() === "human" && - message._getType() === "human" - ) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - let combinedContent: Record[]; - if (typeof previousMessage.content === "string") { - combinedContent = [{ type: "text", text: previousMessage.content }]; - } else { - combinedContent = previousMessage.content; - } - if (typeof message.content === "string") { - combinedContent.push({ type: "text", text: message.content }); - } else { - combinedContent = combinedContent.concat(message.content); - } - previousMessage.content = combinedContent; - } else { - merged.push(message); - } + updatedMsgs.push(message); } } - return merged; + return updatedMsgs; } export function _convertLangChainToolCallToAnthropic( @@ -202,7 +182,7 @@ function _formatContent(content: MessageContent) { export function _convertMessagesToAnthropicPayload( messages: BaseMessage[] ): AnthropicMessageCreateParams { - const mergedMessages = _mergeMessages(messages); + const mergedMessages = _ensureMessageContents(messages); let system; if (mergedMessages.length > 0 && mergedMessages[0]._getType() === "system") { system = messages[0].content; diff --git a/libs/langchain-community/src/utils/bedrock/anthropic.ts b/libs/langchain-community/src/utils/bedrock/anthropic.ts index 4565a2f1615d..3f440bd2b014 100644 --- a/libs/langchain-community/src/utils/bedrock/anthropic.ts +++ b/libs/langchain-community/src/utils/bedrock/anthropic.ts @@ -47,15 +47,15 @@ function _formatImage(imageUrl: string) { } as any; } -function _mergeMessages( +function _ensureMessageContents( messages: BaseMessage[] ): (SystemMessage | HumanMessage | AIMessage)[] { // Merge runs of human/tool messages into single human messages with content blocks. - const merged = []; + const updatedMsgs = []; for (const message of messages) { if (message._getType() === "tool") { if (typeof message.content === "string") { - const previousMessage = merged[merged.length - 1]; + const previousMessage = updatedMsgs[updatedMsgs.length - 1]; if ( previousMessage?._getType() === "human" && Array.isArray(previousMessage.content) && @@ -70,7 +70,7 @@ function _mergeMessages( }); } else { // If not, we create a new human message with the tool result. 
- merged.push( + updatedMsgs.push( new HumanMessage({ content: [ { @@ -83,33 +83,23 @@ function _mergeMessages( ); } } else { - merged.push(new HumanMessage({ content: message.content })); + updatedMsgs.push( + new HumanMessage({ + content: [ + { + type: "tool_result", + content: _formatContent(message.content), + tool_use_id: (message as ToolMessage).tool_call_id, + }, + ], + }) + ); } } else { - const previousMessage = merged[merged.length - 1]; - if ( - previousMessage?._getType() === "human" && - message._getType() === "human" - ) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - let combinedContent: Record[]; - if (typeof previousMessage.content === "string") { - combinedContent = [{ type: "text", text: previousMessage.content }]; - } else { - combinedContent = previousMessage.content; - } - if (typeof message.content === "string") { - combinedContent.push({ type: "text", text: message.content }); - } else { - combinedContent = combinedContent.concat(message.content); - } - previousMessage.content = combinedContent; - } else { - merged.push(message); - } + updatedMsgs.push(message); } } - return merged; + return updatedMsgs; } export function _convertLangChainToolCallToAnthropic( @@ -170,7 +160,7 @@ export function formatMessagesForAnthropic(messages: BaseMessage[]): { system?: string; messages: Record[]; } { - const mergedMessages = _mergeMessages(messages); + const mergedMessages = _ensureMessageContents(messages); let system: string | undefined; if (mergedMessages.length > 0 && mergedMessages[0]._getType() === "system") { if (typeof messages[0].content !== "string") { From ca211300138733c88ba9e35521e212fa79e1371e Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Tue, 12 Nov 2024 18:13:06 -0800 Subject: [PATCH 091/100] feat(community): Release 0.3.14 (#7197) --- libs/langchain-community/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 1d8156ab3387..8ca1c034abd9 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/community", - "version": "0.3.13", + "version": "0.3.14", "description": "Third-party integrations for LangChain.js", "type": "module", "engines": { From a173e300ef9ada416220876a2739e024b3a7f268 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Tue, 12 Nov 2024 18:14:46 -0800 Subject: [PATCH 092/100] feat(anthropic): Release 0.3.8 (#7198) --- libs/langchain-anthropic/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-anthropic/package.json b/libs/langchain-anthropic/package.json index 6d37b6303602..2f62e77b6a78 100644 --- a/libs/langchain-anthropic/package.json +++ b/libs/langchain-anthropic/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/anthropic", - "version": "0.3.7", + "version": "0.3.8", "description": "Anthropic integrations for LangChain.js", "type": "module", "engines": { From 2edd05acca807910e613ae5e93d024859a4501ab Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Fri, 15 Nov 2024 16:55:58 -0800 Subject: [PATCH 093/100] fix(google-genai): Force tool usage in WSO (#7213) --- .../langchain-google-genai/src/chat_models.ts | 1 + .../langchain-google-genai/src/utils/tools.ts | 32 +++++++++++++++++++ .../src/utils/zod_to_genai_parameters.ts | 3 ++ 3 files changed, 36 insertions(+) diff --git a/libs/langchain-google-genai/src/chat_models.ts b/libs/langchain-google-genai/src/chat_models.ts index 
6fd1be59cf36..6fc5433babe3 100644 --- a/libs/langchain-google-genai/src/chat_models.ts +++ b/libs/langchain-google-genai/src/chat_models.ts @@ -953,6 +953,7 @@ export class ChatGoogleGenerativeAI } const llm = this.bind({ tools, + tool_choice: functionName, }); if (!includeRaw) { diff --git a/libs/langchain-google-genai/src/utils/tools.ts b/libs/langchain-google-genai/src/utils/tools.ts index 8e362d2a7877..942ec106b4fc 100644 --- a/libs/langchain-google-genai/src/utils/tools.ts +++ b/libs/langchain-google-genai/src/utils/tools.ts @@ -3,12 +3,19 @@ import { ToolConfig, FunctionCallingMode, FunctionDeclaration, + FunctionDeclarationsTool, + FunctionDeclarationSchema, } from "@google/generative-ai"; import { ToolChoice } from "@langchain/core/language_models/chat_models"; import { StructuredToolInterface } from "@langchain/core/tools"; import { isLangChainTool } from "@langchain/core/utils/function_calling"; +import { + isOpenAITool, + ToolDefinition, +} from "@langchain/core/language_models/base"; import { convertToGenerativeAITools } from "./common.js"; import { GoogleGenerativeAIToolType } from "../types.js"; +import { removeAdditionalProperties } from "./zod_to_genai_parameters.js"; export function convertToolsToGenAI( tools: GoogleGenerativeAIToolType[], @@ -41,6 +48,15 @@ function processTools(tools: GoogleGenerativeAIToolType[]): GenerativeAITool[] { if (convertedTool.functionDeclarations) { functionDeclarationTools.push(...convertedTool.functionDeclarations); } + } else if (isOpenAITool(tool)) { + const { functionDeclarations } = convertOpenAIToolToGenAI(tool); + if (functionDeclarations) { + functionDeclarationTools.push(...functionDeclarations); + } else { + throw new Error( + "Failed to convert OpenAI structured tool to GenerativeAI tool" + ); + } } else { genAITools.push(tool as GenerativeAITool); } @@ -81,6 +97,22 @@ function processTools(tools: GoogleGenerativeAIToolType[]): GenerativeAITool[] { ]; } +function convertOpenAIToolToGenAI( + tool: ToolDefinition +): FunctionDeclarationsTool { + return { + functionDeclarations: [ + { + name: tool.function.name, + description: tool.function.description, + parameters: removeAdditionalProperties( + tool.function.parameters + ) as FunctionDeclarationSchema, + }, + ], + }; +} + function createToolConfig( genAITools: GenerativeAITool[], extra?: { diff --git a/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts b/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts index 7323612504a9..bfd054548643 100644 --- a/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts +++ b/libs/langchain-google-genai/src/utils/zod_to_genai_parameters.ts @@ -27,6 +27,9 @@ export function removeAdditionalProperties( if ("additionalProperties" in newObj) { delete newObj.additionalProperties; } + if ("$schema" in newObj) { + delete newObj.$schema; + } for (const key in newObj) { if (key in newObj) { From c30ae29ae92cc5486b04d8fc438f41b7814001a0 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Fri, 15 Nov 2024 16:57:53 -0800 Subject: [PATCH 094/100] fix(google-genai): Release 0.1.4 (#7215) --- libs/langchain-google-genai/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-google-genai/package.json b/libs/langchain-google-genai/package.json index 2e4243fa9c34..4188867ca254 100644 --- a/libs/langchain-google-genai/package.json +++ b/libs/langchain-google-genai/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/google-genai", - "version": "0.1.3", + "version": "0.1.4", "description": "Google 
Generative AI integration for LangChain.js", "type": "module", "engines": { From 54decfe9b33469437bfd17822fcf77cea7e72c75 Mon Sep 17 00:00:00 2001 From: Phil Nash Date: Sun, 17 Nov 2024 12:13:23 +1100 Subject: [PATCH 095/100] feat(community): Adds an HTML loader for URLS (#7184) Co-authored-by: Jacob Lee --- .../mozilla_readability.ts | 6 +- libs/langchain-community/.gitignore | 4 + libs/langchain-community/langchain.config.js | 1 + libs/langchain-community/package.json | 13 +++ .../document_loaders/tests/html.int.test.ts | 24 ++++++ .../src/document_loaders/web/cheerio.ts | 39 ++++----- .../src/document_loaders/web/html.ts | 81 +++++++++++++++++++ .../src/document_loaders/web/sitemap.ts | 6 +- .../mozilla_readability.ts | 4 +- .../src/load/import_map.ts | 1 + 10 files changed, 146 insertions(+), 33 deletions(-) create mode 100644 libs/langchain-community/src/document_loaders/tests/html.int.test.ts create mode 100644 libs/langchain-community/src/document_loaders/web/html.ts diff --git a/examples/src/document_transformers/mozilla_readability.ts b/examples/src/document_transformers/mozilla_readability.ts index b3ac3c2b155a..22b19e463a92 100644 --- a/examples/src/document_transformers/mozilla_readability.ts +++ b/examples/src/document_transformers/mozilla_readability.ts @@ -1,8 +1,8 @@ -import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; +import { HTMLWebBaseLoader } from "@langchain/community/document_loaders/web/html"; import { MozillaReadabilityTransformer } from "@langchain/community/document_transformers/mozilla_readability"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; -const loader = new CheerioWebBaseLoader( +const loader = new HTMLWebBaseLoader( "https://news.ycombinator.com/item?id=34817881" ); @@ -11,7 +11,7 @@ const docs = await loader.load(); const splitter = RecursiveCharacterTextSplitter.fromLanguage("html"); const transformer = new MozillaReadabilityTransformer(); -const sequence = splitter.pipe(transformer); +const sequence = transformer.pipe(splitter); const newDocuments = await sequence.invoke(docs); diff --git a/libs/langchain-community/.gitignore b/libs/langchain-community/.gitignore index 24abb6e79bc7..b554afb6f1ec 100644 --- a/libs/langchain-community/.gitignore +++ b/libs/langchain-community/.gitignore @@ -866,6 +866,10 @@ document_loaders/web/cheerio.cjs document_loaders/web/cheerio.js document_loaders/web/cheerio.d.ts document_loaders/web/cheerio.d.cts +document_loaders/web/html.cjs +document_loaders/web/html.js +document_loaders/web/html.d.ts +document_loaders/web/html.d.cts document_loaders/web/puppeteer.cjs document_loaders/web/puppeteer.js document_loaders/web/puppeteer.d.ts diff --git a/libs/langchain-community/langchain.config.js b/libs/langchain-community/langchain.config.js index e4d9102124de..631c2a12879e 100644 --- a/libs/langchain-community/langchain.config.js +++ b/libs/langchain-community/langchain.config.js @@ -270,6 +270,7 @@ export const config = { "document_loaders/web/azure_blob_storage_file", "document_loaders/web/browserbase": "document_loaders/web/browserbase", "document_loaders/web/cheerio": "document_loaders/web/cheerio", + "document_loaders/web/html": "document_loaders/web/html", "document_loaders/web/puppeteer": "document_loaders/web/puppeteer", "document_loaders/web/playwright": "document_loaders/web/playwright", "document_loaders/web/college_confidential": diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 
8ca1c034abd9..654a00fd51dc 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -2661,6 +2661,15 @@ "import": "./document_loaders/web/cheerio.js", "require": "./document_loaders/web/cheerio.cjs" }, + "./document_loaders/web/html": { + "types": { + "import": "./document_loaders/web/html.d.ts", + "require": "./document_loaders/web/html.d.cts", + "default": "./document_loaders/web/html.d.ts" + }, + "import": "./document_loaders/web/html.js", + "require": "./document_loaders/web/html.cjs" + }, "./document_loaders/web/puppeteer": { "types": { "import": "./document_loaders/web/puppeteer.d.ts", @@ -3938,6 +3947,10 @@ "document_loaders/web/cheerio.js", "document_loaders/web/cheerio.d.ts", "document_loaders/web/cheerio.d.cts", + "document_loaders/web/html.cjs", + "document_loaders/web/html.js", + "document_loaders/web/html.d.ts", + "document_loaders/web/html.d.cts", "document_loaders/web/puppeteer.cjs", "document_loaders/web/puppeteer.js", "document_loaders/web/puppeteer.d.ts", diff --git a/libs/langchain-community/src/document_loaders/tests/html.int.test.ts b/libs/langchain-community/src/document_loaders/tests/html.int.test.ts new file mode 100644 index 000000000000..afd308a19b4f --- /dev/null +++ b/libs/langchain-community/src/document_loaders/tests/html.int.test.ts @@ -0,0 +1,24 @@ +import { expect, test } from "@jest/globals"; +import { HTMLWebBaseLoader } from "../web/html.js"; + +test("Test HTML web scraper loader", async () => { + const loader = new HTMLWebBaseLoader( + "https://news.ycombinator.com/item?id=34817881" + ); + const docs = await loader.load(); + expect(docs[0].pageContent).toEqual( + expect.stringContaining("What Lights the Universe’s Standard Candles?") + ); +}); + +test("Test HTML web scraper loader with textDecoder", async () => { + const loader = new HTMLWebBaseLoader( + "https://corp.163.com/gb/about/management.html", + { + textDecoder: new TextDecoder("gbk"), + } + ); + + const docs = await loader.load(); + expect(docs[0].pageContent.trim()).toEqual(expect.stringContaining("网易")); +}); diff --git a/libs/langchain-community/src/document_loaders/web/cheerio.ts b/libs/langchain-community/src/document_loaders/web/cheerio.ts index abdd5a7e15b2..106b1ffe9d33 100644 --- a/libs/langchain-community/src/document_loaders/web/cheerio.ts +++ b/libs/langchain-community/src/document_loaders/web/cheerio.ts @@ -5,38 +5,27 @@ import type { SelectorType, } from "cheerio"; import { Document } from "@langchain/core/documents"; -import { - AsyncCaller, - AsyncCallerParams, -} from "@langchain/core/utils/async_caller"; +import { AsyncCaller } from "@langchain/core/utils/async_caller"; import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; -import type { DocumentLoader } from "@langchain/core/document_loaders/base"; +import type { WebBaseLoaderParams, WebBaseLoader } from "./html.js"; /** - * Represents the parameters for configuring the CheerioWebBaseLoader. It - * extends the AsyncCallerParams interface and adds additional parameters - * specific to web-based loaders. + * @deprecated Either import the CheerioWebBaseLoaderParams from @langchain/community/document_loaders/web/cheerio + * or use the WebBaseLoaderParams from @langchain/community/document_loaders/web/html. */ -export interface WebBaseLoaderParams extends AsyncCallerParams { - /** - * The timeout in milliseconds for the fetch request. Defaults to 10s. 
- */ - timeout?: number; +export { WebBaseLoaderParams }; +/** + * Represents the parameters for configuring the CheerioWebBaseLoader. It + * extends the WebBaseLoaderParams interface and adds additional parameters + * specific to loading with Cheerio. + */ +export interface CheerioWebBaseLoaderParams extends WebBaseLoaderParams { /** * The selector to use to extract the text from the document. Defaults to * "body". */ selector?: SelectorType; - - /** - * The text decoder to use to decode the response. Defaults to UTF-8. - */ - textDecoder?: TextDecoder; - /** - * The headers to use in the fetch request. - */ - headers?: HeadersInit; } /** @@ -45,14 +34,14 @@ export interface WebBaseLoaderParams extends AsyncCallerParams { * web-based documents using Cheerio. * @example * ```typescript - * const loader = new CheerioWebBaseLoader("https:exampleurl.com"); + * const loader = new CheerioWebBaseLoader("https://exampleurl.com"); * const docs = await loader.load(); * console.log({ docs }); * ``` */ export class CheerioWebBaseLoader extends BaseDocumentLoader - implements DocumentLoader + implements WebBaseLoader { timeout: number; @@ -64,7 +53,7 @@ export class CheerioWebBaseLoader headers?: HeadersInit; - constructor(public webPath: string, fields?: WebBaseLoaderParams) { + constructor(public webPath: string, fields?: CheerioWebBaseLoaderParams) { super(); const { timeout, selector, textDecoder, headers, ...rest } = fields ?? {}; this.timeout = timeout ?? 10000; diff --git a/libs/langchain-community/src/document_loaders/web/html.ts b/libs/langchain-community/src/document_loaders/web/html.ts new file mode 100644 index 000000000000..b07d61912c13 --- /dev/null +++ b/libs/langchain-community/src/document_loaders/web/html.ts @@ -0,0 +1,81 @@ +import { + AsyncCaller, + AsyncCallerParams, +} from "@langchain/core/utils/async_caller"; +import { BaseDocumentLoader } from "@langchain/core/document_loaders/base"; +import { Document } from "@langchain/core/documents"; +import type { DocumentLoader } from "@langchain/core/document_loaders/base"; + +/** + * Represents the parameters for configuring WebBaseLoaders. It extends the + * AsyncCallerParams interface and adds additional parameters specific to + * web-based loaders. + */ +export interface WebBaseLoaderParams extends AsyncCallerParams { + /** + * The timeout in milliseconds for the fetch request. Defaults to 10s. + */ + timeout?: number; + + /** + * The text decoder to use to decode the response. Defaults to UTF-8. + */ + textDecoder?: TextDecoder; + /** + * The headers to use in the fetch request. + */ + headers?: HeadersInit; + /** + * The selector to use to extract the text from the document. + * Defaults to "body". + * @deprecated Use CheerioWebBaseLoaderParams from @langchain/community/document_loaders/web/cheerio + * instead. + */ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + selector?: any; +} + +export interface WebBaseLoader extends DocumentLoader { + timeout: number; + + caller: AsyncCaller; + + textDecoder?: TextDecoder; + + headers?: HeadersInit; +} + +export class HTMLWebBaseLoader + extends BaseDocumentLoader + implements WebBaseLoader +{ + timeout: number; + + caller: AsyncCaller; + + textDecoder?: TextDecoder; + + headers?: HeadersInit; + + constructor(public webPath: string, fields?: WebBaseLoaderParams) { + super(); + const { timeout, textDecoder, headers, ...rest } = fields ?? {}; + this.timeout = timeout ?? 
10000; + this.caller = new AsyncCaller(rest); + this.textDecoder = textDecoder; + this.headers = headers; + } + + async load(): Promise { + const response = await this.caller.call(fetch, this.webPath, { + signal: this.timeout ? AbortSignal.timeout(this.timeout) : undefined, + headers: this.headers, + }); + + const html = + this.textDecoder?.decode(await response.arrayBuffer()) ?? + (await response.text()); + + return [new Document({ pageContent: html })]; + } +} diff --git a/libs/langchain-community/src/document_loaders/web/sitemap.ts b/libs/langchain-community/src/document_loaders/web/sitemap.ts index aa6a6e41cb33..1cf5efcf75ba 100644 --- a/libs/langchain-community/src/document_loaders/web/sitemap.ts +++ b/libs/langchain-community/src/document_loaders/web/sitemap.ts @@ -1,13 +1,13 @@ import { Document, DocumentInterface } from "@langchain/core/documents"; import { chunkArray } from "@langchain/core/utils/chunk_array"; -import { CheerioWebBaseLoader, WebBaseLoaderParams } from "./cheerio.js"; +import { CheerioWebBaseLoader, CheerioWebBaseLoaderParams } from "./cheerio.js"; /** * Interface representing the parameters for initializing a SitemapLoader. * @interface SitemapLoaderParams - * @extends WebBaseLoaderParams + * @extends CheerioWebBaseLoaderParams */ -export interface SitemapLoaderParams extends WebBaseLoaderParams { +export interface SitemapLoaderParams extends CheerioWebBaseLoaderParams { /** * @property {(string | RegExp)[] | undefined} filterUrls - A list of regexes. Only URLs that match one of the filter URLs will be loaded. * WARNING: The filter URLs are interpreted as regular expressions. Escape special characters if needed. diff --git a/libs/langchain-community/src/document_transformers/mozilla_readability.ts b/libs/langchain-community/src/document_transformers/mozilla_readability.ts index a26b42a6d6c7..e8003c3d0514 100644 --- a/libs/langchain-community/src/document_transformers/mozilla_readability.ts +++ b/libs/langchain-community/src/document_transformers/mozilla_readability.ts @@ -11,7 +11,7 @@ import { * main content from a web page. * @example * ```typescript - * const loader = new CheerioWebBaseLoader("https://example.com/article"); + * const loader = new HTMLWebBaseLoader("https://example.com/article"); * const docs = await loader.load(); * * const splitter = new RecursiveCharacterTextSplitter({ @@ -20,7 +20,7 @@ import { * const transformer = new MozillaReadabilityTransformer(); * * // The sequence processes the loaded documents through the splitter and then the transformer. - * const sequence = splitter.pipe(transformer); + * const sequence = transformer.pipe(splitter); * * // Invoke the sequence to transform the documents into a more readable format. 
* const newDocuments = await sequence.invoke(docs); diff --git a/libs/langchain-community/src/load/import_map.ts b/libs/langchain-community/src/load/import_map.ts index 7425c4331ded..8b3b734a82c1 100644 --- a/libs/langchain-community/src/load/import_map.ts +++ b/libs/langchain-community/src/load/import_map.ts @@ -72,6 +72,7 @@ export * as memory__chat_memory from "../memory/chat_memory.js"; export * as indexes__base from "../indexes/base.js"; export * as indexes__memory from "../indexes/memory.js"; export * as document_loaders__web__airtable from "../document_loaders/web/airtable.js"; +export * as document_loaders__web__html from "../document_loaders/web/html.js"; export * as document_loaders__web__searchapi from "../document_loaders/web/searchapi.js"; export * as document_loaders__web__serpapi from "../document_loaders/web/serpapi.js"; export * as document_loaders__web__sort_xyz_blockchain from "../document_loaders/web/sort_xyz_blockchain.js"; From 074d1e1c14e94d3a4a514e56e15f84ce1e2b1ceb Mon Sep 17 00:00:00 2001 From: Christopher Dierkens Date: Sat, 16 Nov 2024 21:01:20 -0500 Subject: [PATCH 096/100] feat(community): allow metadata generics to flow through LibSQLVectorStore (#7208) --- .../src/vectorstores/libsql.ts | 39 +++++++++++-------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/libs/langchain-community/src/vectorstores/libsql.ts b/libs/langchain-community/src/vectorstores/libsql.ts index dfdaeaca167b..93c878073951 100644 --- a/libs/langchain-community/src/vectorstores/libsql.ts +++ b/libs/langchain-community/src/vectorstores/libsql.ts @@ -3,6 +3,9 @@ import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import type { Client, InStatement } from "@libsql/client"; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type MetadataDefault = Record; + /** * Interface for LibSQLVectorStore configuration options. */ @@ -18,8 +21,10 @@ export interface LibSQLVectorStoreArgs { /** * A vector store using LibSQL/Turso for storage and retrieval. */ -export class LibSQLVectorStore extends VectorStore { - declare FilterType: (doc: Document) => boolean; +export class LibSQLVectorStore< + Metadata extends MetadataDefault = MetadataDefault +> extends VectorStore { + declare FilterType: (doc: Document) => boolean; private db; @@ -51,10 +56,10 @@ export class LibSQLVectorStore extends VectorStore { /** * Adds documents to the vector store. - * @param {Document[]} documents - The documents to add. + * @param {Document[]} documents - The documents to add. * @returns {Promise} The IDs of the added documents. */ - async addDocuments(documents: Document[]): Promise { + async addDocuments(documents: Document[]): Promise { const texts = documents.map(({ pageContent }) => pageContent); const embeddings = await this.embeddings.embedDocuments(texts); @@ -64,12 +69,12 @@ export class LibSQLVectorStore extends VectorStore { /** * Adds vectors to the vector store. * @param {number[][]} vectors - The vectors to add. - * @param {Document[]} documents - The documents associated with the vectors. + * @param {Document[]} documents - The documents associated with the vectors. * @returns {Promise} The IDs of the added vectors. 
*/ async addVectors( vectors: number[][], - documents: Document[] + documents: Document[] ): Promise { const rows = vectors.map((embedding, idx) => ({ content: documents[idx].pageContent, @@ -102,14 +107,14 @@ export class LibSQLVectorStore extends VectorStore { * Performs a similarity search using a vector query and returns documents with their scores. * @param {number[]} query - The query vector. * @param {number} k - The number of results to return. - * @returns {Promise<[Document, number][]>} An array of tuples containing the similar documents and their scores. + * @returns {Promise<[Document, number][]>} An array of tuples containing the similar documents and their scores. */ async similaritySearchVectorWithScore( query: number[], k: number // filter is currently unused // filter?: this["FilterType"] - ): Promise<[Document, number][]> { + ): Promise<[Document, number][]> { // Potential SQL injection risk if query vector is not properly sanitized. if (!query.every((num) => typeof num === "number" && !Number.isNaN(num))) { throw new Error("Invalid query vector: all elements must be numbers"); @@ -130,7 +135,7 @@ export class LibSQLVectorStore extends VectorStore { return results.rows.map((row: any) => { const metadata = JSON.parse(row.metadata); - const doc = new Document({ + const doc = new Document({ id: String(row.id), metadata, pageContent: row.content, @@ -175,12 +180,12 @@ export class LibSQLVectorStore extends VectorStore { * @param {LibSQLVectorStoreArgs} [options] - Configuration options for the vector store. * @returns {Promise} A new LibSQLVectorStore instance. */ - static async fromTexts( + static async fromTexts( texts: string[], - metadatas: object[] | object, + metadatas: Metadata[] | Metadata, embeddings: EmbeddingsInterface, options: LibSQLVectorStoreArgs - ): Promise { + ): Promise> { const docs = texts.map((text, i) => { const metadata = Array.isArray(metadatas) ? metadatas[i] : metadatas; @@ -198,12 +203,14 @@ export class LibSQLVectorStore extends VectorStore { * @param {LibSQLVectorStoreArgs} [options] - Configuration options for the vector store. * @returns {Promise} A new LibSQLVectorStore instance. 
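+   * @example
+   * ```typescript
+   * // Illustrative sketch: shows the Metadata generic flowing through to the
+   * // returned documents. Assumes `client` is a `@libsql/client` Client,
+   * // `embeddings` is an EmbeddingsInterface implementation, and `docs` /
+   * // `queryVector` are defined elsewhere.
+   * type MyMetadata = { source: string };
+   *
+   * const store = await LibSQLVectorStore.fromDocuments<MyMetadata>(
+   *   docs,
+   *   embeddings,
+   *   { db: client }
+   * );
+   * const hits: [Document<MyMetadata>, number][] =
+   *   await store.similaritySearchVectorWithScore(queryVector, 4);
+   * ```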
*/ - static async fromDocuments( - docs: Document[], + static async fromDocuments< + Metadata extends MetadataDefault = MetadataDefault + >( + docs: Document[], embeddings: EmbeddingsInterface, options: LibSQLVectorStoreArgs - ): Promise { - const instance = new this(embeddings, options); + ): Promise> { + const instance = new this(embeddings, options); await instance.addDocuments(docs); From cbc706999b1e3654cbb413275a2554fc09cd15f8 Mon Sep 17 00:00:00 2001 From: Will Jones Date: Sat, 16 Nov 2024 18:01:45 -0800 Subject: [PATCH 097/100] feat(community): replace `vectordb` package with new `@lancedb/lancedb` (#7202) Co-authored-by: jacoblee93 --- .../integrations/vectorstores/lancedb.mdx | 4 +- examples/package.json | 2 +- .../src/indexes/vector_stores/lancedb/load.ts | 2 +- libs/langchain-community/package.json | 10 +- .../src/vectorstores/lancedb.ts | 16 ++- .../vectorstores/tests/lancedb.int.test.ts | 2 +- yarn.lock | 109 ++++++++---------- 7 files changed, 70 insertions(+), 75 deletions(-) diff --git a/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx b/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx index aa3511ceac26..8ee73c78e360 100644 --- a/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx +++ b/docs/core_docs/docs/integrations/vectorstores/lancedb.mdx @@ -12,10 +12,10 @@ LanceDB datasets are persisted to disk and can be shared between Node.js and Pyt ## Setup -Install the [LanceDB](https://github.com/lancedb/lancedb) [Node.js bindings](https://www.npmjs.com/package/vectordb): +Install the [LanceDB](https://github.com/lancedb/lancedb) [Node.js bindings](https://www.npmjs.com/package/@lancedb/lancedb): ```bash npm2yarn -npm install -S vectordb +npm install -S @lancedb/lancedb ``` import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; diff --git a/examples/package.json b/examples/package.json index 3b3c3340186f..75ab6e9eb4d6 100644 --- a/examples/package.json +++ b/examples/package.json @@ -33,6 +33,7 @@ "@getzep/zep-js": "^0.9.0", "@gomomento/sdk": "^1.51.1", "@google/generative-ai": "^0.7.0", + "@lancedb/lancedb": "^0.13.0", "@langchain/anthropic": "workspace:*", "@langchain/aws": "workspace:*", "@langchain/azure-cosmosdb": "workspace:*", @@ -102,7 +103,6 @@ "typeorm": "^0.3.20", "typesense": "^1.5.3", "uuid": "^10.0.0", - "vectordb": "^0.9.0", "voy-search": "0.6.2", "weaviate-ts-client": "^2.0.0", "zod": "^3.22.4", diff --git a/examples/src/indexes/vector_stores/lancedb/load.ts b/examples/src/indexes/vector_stores/lancedb/load.ts index afa7d6c5524a..3592c64df650 100644 --- a/examples/src/indexes/vector_stores/lancedb/load.ts +++ b/examples/src/indexes/vector_stores/lancedb/load.ts @@ -1,6 +1,6 @@ import { LanceDB } from "@langchain/community/vectorstores/lancedb"; import { OpenAIEmbeddings } from "@langchain/openai"; -import { connect } from "vectordb"; +import { connect } from "@lancedb/lancedb"; import * as fs from "node:fs/promises"; import * as path from "node:path"; import os from "node:os"; diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 654a00fd51dc..11a4d954536d 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -79,6 +79,7 @@ "@huggingface/inference": "^2.6.4", "@ibm-cloud/watsonx-ai": "^1.1.0", "@jest/globals": "^29.5.0", + "@lancedb/lancedb": "^0.13.0", "@langchain/core": "workspace:*", "@langchain/scripts": ">=0.1.0 <0.2.0", "@langchain/standard-tests": "0.0.0", @@ -210,7 +211,6 @@ "typescript": "~5.1.6", 
"typesense": "^1.5.3", "usearch": "^1.1.1", - "vectordb": "^0.9.0", "voy-search": "0.6.2", "weaviate-ts-client": "^1.4.0", "web-auth-library": "^1.0.3", @@ -246,6 +246,7 @@ "@gradientai/nodejs-sdk": "^1.2.0", "@huggingface/inference": "^2.6.4", "@ibm-cloud/watsonx-ai": "*", + "@lancedb/lancedb": "^0.12.0", "@langchain/core": ">=0.2.21 <0.4.0", "@layerup/layerup-security": "^1.5.12", "@libsql/client": "^0.14.0", @@ -334,7 +335,6 @@ "typeorm": "^0.3.20", "typesense": "^1.5.3", "usearch": "^1.1.1", - "vectordb": "^0.1.4", "voy-search": "0.6.2", "weaviate-ts-client": "*", "web-auth-library": "^1.0.3", @@ -424,6 +424,9 @@ "@huggingface/inference": { "optional": true }, + "@lancedb/lancedb": { + "optional": true + }, "@layerup/layerup-security": { "optional": true }, @@ -682,9 +685,6 @@ "usearch": { "optional": true }, - "vectordb": { - "optional": true - }, "voy-search": { "optional": true }, diff --git a/libs/langchain-community/src/vectorstores/lancedb.ts b/libs/langchain-community/src/vectorstores/lancedb.ts index 7df73aac93e7..f482b32c2148 100644 --- a/libs/langchain-community/src/vectorstores/lancedb.ts +++ b/libs/langchain-community/src/vectorstores/lancedb.ts @@ -1,4 +1,4 @@ -import { connect, Table, Connection, WriteMode } from "vectordb"; +import { connect, Table, Connection } from "@lancedb/lancedb"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import { Document } from "@langchain/core/documents"; @@ -12,7 +12,7 @@ export type LanceDBArgs = { textKey?: string; uri?: string; tableName?: string; - mode?: WriteMode; + mode?: "create" | "overwrite"; }; /** @@ -29,7 +29,7 @@ export class LanceDB extends VectorStore { private tableName: string; - private mode?: WriteMode; + private mode?: "create" | "overwrite"; constructor(embeddings: EmbeddingsInterface, args?: LanceDBArgs) { super(embeddings, args || {}); @@ -38,7 +38,7 @@ export class LanceDB extends VectorStore { this.textKey = args?.textKey || "text"; this.uri = args?.uri || "~/lancedb"; this.tableName = args?.tableName || "langchain"; - this.mode = args?.mode || WriteMode.Overwrite; + this.mode = args?.mode || "overwrite"; } /** @@ -86,7 +86,7 @@ export class LanceDB extends VectorStore { if (!this.table) { const db: Connection = await connect(this.uri); this.table = await db.createTable(this.tableName, data, { - writeMode: this.mode, + mode: this.mode, }); return; @@ -110,7 +110,11 @@ export class LanceDB extends VectorStore { "Table not found. Please add vectors to the table first." 
); } - const results = await this.table.search(query).limit(k).execute(); + const results = await this.table + .query() + .nearestTo(query) + .limit(k) + .toArray(); const docsAndScore: [Document, number][] = []; results.forEach((item) => { diff --git a/libs/langchain-community/src/vectorstores/tests/lancedb.int.test.ts b/libs/langchain-community/src/vectorstores/tests/lancedb.int.test.ts index 3d561c903440..c8af6e384889 100644 --- a/libs/langchain-community/src/vectorstores/tests/lancedb.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/lancedb.int.test.ts @@ -2,7 +2,7 @@ import { beforeEach, describe, expect, test } from "@jest/globals"; import * as fs from "node:fs/promises"; import * as path from "node:path"; import * as os from "node:os"; -import { connect, Table } from "vectordb"; +import { connect, Table } from "@lancedb/lancedb"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; diff --git a/yarn.lock b/yarn.lock index 16cc8eb38d04..42dd445ba3fe 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11126,41 +11126,68 @@ __metadata: languageName: node linkType: hard -"@lancedb/vectordb-darwin-arm64@npm:0.4.20": - version: 0.4.20 - resolution: "@lancedb/vectordb-darwin-arm64@npm:0.4.20" +"@lancedb/lancedb-darwin-arm64@npm:0.13.0": + version: 0.13.0 + resolution: "@lancedb/lancedb-darwin-arm64@npm:0.13.0" conditions: os=darwin & cpu=arm64 languageName: node linkType: hard -"@lancedb/vectordb-darwin-x64@npm:0.4.20": - version: 0.4.20 - resolution: "@lancedb/vectordb-darwin-x64@npm:0.4.20" +"@lancedb/lancedb-darwin-x64@npm:0.13.0": + version: 0.13.0 + resolution: "@lancedb/lancedb-darwin-x64@npm:0.13.0" conditions: os=darwin & cpu=x64 languageName: node linkType: hard -"@lancedb/vectordb-linux-arm64-gnu@npm:0.4.20": - version: 0.4.20 - resolution: "@lancedb/vectordb-linux-arm64-gnu@npm:0.4.20" - conditions: os=linux & cpu=arm64 +"@lancedb/lancedb-linux-arm64-gnu@npm:0.13.0": + version: 0.13.0 + resolution: "@lancedb/lancedb-linux-arm64-gnu@npm:0.13.0" + conditions: os=linux & cpu=arm64 & libc=glibc languageName: node linkType: hard -"@lancedb/vectordb-linux-x64-gnu@npm:0.4.20": - version: 0.4.20 - resolution: "@lancedb/vectordb-linux-x64-gnu@npm:0.4.20" - conditions: os=linux & cpu=x64 +"@lancedb/lancedb-linux-x64-gnu@npm:0.13.0": + version: 0.13.0 + resolution: "@lancedb/lancedb-linux-x64-gnu@npm:0.13.0" + conditions: os=linux & cpu=x64 & libc=glibc languageName: node linkType: hard -"@lancedb/vectordb-win32-x64-msvc@npm:0.4.20": - version: 0.4.20 - resolution: "@lancedb/vectordb-win32-x64-msvc@npm:0.4.20" +"@lancedb/lancedb-win32-x64-msvc@npm:0.13.0": + version: 0.13.0 + resolution: "@lancedb/lancedb-win32-x64-msvc@npm:0.13.0" conditions: os=win32 & cpu=x64 languageName: node linkType: hard +"@lancedb/lancedb@npm:^0.13.0": + version: 0.13.0 + resolution: "@lancedb/lancedb@npm:0.13.0" + dependencies: + "@lancedb/lancedb-darwin-arm64": 0.13.0 + "@lancedb/lancedb-darwin-x64": 0.13.0 + "@lancedb/lancedb-linux-arm64-gnu": 0.13.0 + "@lancedb/lancedb-linux-x64-gnu": 0.13.0 + "@lancedb/lancedb-win32-x64-msvc": 0.13.0 + reflect-metadata: ^0.2.2 + peerDependencies: + apache-arrow: ">=13.0.0 <=17.0.0" + dependenciesMeta: + "@lancedb/lancedb-darwin-arm64": + optional: true + "@lancedb/lancedb-darwin-x64": + optional: true + "@lancedb/lancedb-linux-arm64-gnu": + optional: true + "@lancedb/lancedb-linux-x64-gnu": + optional: true + "@lancedb/lancedb-win32-x64-msvc": + optional: true + conditions: (os=darwin | os=linux | os=win32) & 
(cpu=x64 | cpu=arm64) + languageName: node + linkType: hard + "@langchain/anthropic@*, @langchain/anthropic@workspace:*, @langchain/anthropic@workspace:libs/langchain-anthropic": version: 0.0.0-use.local resolution: "@langchain/anthropic@workspace:libs/langchain-anthropic" @@ -11490,6 +11517,7 @@ __metadata: "@huggingface/inference": ^2.6.4 "@ibm-cloud/watsonx-ai": ^1.1.0 "@jest/globals": ^29.5.0 + "@lancedb/lancedb": ^0.13.0 "@langchain/core": "workspace:*" "@langchain/openai": ">=0.2.0 <0.4.0" "@langchain/scripts": ">=0.1.0 <0.2.0" @@ -11629,7 +11657,6 @@ __metadata: typesense: ^1.5.3 usearch: ^1.1.1 uuid: ^10.0.0 - vectordb: ^0.9.0 voy-search: 0.6.2 weaviate-ts-client: ^1.4.0 web-auth-library: ^1.0.3 @@ -11666,6 +11693,7 @@ __metadata: "@gradientai/nodejs-sdk": ^1.2.0 "@huggingface/inference": ^2.6.4 "@ibm-cloud/watsonx-ai": "*" + "@lancedb/lancedb": ^0.12.0 "@langchain/core": ">=0.2.21 <0.4.0" "@layerup/layerup-security": ^1.5.12 "@libsql/client": ^0.14.0 @@ -11754,7 +11782,6 @@ __metadata: typeorm: ^0.3.20 typesense: ^1.5.3 usearch: ^1.1.1 - vectordb: ^0.1.4 voy-search: 0.6.2 weaviate-ts-client: "*" web-auth-library: ^1.0.3 @@ -11816,6 +11843,8 @@ __metadata: optional: true "@huggingface/inference": optional: true + "@lancedb/lancedb": + optional: true "@layerup/layerup-security": optional: true "@libsql/client": @@ -11988,8 +12017,6 @@ __metadata: optional: true usearch: optional: true - vectordb: - optional: true voy-search: optional: true weaviate-ts-client: @@ -13234,13 +13261,6 @@ __metadata: languageName: node linkType: hard -"@neon-rs/load@npm:^0.0.74": - version: 0.0.74 - resolution: "@neon-rs/load@npm:0.0.74" - checksum: d26ec9b08cdf1a7c5aeefe98f77112d205d11b4005a7934b21fe8fd27528847e08e4749e7e6c3fc05ae9f701175a58c11a095ae6af449634df3991a2c82e1dfa - languageName: node - linkType: hard - "@neondatabase/serverless@npm:0.6.0": version: 0.6.0 resolution: "@neondatabase/serverless@npm:0.6.0" @@ -27373,6 +27393,7 @@ __metadata: "@getzep/zep-js": ^0.9.0 "@gomomento/sdk": ^1.51.1 "@google/generative-ai": ^0.7.0 + "@lancedb/lancedb": ^0.13.0 "@langchain/anthropic": "workspace:*" "@langchain/aws": "workspace:*" "@langchain/azure-cosmosdb": "workspace:*" @@ -27457,7 +27478,6 @@ __metadata: typescript: ~5.1.6 typesense: ^1.5.3 uuid: ^10.0.0 - vectordb: ^0.9.0 voy-search: 0.6.2 weaviate-ts-client: ^2.0.0 zod: ^3.22.4 @@ -38701,7 +38721,7 @@ __metadata: languageName: node linkType: hard -"reflect-metadata@npm:^0.2.1": +"reflect-metadata@npm:^0.2.1, reflect-metadata@npm:^0.2.2": version: 0.2.2 resolution: "reflect-metadata@npm:0.2.2" checksum: a66c7b583e4efdd8f3c3124fbff33da2d0c86d8280617516308b32b2159af7a3698c961db3246387f56f6316b1d33a608f39bb2b49d813316dfc58f6d3bf3210 @@ -43229,35 +43249,6 @@ __metadata: languageName: node linkType: hard -"vectordb@npm:^0.9.0": - version: 0.9.0 - resolution: "vectordb@npm:0.9.0" - dependencies: - "@lancedb/vectordb-darwin-arm64": 0.4.20 - "@lancedb/vectordb-darwin-x64": 0.4.20 - "@lancedb/vectordb-linux-arm64-gnu": 0.4.20 - "@lancedb/vectordb-linux-x64-gnu": 0.4.20 - "@lancedb/vectordb-win32-x64-msvc": 0.4.20 - "@neon-rs/load": ^0.0.74 - axios: ^1.4.0 - peerDependencies: - "@apache-arrow/ts": ^14.0.2 - apache-arrow: ^14.0.2 - dependenciesMeta: - "@lancedb/vectordb-darwin-arm64": - optional: true - "@lancedb/vectordb-darwin-x64": - optional: true - "@lancedb/vectordb-linux-arm64-gnu": - optional: true - "@lancedb/vectordb-linux-x64-gnu": - optional: true - "@lancedb/vectordb-win32-x64-msvc": - optional: true - conditions: (os=darwin | os=linux | os=win32) 
& (cpu=x64 | cpu=arm64) - languageName: node - linkType: hard - "vfile-location@npm:^3.0.0, vfile-location@npm:^3.2.0": version: 3.2.0 resolution: "vfile-location@npm:3.2.0" From eb266578f5a50301c2b59fffc31072e07540ebd9 Mon Sep 17 00:00:00 2001 From: aditishree1 <141712869+aditishree1@users.noreply.github.com> Date: Sun, 17 Nov 2024 07:31:58 +0530 Subject: [PATCH 098/100] feat(cosmosdbnosql): Add Chat History Integration (#7057) --- .../memory/azure_cosmosdb_nosql.mdx | 42 ++++ .../docs/integrations/platforms/microsoft.mdx | 16 ++ examples/src/memory/azure_cosmosdb_nosql.ts | 58 +++++ .../src/chat_histories.ts | 204 ++++++++++++++++++ libs/langchain-azure-cosmosdb/src/index.ts | 1 + .../src/tests/chat_histories.int.test.ts | 168 +++++++++++++++ 6 files changed, 489 insertions(+) create mode 100644 docs/core_docs/docs/integrations/memory/azure_cosmosdb_nosql.mdx create mode 100644 examples/src/memory/azure_cosmosdb_nosql.ts create mode 100644 libs/langchain-azure-cosmosdb/src/chat_histories.ts create mode 100644 libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts diff --git a/docs/core_docs/docs/integrations/memory/azure_cosmosdb_nosql.mdx b/docs/core_docs/docs/integrations/memory/azure_cosmosdb_nosql.mdx new file mode 100644 index 000000000000..f70bbf15df73 --- /dev/null +++ b/docs/core_docs/docs/integrations/memory/azure_cosmosdb_nosql.mdx @@ -0,0 +1,42 @@ +--- +hide_table_of_contents: true +--- + +import CodeBlock from "@theme/CodeBlock"; + +# Azure Cosmos DB NoSQL Chat Message History + +The AzureCosmosDBNoSQLChatMessageHistory uses Cosmos DB to store chat message history. For longer-term persistence across chat sessions, you can swap out the default in-memory `chatHistory` that backs chat memory classes like `BufferMemory`. +If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started. + +## Setup + +You'll first need to install the [`@langchain/azure-cosmosdb`](https://www.npmjs.com/package/@langchain/azure-cosmosdb) package: + +```bash npm2yarn +npm install @langchain/azure-cosmosdb @langchain/core +``` + +import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; + + + +```bash npm2yarn +npm install @langchain/openai @langchain/community @langchain/core +``` + +You'll also need to have an Azure Cosmos DB for NoSQL instance running. You can deploy a free version on Azure Portal without any cost, following [this guide](https://learn.microsoft.com/azure/cosmos-db/nosql/quickstart-portal). + +Once you have your instance running, make sure you have the connection string. If you are using Managed Identity, you need to have the endpoint. You can find them in the Azure Portal, under the "Settings / Keys" section of your instance. + +:::info + +When using Azure Managed Identity and role-based access control, you must ensure that the database and container have been created beforehand. RBAC does not provide permissions to create databases and containers. You can get more information about the permission model in the [Azure Cosmos DB documentation](https://learn.microsoft.com/azure/cosmos-db/how-to-setup-rbac#permission-model). 
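+
+As a minimal sketch (the database and container names below are only placeholders and must match the ones you pass to the chat history class), you can pre-create them with the `@azure/cosmos` SDK:
+
+```typescript
+import { CosmosClient } from "@azure/cosmos";
+import { DefaultAzureCredential } from "@azure/identity";
+
+const client = new CosmosClient({
+  endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT,
+  aadCredentials: new DefaultAzureCredential(),
+});
+
+// RBAC alone cannot create databases or containers, so create them up front.
+const { database } = await client.databases.createIfNotExists({
+  id: "DATABASE_NAME",
+});
+await database.containers.createIfNotExists({
+  id: "CONTAINER_NAME",
+  partitionKey: "/userId",
+});
+```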
+ +::: + +## Usage + +import Example from "@examples/memory/azure_cosmosdb_nosql.ts"; + +{Example} diff --git a/docs/core_docs/docs/integrations/platforms/microsoft.mdx b/docs/core_docs/docs/integrations/platforms/microsoft.mdx index e9f3d7fd4922..d3d9a6416f83 100644 --- a/docs/core_docs/docs/integrations/platforms/microsoft.mdx +++ b/docs/core_docs/docs/integrations/platforms/microsoft.mdx @@ -150,6 +150,22 @@ See a [usage example](/docs/integrations/llm_caching/azure_cosmosdb_nosql). import { AzureCosmosDBNoSQLSemanticCache } from "@langchain/azure-cosmosdb"; ``` +## Chat Message History + +### Azure Cosmos DB NoSQL Chat Message History + +> The AzureCosmosDBNoSQLChatMessageHistory uses Cosmos DB to store chat message history. For longer-term persistence across chat sessions, you can swap out the default in-memory `chatHistory` that backs chat memory classes like `BufferMemory`. + +```bash npm2yarn +npm install @langchain/azure-cosmosdb @langchain/core +``` + +See [usage example](/docs/integrations/memory/azure_cosmosdb_nosql.mdx). + +```typescript +import { AzureCosmosDBNoSQLChatMessageHistory } from "@langchain/azure-cosmosdb"; +``` + ## Document loaders ### Azure Blob Storage diff --git a/examples/src/memory/azure_cosmosdb_nosql.ts b/examples/src/memory/azure_cosmosdb_nosql.ts new file mode 100644 index 000000000000..2f3cddf4460f --- /dev/null +++ b/examples/src/memory/azure_cosmosdb_nosql.ts @@ -0,0 +1,58 @@ +import { ChatOpenAI } from "@langchain/openai"; +import { AzureCosmsosDBNoSQLChatMessageHistory } from "@langchain/azure-cosmosdb"; +import { RunnableWithMessageHistory } from "@langchain/core/runnables"; +import { StringOutputParser } from "@langchain/core/output_parsers"; +import { + ChatPromptTemplate, + MessagesPlaceholder, +} from "@langchain/core/prompts"; + +const model = new ChatOpenAI({ + model: "gpt-3.5-turbo", + temperature: 0, +}); + +const prompt = ChatPromptTemplate.fromMessages([ + [ + "system", + "You are a helpful assistant. Answer all questions to the best of your ability.", + ], + new MessagesPlaceholder("chat_history"), + ["human", "{input}"], +]); + +const chain = prompt.pipe(model).pipe(new StringOutputParser()); + +const chainWithHistory = new RunnableWithMessageHistory({ + runnable: chain, + inputMessagesKey: "input", + historyMessagesKey: "chat_history", + getMessageHistory: async (sessionId) => { + const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory({ + sessionId, + userId: "user-id", + databaseName: "DATABASE_NAME", + containerName: "CONTAINER_NAME", + }); + return chatHistory; + }, +}); + +const res1 = await chainWithHistory.invoke( + { input: "Hi! I'm Jim." }, + { configurable: { sessionId: "langchain-test-session" } } +); +console.log({ res1 }); +/* +{ res1: 'Hi Jim! How can I assist you today?' } + */ + +const res2 = await chainWithHistory.invoke( + { input: "What did I just say my name was?" }, + { configurable: { sessionId: "langchain-test-session" } } +); +console.log({ res2 }); + +/* + { res2: { response: 'You said your name was Jim.' 
} + */ diff --git a/libs/langchain-azure-cosmosdb/src/chat_histories.ts b/libs/langchain-azure-cosmosdb/src/chat_histories.ts new file mode 100644 index 000000000000..033acc521334 --- /dev/null +++ b/libs/langchain-azure-cosmosdb/src/chat_histories.ts @@ -0,0 +1,204 @@ +import { Container, CosmosClient, CosmosClientOptions } from "@azure/cosmos"; +import { DefaultAzureCredential, TokenCredential } from "@azure/identity"; +import { BaseListChatMessageHistory } from "@langchain/core/chat_history"; +import { + BaseMessage, + mapChatMessagesToStoredMessages, + mapStoredMessagesToChatMessages, +} from "@langchain/core/messages"; +import { getEnvironmentVariable } from "@langchain/core/utils/env"; + +const USER_AGENT_SUFFIX = "langchainjs-cdbnosql-chathistory-javascript"; +const DEFAULT_DATABASE_NAME = "chatHistoryDB"; +const DEFAULT_CONTAINER_NAME = "chatHistoryContainer"; + +/** + * Type for the input to the `AzureCosmosDBNoSQLChatMessageHistory` constructor. + */ +export interface AzureCosmosDBNoSQLChatMessageHistoryInput { + sessionId: string; + userId?: string; + client?: CosmosClient; + connectionString?: string; + endpoint?: string; + databaseName?: string; + containerName?: string; + credentials?: TokenCredential; + ttl?: number; +} + +/** + * Class for storing chat message history with Cosmos DB NoSQL. It extends the + * BaseListChatMessageHistory class and provides methods to get, add, and + * clear messages. + * + * @example + * ```typescript + * const model = new ChatOpenAI({ + * model: "gpt-3.5-turbo", + * temperature: 0, + * }); + * const prompt = ChatPromptTemplate.fromMessages([ + * [ + * "system", + * "You are a helpful assistant. Answer all questions to the best of your ability.", + * ], + * new MessagesPlaceholder("chat_history"), + * ["human", "{input}"], + * ]); + * + * const chain = prompt.pipe(model).pipe(new StringOutputParser()); + * const chainWithHistory = new RunnableWithMessageHistory({ + * runnable: chain, + * inputMessagesKey: "input", + * historyMessagesKey: "chat_history", + * getMessageHistory: async (sessionId) => { + * const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory({ + * sessionId: sessionId, + * userId: "user-id", + * databaseName: "DATABASE_NAME", + * containerName: "CONTAINER_NAME", + * }) + * return chatHistory; + * }, + * }); + * await chainWithHistory.invoke( + * { input: "What did I just say my name was?" }, + * { configurable: { sessionId: "session-id" } } + * ); + * ``` + */ + +export class AzureCosmsosDBNoSQLChatMessageHistory extends BaseListChatMessageHistory { + lc_namespace = ["langchain", "stores", "message", "azurecosmosdb"]; + + private container: Container; + + private sessionId: string; + + private databaseName: string; + + private containerName: string; + + private client: CosmosClient; + + private userId: string; + + private ttl: number | undefined; + + private messageList: BaseMessage[] = []; + + private initPromise?: Promise; + + constructor(chatHistoryInput: AzureCosmosDBNoSQLChatMessageHistoryInput) { + super(); + + this.sessionId = chatHistoryInput.sessionId; + this.databaseName = chatHistoryInput.databaseName ?? DEFAULT_DATABASE_NAME; + this.containerName = + chatHistoryInput.containerName ?? DEFAULT_CONTAINER_NAME; + this.userId = chatHistoryInput.userId ?? "anonymous"; + this.ttl = chatHistoryInput.ttl; + this.client = this.initializeClient(chatHistoryInput); + } + + private initializeClient( + input: AzureCosmosDBNoSQLChatMessageHistoryInput + ): CosmosClient { + const connectionString = + input.connectionString ?? 
+ getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_CONNECTION_STRING"); + const endpoint = + input.endpoint ?? getEnvironmentVariable("AZURE_COSMOSDB_NOSQL_ENDPOINT"); + + if (!input.client && !connectionString && !endpoint) { + throw new Error( + "CosmosClient, connection string, or endpoint must be provided." + ); + } + + if (input.client) { + return input.client; + } + + if (connectionString) { + const [endpointPart, keyPart] = connectionString.split(";"); + const endpoint = endpointPart.split("=")[1]; + const key = keyPart.split("=")[1]; + + return new CosmosClient({ + endpoint, + key, + userAgentSuffix: USER_AGENT_SUFFIX, + }); + } else { + return new CosmosClient({ + endpoint, + aadCredentials: input.credentials ?? new DefaultAzureCredential(), + userAgentSuffix: USER_AGENT_SUFFIX, + } as CosmosClientOptions); + } + } + + private async initializeContainer(): Promise { + if (!this.initPromise) { + this.initPromise = (async () => { + const { database } = await this.client.databases.createIfNotExists({ + id: this.databaseName, + }); + const { container } = await database.containers.createIfNotExists({ + id: this.containerName, + partitionKey: "/userId", + defaultTtl: this.ttl, + }); + this.container = container; + })().catch((error) => { + console.error("Error initializing Cosmos DB container:", error); + throw error; + }); + } + return this.initPromise; + } + + async getMessages(): Promise { + await this.initializeContainer(); + const document = await this.container + .item(this.sessionId, this.userId) + .read(); + const messages = document.resource?.messages || []; + this.messageList = mapStoredMessagesToChatMessages(messages); + return this.messageList; + } + + async addMessage(message: BaseMessage): Promise { + await this.initializeContainer(); + this.messageList = await this.getMessages(); + this.messageList.push(message); + const messages = mapChatMessagesToStoredMessages(this.messageList); + await this.container.items.upsert({ + id: this.sessionId, + userId: this.userId, + messages, + }); + } + + async clear(): Promise { + this.messageList = []; + await this.initializeContainer(); + await this.container.item(this.sessionId, this.userId).delete(); + } + + async clearAllSessionsForUser(userId: string) { + await this.initializeContainer(); + const query = { + query: "SELECT c.id FROM c WHERE c.userId = @userId", + parameters: [{ name: "@userId", value: userId }], + }; + const { resources: userSessions } = await this.container.items + .query(query) + .fetchAll(); + for (const userSession of userSessions) { + await this.container.item(userSession.id, userId).delete(); + } + } +} diff --git a/libs/langchain-azure-cosmosdb/src/index.ts b/libs/langchain-azure-cosmosdb/src/index.ts index e1160c548ef9..c5160397b474 100644 --- a/libs/langchain-azure-cosmosdb/src/index.ts +++ b/libs/langchain-azure-cosmosdb/src/index.ts @@ -1,3 +1,4 @@ export * from "./azure_cosmosdb_mongodb.js"; export * from "./azure_cosmosdb_nosql.js"; export * from "./caches.js"; +export * from "./chat_histories.js"; diff --git a/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts b/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts new file mode 100644 index 000000000000..81f2070ceb81 --- /dev/null +++ b/libs/langchain-azure-cosmosdb/src/tests/chat_histories.int.test.ts @@ -0,0 +1,168 @@ +/* eslint-disable no-promise-executor-return */ +/* eslint-disable no-process-env */ + +import { expect } from "@jest/globals"; +import { HumanMessage, AIMessage } from "@langchain/core/messages"; +import { 
CosmosClient } from "@azure/cosmos"; +import { DefaultAzureCredential } from "@azure/identity"; +import { ObjectId } from "mongodb"; +import { AzureCosmsosDBNoSQLChatMessageHistory } from "../chat_histories.js"; + +const DATABASE_NAME = "langchainTestDB"; +const CONTAINER_NAME = "testContainer"; + +/* + * To run this test, you need have an Azure Cosmos DB for NoSQL instance + * running. You can deploy a free version on Azure Portal without any cost, + * following this guide: + * https://learn.microsoft.com/azure/cosmos-db/nosql/vector-search + * + * You do not need to create a database or collection, it will be created + * automatically by the test. + * + * Once you have the instance running, you need to set the following environment + * variables before running the test: + * - AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT + */ +beforeEach(async () => { + let client: CosmosClient; + + if (process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING) { + client = new CosmosClient( + process.env.AZURE_COSMOSDB_NOSQL_CONNECTION_STRING + ); + } else if (process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT) { + client = new CosmosClient({ + endpoint: process.env.AZURE_COSMOSDB_NOSQL_ENDPOINT, + aadCredentials: new DefaultAzureCredential(), + }); + } else { + throw new Error( + "Please set the environment variable AZURE_COSMOSDB_NOSQL_CONNECTION_STRING or AZURE_COSMOSDB_NOSQL_ENDPOINT" + ); + } + try { + await client.database(DATABASE_NAME).delete(); + } catch { + // Ignore error if the database does not exist + } + try { + await client.database("DbWithTTL").delete(); + } catch { + // Ignore error if the database does not exist + } +}); + +test("Test CosmosDB History Store", async () => { + const input = { + sessionId: new ObjectId().toString(), + userId: new ObjectId().toString(), + databaseName: DATABASE_NAME, + containerName: CONTAINER_NAME, + }; + const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory(input); + const blankResult = await chatHistory.getMessages(); + expect(blankResult).toStrictEqual([]); + + await chatHistory.addUserMessage("Who is the best vocalist?"); + await chatHistory.addAIMessage("Ozzy Osbourne"); + + const expectedMessages = [ + new HumanMessage("Who is the best vocalist?"), + new AIMessage("Ozzy Osbourne"), + ]; + const resultWithHistory = await chatHistory.getMessages(); + expect(resultWithHistory).toEqual(expectedMessages); +}); + +test("Test clear CosmosDB history Store", async () => { + const input = { + sessionId: new ObjectId().toString(), + userId: new ObjectId().toString(), + databaseName: DATABASE_NAME, + containerName: CONTAINER_NAME, + }; + const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory(input); + + await chatHistory.addUserMessage("Who is the best vocalist?"); + await chatHistory.addAIMessage("Ozzy Osbourne"); + + const expectedMessages = [ + new HumanMessage("Who is the best vocalist?"), + new AIMessage("Ozzy Osbourne"), + ]; + const resultWithHistory = await chatHistory.getMessages(); + expect(resultWithHistory).toEqual(expectedMessages); + + await chatHistory.clear(); + + const blankResult = await chatHistory.getMessages(); + expect(blankResult).toStrictEqual([]); +}); + +test("Test CosmosDB history with a TTL", async () => { + const input = { + sessionId: new ObjectId().toString(), + userId: new ObjectId().toString(), + databaseName: "DbWithTTL", + ttl: 5, + }; + const chatHistory = new AzureCosmsosDBNoSQLChatMessageHistory(input); + + await chatHistory.addUserMessage("Who is the best vocalist?"); + await 
chatHistory.addAIMessage("Ozzy Osbourne"); + + const expectedMessages = [ + new HumanMessage("Who is the best vocalist?"), + new AIMessage("Ozzy Osbourne"), + ]; + const resultWithHistory = await chatHistory.getMessages(); + expect(resultWithHistory).toEqual(expectedMessages); + + await new Promise((resolve) => setTimeout(resolve, 6000)); + + const expiredResult = await chatHistory.getMessages(); + expect(expiredResult).toStrictEqual([]); +}); + +test("Test clear all sessions for a user", async () => { + const input1 = { + sessionId: new Date().toISOString(), + userId: "user1", + databaseName: "DbWithTTL", + ttl: 5, + }; + const chatHistory1 = new AzureCosmsosDBNoSQLChatMessageHistory(input1); + + await chatHistory1.addUserMessage("Who is the best vocalist?"); + await chatHistory1.addAIMessage("Ozzy Osbourne"); + + const input2 = { + sessionId: new Date().toISOString(), + userId: "user1", + databaseName: "DbWithTTL", + ttl: 5, + }; + const chatHistory2 = new AzureCosmsosDBNoSQLChatMessageHistory(input2); + + await chatHistory2.addUserMessage("Who is the best vocalist?"); + await chatHistory2.addAIMessage("Ozzy Osbourne"); + + const expectedMessages = [ + new HumanMessage("Who is the best vocalist?"), + new AIMessage("Ozzy Osbourne"), + ]; + + const result1 = await chatHistory1.getMessages(); + expect(result1).toEqual(expectedMessages); + + const result2 = await chatHistory1.getMessages(); + expect(result2).toEqual(expectedMessages); + + await chatHistory1.clearAllSessionsForUser("user1"); + + const deletedResult1 = await chatHistory1.getMessages(); + const deletedResult2 = await chatHistory2.getMessages(); + expect(deletedResult1).toStrictEqual([]); + expect(deletedResult2).toStrictEqual([]); +}); From fb9eaf637d4b80c6b09b259dc29693c24b99c2ef Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Sat, 16 Nov 2024 18:13:34 -0800 Subject: [PATCH 099/100] chore(azure-cosmosdb): Release 0.2.2 (#7219) --- libs/langchain-azure-cosmosdb/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain-azure-cosmosdb/package.json b/libs/langchain-azure-cosmosdb/package.json index 025ae69ed90b..df04442e5ed7 100644 --- a/libs/langchain-azure-cosmosdb/package.json +++ b/libs/langchain-azure-cosmosdb/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/azure-cosmosdb", - "version": "0.2.1", + "version": "0.2.2", "description": "Azure CosmosDB integration for LangChain.js", "type": "module", "engines": { From 633dca9ed82ad594b5f717640ba6932f8e375c6a Mon Sep 17 00:00:00 2001 From: Christopher Dierkens Date: Sun, 17 Nov 2024 00:37:55 -0500 Subject: [PATCH 100/100] feat(community): add filters to LibSQLVectorStore (#7209) Co-authored-by: jacoblee93 --- .../src/utils/sqlite_where_builder.ts | 61 +++++++++ .../src/vectorstores/libsql.ts | 38 +++++- .../src/vectorstores/tests/libsql.int.test.ts | 118 ++++++++++++------ 3 files changed, 176 insertions(+), 41 deletions(-) create mode 100644 libs/langchain-community/src/utils/sqlite_where_builder.ts diff --git a/libs/langchain-community/src/utils/sqlite_where_builder.ts b/libs/langchain-community/src/utils/sqlite_where_builder.ts new file mode 100644 index 000000000000..5c8b55b85645 --- /dev/null +++ b/libs/langchain-community/src/utils/sqlite_where_builder.ts @@ -0,0 +1,61 @@ +import { InStatement, InValue } from "@libsql/client"; + +export type WhereCondition< + // eslint-disable-next-line @typescript-eslint/no-explicit-any + Metadata extends Record = Record +> = { + [Key in keyof Metadata]: + | { + operator: "=" | ">" | "<" | ">=" | "<=" | 
"<>" | "LIKE"; + value: InValue; + } + | { + operator: "IN"; + value: InValue[]; + }; +}; + +type WhereInStatement = Exclude; + +export class SqliteWhereBuilder { + private conditions: WhereCondition; + + constructor(conditions: WhereCondition) { + this.conditions = conditions; + } + + buildWhereClause(): WhereInStatement { + const sqlParts: string[] = []; + const args: Record = {}; + + for (const [column, condition] of Object.entries(this.conditions)) { + const { operator, value } = condition; + + if (operator === "IN") { + const placeholders = value + .map((_, index) => `:${column}${index}`) + .join(", "); + sqlParts.push( + `json_extract(metadata, '$.${column}') IN (${placeholders})` + ); + + const values = value.reduce( + (previousValue: Record, currentValue, index) => { + return { ...previousValue, [`${column}${index}`]: currentValue }; + }, + {} + ); + + Object.assign(args, values); + } else { + sqlParts.push( + `json_extract(metadata, '$.${column}') ${operator} :${column}` + ); + args[column] = value; + } + } + + const sql = sqlParts.length ? `${sqlParts.join(" AND ")}` : ""; + return { sql, args }; + } +} diff --git a/libs/langchain-community/src/vectorstores/libsql.ts b/libs/langchain-community/src/vectorstores/libsql.ts index 93c878073951..f13e42c1bdd7 100644 --- a/libs/langchain-community/src/vectorstores/libsql.ts +++ b/libs/langchain-community/src/vectorstores/libsql.ts @@ -2,6 +2,10 @@ import { Document } from "@langchain/core/documents"; import type { EmbeddingsInterface } from "@langchain/core/embeddings"; import { VectorStore } from "@langchain/core/vectorstores"; import type { Client, InStatement } from "@libsql/client"; +import { + SqliteWhereBuilder, + WhereCondition, +} from "../utils/sqlite_where_builder.js"; // eslint-disable-next-line @typescript-eslint/no-explicit-any type MetadataDefault = Record; @@ -24,7 +28,7 @@ export interface LibSQLVectorStoreArgs { export class LibSQLVectorStore< Metadata extends MetadataDefault = MetadataDefault > extends VectorStore { - declare FilterType: (doc: Document) => boolean; + declare FilterType: string | InStatement | WhereCondition; private db; @@ -111,9 +115,8 @@ export class LibSQLVectorStore< */ async similaritySearchVectorWithScore( query: number[], - k: number - // filter is currently unused - // filter?: this["FilterType"] + k: number, + filter?: this["FilterType"] ): Promise<[Document, number][]> { // Potential SQL injection risk if query vector is not properly sanitized. if (!query.every((num) => typeof num === "number" && !Number.isNaN(num))) { @@ -122,12 +125,35 @@ export class LibSQLVectorStore< const queryVector = `[${query.join(",")}]`; - const sql: InStatement = { + const sql = { sql: `SELECT ${this.table}.rowid as id, ${this.table}.content, ${this.table}.metadata, vector_distance_cos(${this.table}.${this.column}, vector(:queryVector)) AS distance FROM vector_top_k('idx_${this.table}_${this.column}', vector(:queryVector), CAST(:k AS INTEGER)) as top_k JOIN ${this.table} ON top_k.rowid = ${this.table}.rowid`, args: { queryVector, k }, - }; + } satisfies InStatement; + + // Filter is a raw sql where clause, so append it to the join + if (typeof filter === "string") { + sql.sql += ` AND ${filter}`; + } else if (typeof filter === "object") { + // Filter is an in statement. 
+ if ("sql" in filter) { + sql.sql += ` AND ${filter.sql}`; + sql.args = { + ...filter.args, + ...sql.args, + }; + } else { + const builder = new SqliteWhereBuilder(filter); + const where = builder.buildWhereClause(); + + sql.sql += ` AND ${where.sql}`; + sql.args = { + ...where.args, + ...sql.args, + }; + } + } const results = await this.db.execute(sql); diff --git a/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts b/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts index 5dbec055afff..27b2ee793f42 100644 --- a/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts +++ b/libs/langchain-community/src/vectorstores/tests/libsql.int.test.ts @@ -80,10 +80,10 @@ describe("LibSQLVectorStore (local)", () => { const store = new LibSQLVectorStore(embeddings, config); const ids = await store.addDocuments([ - { + new Document({ pageContent: "hello", metadata: { a: 1 }, - }, + }), ]); expect(ids).toHaveLength(1); @@ -117,10 +117,10 @@ describe("LibSQLVectorStore (local)", () => { const store = new LibSQLVectorStore(embeddings, config); const ids = await store.addDocuments([ - { + new Document({ pageContent: "hello world", metadata: { a: 1 }, - }, + }), ]); expect(ids).toHaveLength(1); @@ -154,18 +154,15 @@ describe("LibSQLVectorStore (local)", () => { const store = new LibSQLVectorStore(embeddings, config); const ids = await store.addDocuments([ - { + new Document({ pageContent: "the quick brown fox", - metadata: { a: 1 }, - }, - { + }), + new Document({ pageContent: "jumped over the lazy dog", - metadata: { a: 2 }, - }, - { + }), + new Document({ pageContent: "hello world", - metadata: { a: 3 }, - }, + }), ]); expect(ids).toHaveLength(3); @@ -186,7 +183,7 @@ describe("LibSQLVectorStore (local)", () => { ).toBe(true); }); - test("a document can be deleted by id", async () => { + test("a similarity search with a filter can be performed", async () => { await client.batch([ `DROP TABLE IF EXISTS vectors;`, `CREATE TABLE IF NOT EXISTS vectors ( @@ -201,18 +198,72 @@ describe("LibSQLVectorStore (local)", () => { const store = new LibSQLVectorStore(embeddings, config); const ids = await store.addDocuments([ - { + new Document({ pageContent: "the quick brown fox", - metadata: { a: 1 }, + metadata: { + label: "1", + }, + }), + new Document({ + pageContent: "jumped over the lazy dog", + metadata: { + label: "2", + }, + }), + new Document({ + pageContent: "hello world", + metadata: { + label: "1", + }, + }), + ]); + + expect(ids).toHaveLength(3); + expect(ids.every((id) => typeof id === "string")).toBe(true); + + const results = await store.similaritySearch("the quick brown dog", 10, { + label: { + operator: "=", + value: "1", }, - { + }); + + expect(results).toHaveLength(2); + expect(results.map((result) => result.pageContent)).toEqual([ + "the quick brown fox", + "hello world", + ]); + expect( + results.map((result) => result.id).every((id) => typeof id === "string") + ).toBe(true); + }); + + test("a document can be deleted by id", async () => { + await client.batch([ + `DROP TABLE IF EXISTS vectors;`, + `CREATE TABLE IF NOT EXISTS vectors ( + content TEXT, + metadata JSON, + embedding F32_BLOB(1024) + );`, + `CREATE INDEX IF NOT EXISTS idx_vectors_embedding + ON vectors (libsql_vector_idx(embedding));`, + ]); + + const store = new LibSQLVectorStore(embeddings, config); + + const ids = await store.addDocuments([ + new Document({ + pageContent: "the quick brown fox", + }), + new Document({ pageContent: "jumped over the lazy dog", metadata: { a: 2 }, - }, - { + }), 
+ new Document({ pageContent: "hello world", metadata: { a: 3 }, - }, + }), ]); expect(ids).toHaveLength(3); @@ -247,18 +298,15 @@ describe("LibSQLVectorStore (local)", () => { const store = new LibSQLVectorStore(embeddings, config); const ids = await store.addDocuments([ - { + new Document({ pageContent: "the quick brown fox", - metadata: { a: 1 }, - }, - { + }), + new Document({ pageContent: "jumped over the lazy dog", - metadata: { a: 2 }, - }, - { + }), + new Document({ pageContent: "hello world", - metadata: { a: 3 }, - }, + }), ]); expect(ids).toHaveLength(3); @@ -289,18 +337,18 @@ describe("LibSQLVectorStore (local)", () => { const store = new LibSQLVectorStore(embeddings, config); const ids = await store.addDocuments([ - { + new Document({ pageContent: "the quick brown fox", metadata: { a: 1 }, - }, - { + }), + new Document({ pageContent: "jumped over the lazy dog", metadata: { a: 2 }, - }, - { + }), + new Document({ pageContent: "hello world", metadata: { a: 3 }, - }, + }), ]); expect(ids).toHaveLength(3);