From bd717d340c2b1f3bd3a7409244c65b91de0dd0fe Mon Sep 17 00:00:00 2001 From: bracesproul Date: Tue, 7 May 2024 15:13:57 -0700 Subject: [PATCH 001/373] js[patch]: Fix listRuns limit arg --- js/package.json | 1 + js/src/client.ts | 17 +++++++++++- js/src/tests/client.int.test.ts | 49 ++++++++++++++++++++++++++++++++- js/src/tests/utils.ts | 27 ++++++++++++++++++ js/yarn.lock | 5 ++++ 5 files changed, 97 insertions(+), 2 deletions(-) diff --git a/js/package.json b/js/package.json index 484d8c4b3..71391ee54 100644 --- a/js/package.json +++ b/js/package.json @@ -84,6 +84,7 @@ }, "devDependencies": { "@babel/preset-env": "^7.22.4", + "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", "@langchain/core": "^0.1.32", "@langchain/langgraph": "^0.0.8", diff --git a/js/src/client.ts b/js/src/client.ts index 4736740f9..6ebaebe2f 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -1194,11 +1194,26 @@ export class Client { is_root: isRoot, }; + let runsYielded = 0; + console.log("going to list!"); for await (const runs of this._getCursorPaginatedList( "/runs/query", body )) { - yield* runs; + if (limit) { + if (runsYielded >= limit) { + break; + } + if (runs.length + runsYielded > limit) { + const newRuns = runs.splice(limit - runsYielded); + yield* newRuns; + break; + } + runsYielded += runs.length; + yield* runs; + } else { + yield* runs; + } } } diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index eaf49b975..64b7f72d1 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -3,7 +3,13 @@ import { FunctionMessage, HumanMessage } from "@langchain/core/messages"; import { Client } from "../client.js"; import { v4 as uuidv4 } from "uuid"; -import { deleteDataset, deleteProject, toArray, waitUntil } from "./utils.js"; +import { + createRunsFactory, + deleteDataset, + deleteProject, + toArray, + waitUntil, +} from "./utils.js"; type CheckOutputsType = boolean | ((run: Run) => boolean); async function 
waitUntilRunFound( @@ -558,3 +564,44 @@ test.concurrent( }, 180_000 ); + +test.concurrent.only("list runs limit arg works", async () => { + const client = new Client(); + + const projectName = "test-limit-runs-listRuns-endpoint"; + try { + // create a fresh project with 10 runs --default amount created by createRunsFactory + await client.createProject({ + projectName, + }); + await Promise.all(createRunsFactory(projectName).map(client.createRun)); + + const limit = 6; + let iters = 0; + const runsArr: Array = []; + for await (const run of client.listRuns({ limit, projectName })) { + expect(run).toBeDefined(); + runsArr.push(run); + iters += 1; + if (iters > limit) { + throw new Error( + `More runs returned than expected.\nExpected: ${limit}\nReceived: ${iters}` + ); + } + } + expect(runsArr.length).toBe(limit); + } catch (e: any) { + // cleanup by deleting the project + const projectExists = await client.hasProject({ projectName }); + if (projectExists) { + await client.deleteProject({ projectName }); + } + + // Error thrown by test, rethrow + if (e.message.startsWith("More runs returned than expected.")) { + throw e; + } else { + console.error(e); + } + } +}); diff --git a/js/src/tests/utils.ts b/js/src/tests/utils.ts index a7e4d3197..1165c7373 100644 --- a/js/src/tests/utils.ts +++ b/js/src/tests/utils.ts @@ -1,4 +1,8 @@ import { Client } from "../client.js"; +import { v4 as uuidv4 } from "uuid"; +// eslint-disable-next-line import/no-extraneous-dependencies +import { faker } from "@faker-js/faker"; +import { RunCreate } from "../schemas.js"; export async function toArray(iterable: AsyncIterable): Promise { const result: T[] = []; @@ -112,3 +116,26 @@ export function sanitizePresignedUrls(payload: unknown) { return value; }); } + +/** + * Factory which returns a list of `RunCreate` objects. 
+ * @param {number} count Number of runs to create (default: 10) + * @returns {Array} List of `RunCreate` objects + */ +export function createRunsFactory( + projectName: string, + count = 10 +): Array { + return Array.from({ length: count }).map((_, idx) => ({ + id: uuidv4(), + name: `${idx}-${faker.lorem.words()}`, + run_type: faker.helpers.arrayElement(["tool", "chain", "llm", "runnable"]), + inputs: { + question: faker.lorem.sentence(), + }, + outputs: { + answer: faker.lorem.sentence(), + }, + project_name: projectName, + })); +} diff --git a/js/yarn.lock b/js/yarn.lock index 94e03d6a3..4968faf24 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -1109,6 +1109,11 @@ resolved "https://registry.npmjs.org/@eslint/js/-/js-8.41.0.tgz" integrity sha512-LxcyMGxwmTh2lY9FwHPGWOHmYFCZvbrFCBZL4FzSSsxsRPuhrYUg/49/0KDfW8tnIEaEHtfmn6+NPN+1DqaNmA== +"@faker-js/faker@^8.4.1": + version "8.4.1" + resolved "https://registry.yarnpkg.com/@faker-js/faker/-/faker-8.4.1.tgz#5d5e8aee8fce48f5e189bf730ebd1f758f491451" + integrity sha512-XQ3cU+Q8Uqmrbf2e0cIC/QN43sTBSC8KF12u29Mb47tWrt2hAgBXSgpZMj4Ao8Uk0iJcU99QsOCaIL8934obCg== + "@humanwhocodes/config-array@^0.11.8": version "0.11.8" resolved "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz" From 8887b06675492bba97a95f02314667556c4e62ff Mon Sep 17 00:00:00 2001 From: bracesproul Date: Tue, 7 May 2024 15:14:50 -0700 Subject: [PATCH 002/373] cr --- js/src/client.ts | 1 - js/src/tests/client.int.test.ts | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index 6ebaebe2f..961061300 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -1195,7 +1195,6 @@ export class Client { }; let runsYielded = 0; - console.log("going to list!"); for await (const runs of this._getCursorPaginatedList( "/runs/query", body diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 64b7f72d1..9e047b5da 100644 --- a/js/src/tests/client.int.test.ts +++ 
b/js/src/tests/client.int.test.ts @@ -565,7 +565,7 @@ test.concurrent( 180_000 ); -test.concurrent.only("list runs limit arg works", async () => { +test.concurrent("list runs limit arg works", async () => { const client = new Client(); const projectName = "test-limit-runs-listRuns-endpoint"; From 78d688c319dd9ab725908a090f157d8d27f7e22d Mon Sep 17 00:00:00 2001 From: bracesproul Date: Tue, 7 May 2024 20:13:24 -0700 Subject: [PATCH 003/373] cr --- js/src/client.ts | 2 +- js/src/tests/client.int.test.ts | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index 961061300..8c4ad763c 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -1204,7 +1204,7 @@ export class Client { break; } if (runs.length + runsYielded > limit) { - const newRuns = runs.splice(limit - runsYielded); + const newRuns = runs.slice(0, limit - runsYielded); yield* newRuns; break; } diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 9e047b5da..a69218736 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -569,6 +569,9 @@ test.concurrent("list runs limit arg works", async () => { const client = new Client(); const projectName = "test-limit-runs-listRuns-endpoint"; + const runsArr: Array = []; + const limit = 6; + try { // create a fresh project with 10 runs --default amount created by createRunsFactory await client.createProject({ @@ -576,9 +579,7 @@ test.concurrent("list runs limit arg works", async () => { }); await Promise.all(createRunsFactory(projectName).map(client.createRun)); - const limit = 6; let iters = 0; - const runsArr: Array = []; for await (const run of client.listRuns({ limit, projectName })) { expect(run).toBeDefined(); runsArr.push(run); @@ -589,7 +590,6 @@ test.concurrent("list runs limit arg works", async () => { ); } } - expect(runsArr.length).toBe(limit); } catch (e: any) { // cleanup by deleting the project const projectExists = await 
client.hasProject({ projectName }); @@ -604,4 +604,6 @@ test.concurrent("list runs limit arg works", async () => { console.error(e); } } + + expect(runsArr.length).toBe(limit); }); From e8298cc80f787eecad8aa32e841c51a7e3b2a587 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 02:53:10 +0200 Subject: [PATCH 004/373] feat(pairwise): show URL in console --- js/src/client.ts | 25 ++++- js/src/evaluation/evaluate_comparative.ts | 118 +++++++++++++++------- 2 files changed, 104 insertions(+), 39 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index fab3f6b23..e0cf1a8a4 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -459,12 +459,12 @@ export class Client { }; } - private getHostUrl(): string { + public getHostUrl(): string { if (this.webUrl) { return this.webUrl; } else if (isLocalhost(this.apiUrl)) { - this.webUrl = "http://localhost"; - return "http://localhost"; + this.webUrl = "http://localhost:3000"; + return this.webUrl; } else if ( this.apiUrl.includes("/api") && !this.apiUrl.split(".", 1)[0].endsWith("api") @@ -473,10 +473,10 @@ export class Client { return this.webUrl; } else if (this.apiUrl.split(".", 1)[0].includes("dev")) { this.webUrl = "https://dev.smith.langchain.com"; - return "https://dev.smith.langchain.com"; + return this.webUrl; } else { this.webUrl = "https://smith.langchain.com"; - return "https://smith.langchain.com"; + return this.webUrl; } } @@ -1568,6 +1568,21 @@ export class Client { return result; } + public async getProjectUrl({ + projectId, + projectName, + }: { + projectId?: string; + projectName?: string; + }) { + if (projectId === undefined && projectName === undefined) { + throw new Error("Must provide either projectName or projectId"); + } + const project = await this.readProject({ projectId, projectName }); + const tenantId = await this._getTenantId(); + return `${this.getHostUrl()}/o/${tenantId}/projects/p/${project.id}`; + } + private async _getTenantId(): Promise { if (this._tenantId !== null) 
{ return this._tenantId; diff --git a/js/src/evaluation/evaluate_comparative.ts b/js/src/evaluation/evaluate_comparative.ts index bb58f582f..dce1132cc 100644 --- a/js/src/evaluation/evaluate_comparative.ts +++ b/js/src/evaluation/evaluate_comparative.ts @@ -1,7 +1,12 @@ import { v4 as uuid4, validate } from "uuid"; import { Client } from "../index.js"; -import { ComparisonEvaluationResult, Example, Run } from "../schemas.js"; +import { + ComparisonEvaluationResult as ComparisonEvaluationResultRow, + Example, + Run, +} from "../schemas.js"; import { shuffle } from "../utils/shuffle.js"; +import { AsyncCaller } from "../utils/async_caller.js"; function loadExperiment(client: Client, experiment: string) { return client.readProject( @@ -57,7 +62,7 @@ export interface EvaluateComparativeOptions { ( runs: Run[], example: Example - ) => ComparisonEvaluationResult | Promise + ) => ComparisonEvaluationResultRow | Promise >; /** * Randomize the order of outputs for each evaluation @@ -97,7 +102,8 @@ export interface EvaluateComparativeOptions { } export interface ComparisonEvaluationResults { - results: ComparisonEvaluationResult[]; + experimentName: string; + results: ComparisonEvaluationResultRow[]; } export async function evaluateComparative( @@ -170,6 +176,35 @@ export async function evaluateComparative( referenceDatasetId: projects.at(0)?.reference_dataset_id, }); + const viewUrl = await (async () => { + const projectId = projects.at(0)?.id ?? 
projects.at(1)?.id; + const datasetId = comparativeExperiment?.reference_dataset_id; + + if (projectId && datasetId) { + const hostUrl = (await client.getProjectUrl({ projectId })) + .split("/projects/p/") + .at(0); + + const result = new URL(`${hostUrl}/datasets/${datasetId}/compare`); + result.searchParams.set( + "selectedSessions", + projects.map((p) => p.id).join(",") + ); + + result.searchParams.set( + "comparativeExperiment", + comparativeExperiment.id + ); + return result.toString(); + } + + return null; + })(); + + if (viewUrl != null) { + console.log(`View results at: ${viewUrl}`); + } + const experimentRuns = await Promise.all( projects.map((p) => loadTraces(client, p.id, { loadNested: !!options.loadNested }) @@ -225,41 +260,56 @@ export async function evaluateComparative( } } - const results: ComparisonEvaluationResult[] = []; - - // TODO: handle maxConcurrency - for (const [exampleId, runs] of Object.entries(runMapByExampleId)) { - const example = exampleMap[exampleId]; - if (!example) throw new Error(`Example ${exampleId} not found.`); + const caller = new AsyncCaller({ maxConcurrency: options.maxConcurrency }); - for (const evaluator of options.evaluators) { - const expectedRunIds = new Set(runs.map((r) => r.id)); + async function evaluateAndSubmitFeedback( + runs: Run[], + example: Example, + evaluator: ( + runs: Run[], + example: Example + ) => ComparisonEvaluationResultRow | Promise + ) { + const expectedRunIds = new Set(runs.map((r) => r.id)); + const result = await evaluator( + options.randomizeOrder ? shuffle(runs) : runs, + example + ); - if (options.randomizeOrder) { - runs.sort(() => Math.random() - 0.5); - } - const result = await evaluator( - options.randomizeOrder ? 
shuffle(runs) : runs, - example - ); - results.push(result); - - for (const [runId, score] of Object.entries(result.scores)) { - // validate if the run id - if (!expectedRunIds.has(runId)) { - throw new Error( - `Returning an invalid run id ${runId} from evaluator.` - ); - } - - await client.createFeedback(runId, result.key, { - score, - sourceRunId: result.source_run_id, - comparativeExperimentId: comparativeExperiment.id, - }); + for (const [runId, score] of Object.entries(result.scores)) { + // validate if the run id + if (!expectedRunIds.has(runId)) { + throw new Error(`Returning an invalid run id ${runId} from evaluator.`); } + + await client.createFeedback(runId, result.key, { + score, + sourceRunId: result.source_run_id, + comparativeExperimentId: comparativeExperiment.id, + }); } + + return result; } - return { results }; + const results: ComparisonEvaluationResultRow[] = await Promise.all( + Object.entries(runMapByExampleId).flatMap(([exampleId, runs]) => { + const example = exampleMap[exampleId]; + if (!example) throw new Error(`Example ${exampleId} not found.`); + + return options.evaluators.map((evaluator) => + caller.call( + evaluateAndSubmitFeedback, + runs, + exampleMap[exampleId], + evaluator + ) + ); + }) + ); + + return { + experimentName: name, + results, + }; } From b6e3b6ac85ebd9ce9ac9f67d1add61802f33e72f Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:01:18 +0200 Subject: [PATCH 005/373] Fix tests --- js/src/tests/client.test.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index d1b822d9e..31ac1f724 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -91,10 +91,10 @@ describe("Client", () => { expect(result).toBe("http://example.com"); }); - it("should return 'http://localhost' if apiUrl is localhost", () => { + it("should return 'http://localhost:3000' if apiUrl is localhost", () => { const client = new Client({ 
apiUrl: "http://localhost/api" }); const result = (client as any).getHostUrl(); - expect(result).toBe("http://localhost"); + expect(result).toBe("http://localhost:3000"); }); it("should return the webUrl without '/api' if apiUrl contains '/api'", () => { @@ -127,6 +127,7 @@ describe("Client", () => { describe("env functions", () => { it("should return the env variables correctly", async () => { + console.log(process.env) // eslint-disable-next-line no-process-env process.env.LANGCHAIN_REVISION_ID = "test_revision_id"; // eslint-disable-next-line no-process-env From ef6cca4cbbdb1383b08c56145866df5ec2418069 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:02:31 +0200 Subject: [PATCH 006/373] Remove console.log --- js/src/tests/client.test.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index 31ac1f724..245c9487e 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -127,7 +127,6 @@ describe("Client", () => { describe("env functions", () => { it("should return the env variables correctly", async () => { - console.log(process.env) // eslint-disable-next-line no-process-env process.env.LANGCHAIN_REVISION_ID = "test_revision_id"; // eslint-disable-next-line no-process-env From ce85b6d8a855a3d5808b17b5d3aa5ec8eaf54f88 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:40:13 +0200 Subject: [PATCH 007/373] Flaky test for batch client, add reason for waitUntil timeout --- js/src/tests/batch_client.int.test.ts | 42 +++++++++++++++++++++------ js/src/tests/utils.ts | 34 +++++++++++++++++++--- 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/js/src/tests/batch_client.int.test.ts b/js/src/tests/batch_client.int.test.ts index 4715e80a8..4705fae3c 100644 --- a/js/src/tests/batch_client.int.test.ts +++ b/js/src/tests/batch_client.int.test.ts @@ -1,7 +1,11 @@ import { Client } from "../client.js"; import { RunTree, convertToDottedOrderFormat 
} from "../run_trees.js"; import { v4 as uuidv4 } from "uuid"; -import { deleteProject, waitUntilRunFound } from "./utils.js"; +import { + deleteProject, + waitUntilProjectFound, + waitUntilRunFound, +} from "./utils.js"; test.concurrent( "Test persist update run", @@ -11,7 +15,8 @@ test.concurrent( callerOptions: { maxRetries: 2 }, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_batch_1"; + const projectName = + "__test_persist_update_run_batch_1" + uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const runId = uuidv4(); @@ -34,7 +39,12 @@ test.concurrent( dotted_order: dottedOrder, trace_id: runId, }); - await waitUntilRunFound(langchainClient, runId, true); + + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); + const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); await langchainClient.deleteProject({ projectName }); @@ -51,7 +61,9 @@ test.concurrent( pendingAutoBatchedRunLimit: 2, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_batch_above_bs_limit"; + const projectName = + "__test_persist_update_run_batch_above_bs_limit" + + uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const createRun = async () => { @@ -76,7 +88,11 @@ test.concurrent( trace_id: runId, end_time: Math.floor(new Date().getTime() / 1000), }); - await waitUntilRunFound(langchainClient, runId, true); + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); + const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); }; @@ -96,7 +112,8 @@ test.concurrent( callerOptions: { maxRetries: 2 }, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_batch_with_delay"; + const projectName = + "__test_persist_update_run_batch_with_delay" + 
uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const runId = uuidv4(); @@ -121,7 +138,10 @@ test.concurrent( trace_id: runId, end_time: Math.floor(new Date().getTime() / 1000), }); - await waitUntilRunFound(langchainClient, runId, true); + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); await langchainClient.deleteProject({ projectName }); @@ -137,7 +157,8 @@ test.concurrent( callerOptions: { maxRetries: 2 }, timeout_ms: 30_000, }); - const projectName = "__test_persist_update_run_tree"; + const projectName = + "__test_persist_update_run_tree" + uuidv4().substring(0, 4); await deleteProject(langchainClient, projectName); const runId = uuidv4(); const runTree = new RunTree({ @@ -150,7 +171,10 @@ test.concurrent( await runTree.postRun(); await runTree.end({ output: "foo2" }); await runTree.patchRun(); - await waitUntilRunFound(langchainClient, runId, true); + await Promise.all([ + waitUntilRunFound(langchainClient, runId, true), + waitUntilProjectFound(langchainClient, projectName), + ]); const storedRun = await langchainClient.readRun(runId); expect(storedRun.id).toEqual(runId); expect(storedRun.dotted_order).toEqual(runTree.dotted_order); diff --git a/js/src/tests/utils.ts b/js/src/tests/utils.ts index a7e4d3197..cd3d15ee6 100644 --- a/js/src/tests/utils.ts +++ b/js/src/tests/utils.ts @@ -11,7 +11,8 @@ export async function toArray(iterable: AsyncIterable): Promise { export async function waitUntil( condition: () => Promise, timeout: number, - interval: number + interval: number, + prefix?: string ): Promise { const start = Date.now(); while (Date.now() - start < timeout) { @@ -25,7 +26,9 @@ export async function waitUntil( await new Promise((resolve) => setTimeout(resolve, interval)); } const elapsed = Date.now() - start; - throw new Error(`Timeout after 
${elapsed / 1000}s`); + throw new Error( + [prefix, `Timeout after ${elapsed / 1000}s`].filter(Boolean).join(": ") + ); } export async function pollRunsUntilCount( @@ -74,7 +77,10 @@ export async function deleteDataset( export async function waitUntilRunFound( client: Client, runId: string, - checkOutputs = false + checkOutputs = false, + options?: { + prefix?: string; + } ) { return waitUntil( async () => { @@ -93,7 +99,27 @@ export async function waitUntilRunFound( } }, 30_000, - 5_000 + 5_000, + `Waiting for run "${runId}"` + ); +} + +export async function waitUntilProjectFound( + client: Client, + projectName: string +) { + return waitUntil( + async () => { + try { + await client.readProject({ projectName }); + return true; + } catch (e) { + return false; + } + }, + 10_000, + 5_000, + `Waiting for project "${projectName}"` ); } From f7c20b927e9e909f1fec0a81f101e63cefef6339 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:42:22 +0200 Subject: [PATCH 008/373] Fix lint --- js/src/tests/utils.ts | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/js/src/tests/utils.ts b/js/src/tests/utils.ts index cd3d15ee6..c9f30fe78 100644 --- a/js/src/tests/utils.ts +++ b/js/src/tests/utils.ts @@ -77,10 +77,7 @@ export async function deleteDataset( export async function waitUntilRunFound( client: Client, runId: string, - checkOutputs = false, - options?: { - prefix?: string; - } + checkOutputs = false ) { return waitUntil( async () => { From bfe77f1963f7811da4c2ab91da4f7a68faa888a0 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:45:23 +0200 Subject: [PATCH 009/373] Dont remove the dataset just yet --- js/src/tests/evaluate_comparative.int.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 99058e77f..9d68bd790 100644 --- a/js/src/tests/evaluate_comparative.int.test.ts +++ 
b/js/src/tests/evaluate_comparative.int.test.ts @@ -22,7 +22,7 @@ beforeAll(async () => { afterAll(async () => { const client = new Client(); - await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); + // await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); }); describe("evaluate comparative", () => { From beb2d719078e2ff73ee00c8a2d606ef81ec77a4d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:47:33 +0200 Subject: [PATCH 010/373] Fix lint --- js/src/tests/evaluate_comparative.int.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 9d68bd790..46637504a 100644 --- a/js/src/tests/evaluate_comparative.int.test.ts +++ b/js/src/tests/evaluate_comparative.int.test.ts @@ -21,7 +21,7 @@ beforeAll(async () => { }); afterAll(async () => { - const client = new Client(); + // const client = new Client(); // await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); }); From 2c887f7dcee62092484bd8a05576ea07b65b9027 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:52:23 +0200 Subject: [PATCH 011/373] Add console.log? 
--- js/src/evaluation/evaluate_comparative.ts | 18 ++++++++---------- js/src/tests/evaluate_comparative.int.test.ts | 9 +++++++-- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/js/src/evaluation/evaluate_comparative.ts b/js/src/evaluation/evaluate_comparative.ts index dce1132cc..2dc31379a 100644 --- a/js/src/evaluation/evaluate_comparative.ts +++ b/js/src/evaluation/evaluate_comparative.ts @@ -152,7 +152,7 @@ export async function evaluateComparative( const datasetVersion = projects.at(0)?.extra?.metadata?.dataset_version; const id = uuid4(); - const name = (() => { + const experimentName = (() => { if (!options.experimentPrefix) { const names = projects .map((p) => p.name) @@ -165,11 +165,11 @@ export async function evaluateComparative( })(); // TODO: add URL to the comparative experiment - console.log(`Starting pairwise evaluation of: ${name}`); + console.log(`Starting pairwise evaluation of: ${experimentName}`); const comparativeExperiment = await client.createComparativeExperiment({ id, - name, + name: experimentName, experimentIds: projects.map((p) => p.id), description: options.description, metadata: options.metadata, @@ -292,8 +292,8 @@ export async function evaluateComparative( return result; } - const results: ComparisonEvaluationResultRow[] = await Promise.all( - Object.entries(runMapByExampleId).flatMap(([exampleId, runs]) => { + const promises = Object.entries(runMapByExampleId).flatMap( + ([exampleId, runs]) => { const example = exampleMap[exampleId]; if (!example) throw new Error(`Example ${exampleId} not found.`); @@ -305,11 +305,9 @@ export async function evaluateComparative( evaluator ) ); - }) + } ); - return { - experimentName: name, - results, - }; + const results: ComparisonEvaluationResultRow[] = await Promise.all(promises); + return { experimentName, results }; } diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 46637504a..4478c4741 100644 --- 
a/js/src/tests/evaluate_comparative.int.test.ts +++ b/js/src/tests/evaluate_comparative.int.test.ts @@ -21,8 +21,9 @@ beforeAll(async () => { }); afterAll(async () => { - // const client = new Client(); - // await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); + console.log("Deleting dataset") + const client = new Client(); + await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); }); describe("evaluate comparative", () => { @@ -37,6 +38,8 @@ describe("evaluate comparative", () => { { data: TESTING_DATASET_NAME } ); + console.log("Pairwise starting") + const pairwise = await evaluateComparative( [firstEval.experimentName, secondEval.experimentName], { @@ -49,6 +52,8 @@ describe("evaluate comparative", () => { } ); + console.log("Pairwise completed") + // TODO: we should a) wait for runs to be persisted, b) allow passing runnables / traceables directly expect(pairwise.results.length).toBeGreaterThanOrEqual(1); }); From 353652653bd3d6eb67f51cc1da1fcd50622d2a43 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:55:28 +0200 Subject: [PATCH 012/373] remove client again --- js/src/tests/evaluate_comparative.int.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 4478c4741..33e31c851 100644 --- a/js/src/tests/evaluate_comparative.int.test.ts +++ b/js/src/tests/evaluate_comparative.int.test.ts @@ -22,8 +22,8 @@ beforeAll(async () => { afterAll(async () => { console.log("Deleting dataset") - const client = new Client(); - await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); + // const client = new Client(); + // await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); }); describe("evaluate comparative", () => { From 2428ba99f51254e5c60cf9cb70c19fe4bcb593d3 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 16:59:35 +0200 Subject: [PATCH 013/373] See experiment runs 
--- js/src/evaluation/evaluate_comparative.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/js/src/evaluation/evaluate_comparative.ts b/js/src/evaluation/evaluate_comparative.ts index 2dc31379a..423605eea 100644 --- a/js/src/evaluation/evaluate_comparative.ts +++ b/js/src/evaluation/evaluate_comparative.ts @@ -211,6 +211,8 @@ export async function evaluateComparative( ) ); + console.dir(experimentRuns, { depth: null }); + let exampleIdsIntersect: Set | undefined; for (const runs of experimentRuns) { const exampleIdsSet = new Set( From 017137ee5f5e11b456dcf1de297c3b5104ed6559 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 17:35:24 +0200 Subject: [PATCH 014/373] feat(pairwise): allow passing evaluator directly to wait for evaluation to complete --- js/demo/pairwise.mts | 41 +++++++++++++ js/src/evaluation/evaluate_comparative.ts | 58 +++++++++++++++---- js/src/tests/evaluate_comparative.int.test.ts | 31 ++++++++-- 3 files changed, 115 insertions(+), 15 deletions(-) create mode 100644 js/demo/pairwise.mts diff --git a/js/demo/pairwise.mts b/js/demo/pairwise.mts new file mode 100644 index 000000000..35d79b7c5 --- /dev/null +++ b/js/demo/pairwise.mts @@ -0,0 +1,41 @@ +import { evaluateComparative, evaluate } from "../evaluation"; +import { Client } from "../index"; + +const TESTING_DATASET_NAME = "test_evaluate_comparative_js"; + +const client = new Client(); + +if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) { + await client.createDataset(TESTING_DATASET_NAME, { + description: "For testing pruposes", + }); + + await client.createExamples({ + inputs: [{ input: 1 }, { input: 2 }], + outputs: [{ output: 2 }, { output: 3 }], + datasetName: TESTING_DATASET_NAME, + }); +} + +const firstEval = await evaluate((input) => ({ foo: `first:${input.input}` }), { + data: TESTING_DATASET_NAME, +}); + +const secondEval = await evaluate( + (input) => ({ foo: `second:${input.input}` }), + { data: TESTING_DATASET_NAME } +); + +const 
pairwise = await evaluateComparative( + [firstEval.experimentName, secondEval.experimentName], + { + evaluators: [ + (runs) => ({ + key: "latter_precedence", + scores: Object.fromEntries(runs.map((run, i) => [run.id, i % 2])), + }), + ], + } +); + +console.dir(pairwise, { depth: null }); diff --git a/js/src/evaluation/evaluate_comparative.ts b/js/src/evaluation/evaluate_comparative.ts index 423605eea..018d3de57 100644 --- a/js/src/evaluation/evaluate_comparative.ts +++ b/js/src/evaluation/evaluate_comparative.ts @@ -7,12 +7,26 @@ import { } from "../schemas.js"; import { shuffle } from "../utils/shuffle.js"; import { AsyncCaller } from "../utils/async_caller.js"; +import { type evaluate } from "./index.js"; +import pRetry from "p-retry"; + +type ExperimentResults = Awaited>; + +function isExperimentResultsList( + value: ExperimentResults[] | string[] +): value is ExperimentResults[] { + return value.some((x) => typeof x !== "string"); +} + +async function loadExperiment( + client: Client, + experiment: string | ExperimentResults +) { + const value = + typeof experiment === "string" ? experiment : experiment.experimentName; -function loadExperiment(client: Client, experiment: string) { return client.readProject( - validate(experiment) - ? { projectId: experiment } - : { projectName: experiment } + validate(value) ? { projectId: value } : { projectName: value } ); } @@ -107,7 +121,9 @@ export interface ComparisonEvaluationResults { } export async function evaluateComparative( - experiments: Array, + experiments: + | Array + | Array | ExperimentResults>, options: EvaluateComparativeOptions ): Promise { if (experiments.length < 2) { @@ -125,10 +141,34 @@ export async function evaluateComparative( } const client = options.client ?? 
new Client(); + const resolvedExperiments = await Promise.all(experiments); - const projects = await Promise.all( - experiments.map((experiment) => loadExperiment(client, experiment)) - ); + const projects = await (() => { + if (!isExperimentResultsList(resolvedExperiments)) { + return Promise.all( + resolvedExperiments.map((experiment) => + loadExperiment(client, experiment) + ) + ); + } + + // if we know the number of runs beforehand, check if the + // number of runs in the project matches the expected number of runs + return Promise.all( + resolvedExperiments.map((experiment) => + pRetry( + async () => { + const project = await loadExperiment(client, experiment); + if (project.run_count !== experiment?.results.length) { + throw new Error("Experiment is missing runs. Retrying."); + } + return project; + }, + { factor: 2, minTimeout: 1000, retries: 10 } + ) + ) + ); + })(); if (new Set(projects.map((p) => p.reference_dataset_id)).size > 1) { throw new Error("All experiments must have the same reference dataset."); @@ -211,8 +251,6 @@ export async function evaluateComparative( ) ); - console.dir(experimentRuns, { depth: null }); - let exampleIdsIntersect: Set | undefined; for (const runs of experimentRuns) { const exampleIdsSet = new Set( diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 41afd3dff..5b18884bb 100644 --- a/js/src/tests/evaluate_comparative.int.test.ts +++ b/js/src/tests/evaluate_comparative.int.test.ts @@ -22,9 +22,8 @@ beforeAll(async () => { }); afterAll(async () => { - console.log("Deleting dataset"); - // const client = new Client(); - // await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); + const client = new Client(); + await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); }); describe("evaluate comparative", () => { @@ -59,7 +58,29 @@ describe("evaluate comparative", () => { } ); - // TODO: we should a) wait for runs to be persisted, b) allow passing 
runnables / traceables directly - expect(pairwise.results.length).toBeGreaterThanOrEqual(1); + expect(pairwise.results.length).toEqual(2); + }); + + test("pass directly", async () => { + const pairwise = await evaluateComparative( + [ + evaluate((input) => ({ foo: `first:${input.input}` }), { + data: TESTING_DATASET_NAME, + }), + evaluate((input) => ({ foo: `second:${input.input}` }), { + data: TESTING_DATASET_NAME, + }), + ], + { + evaluators: [ + (runs) => ({ + key: "latter_precedence", + scores: Object.fromEntries(runs.map((run, i) => [run.id, i % 2])), + }), + ], + } + ); + + expect(pairwise.results.length).toEqual(2); }); }); From 4718dab1c8d91f116bdd67837cde41f464336e46 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 17:05:20 +0200 Subject: [PATCH 015/373] Wait for runs to be stored in LangSmith --- js/src/tests/evaluate_comparative.int.test.ts | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/js/src/tests/evaluate_comparative.int.test.ts b/js/src/tests/evaluate_comparative.int.test.ts index 33e31c851..41afd3dff 100644 --- a/js/src/tests/evaluate_comparative.int.test.ts +++ b/js/src/tests/evaluate_comparative.int.test.ts @@ -1,6 +1,7 @@ import { evaluate } from "../evaluation/_runner.js"; import { evaluateComparative } from "../evaluation/evaluate_comparative.js"; import { Client } from "../index.js"; +import { waitUntilRunFound } from "./utils.js"; const TESTING_DATASET_NAME = "test_evaluate_comparative_js"; @@ -21,13 +22,15 @@ beforeAll(async () => { }); afterAll(async () => { - console.log("Deleting dataset") + console.log("Deleting dataset"); // const client = new Client(); // await client.deleteDataset({ datasetName: TESTING_DATASET_NAME }); }); describe("evaluate comparative", () => { test("basic", async () => { + const client = new Client(); + const firstEval = await evaluate( (input) => ({ foo: `first:${input.input}` }), { data: TESTING_DATASET_NAME } @@ -38,7 +41,11 @@ describe("evaluate comparative", 
() => { { data: TESTING_DATASET_NAME } ); - console.log("Pairwise starting") + await Promise.all( + [firstEval, secondEval].flatMap(({ results }) => + results.flatMap(({ run }) => waitUntilRunFound(client, run.id)) + ) + ); const pairwise = await evaluateComparative( [firstEval.experimentName, secondEval.experimentName], @@ -52,8 +59,6 @@ describe("evaluate comparative", () => { } ); - console.log("Pairwise completed") - // TODO: we should a) wait for runs to be persisted, b) allow passing runnables / traceables directly expect(pairwise.results.length).toBeGreaterThanOrEqual(1); }); From f4ea490f0364b776d2998664dd1cf91208fa8dd9 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 18:08:53 +0200 Subject: [PATCH 016/373] Remove file --- js/demo/pairwise.mts | 41 ----------------------------------------- 1 file changed, 41 deletions(-) delete mode 100644 js/demo/pairwise.mts diff --git a/js/demo/pairwise.mts b/js/demo/pairwise.mts deleted file mode 100644 index 35d79b7c5..000000000 --- a/js/demo/pairwise.mts +++ /dev/null @@ -1,41 +0,0 @@ -import { evaluateComparative, evaluate } from "../evaluation"; -import { Client } from "../index"; - -const TESTING_DATASET_NAME = "test_evaluate_comparative_js"; - -const client = new Client(); - -if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) { - await client.createDataset(TESTING_DATASET_NAME, { - description: "For testing pruposes", - }); - - await client.createExamples({ - inputs: [{ input: 1 }, { input: 2 }], - outputs: [{ output: 2 }, { output: 3 }], - datasetName: TESTING_DATASET_NAME, - }); -} - -const firstEval = await evaluate((input) => ({ foo: `first:${input.input}` }), { - data: TESTING_DATASET_NAME, -}); - -const secondEval = await evaluate( - (input) => ({ foo: `second:${input.input}` }), - { data: TESTING_DATASET_NAME } -); - -const pairwise = await evaluateComparative( - [firstEval.experimentName, secondEval.experimentName], - { - evaluators: [ - (runs) => ({ - key: 
"latter_precedence", - scores: Object.fromEntries(runs.map((run, i) => [run.id, i % 2])), - }), - ], - } -); - -console.dir(pairwise, { depth: null }); From a48ed1933df1369a2b3cdaacb0a970e86bdbdf5d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 18:07:39 +0200 Subject: [PATCH 017/373] Trace evaluators --- js/src/evaluation/evaluate_comparative.ts | 30 +++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/js/src/evaluation/evaluate_comparative.ts b/js/src/evaluation/evaluate_comparative.ts index 018d3de57..5a67ee9f5 100644 --- a/js/src/evaluation/evaluate_comparative.ts +++ b/js/src/evaluation/evaluate_comparative.ts @@ -7,8 +7,9 @@ import { } from "../schemas.js"; import { shuffle } from "../utils/shuffle.js"; import { AsyncCaller } from "../utils/async_caller.js"; -import { type evaluate } from "./index.js"; +import { evaluate } from "./index.js"; import pRetry from "p-retry"; +import { getCurrentRunTree, traceable } from "../traceable.js"; type ExperimentResults = Awaited>; @@ -332,12 +333,37 @@ export async function evaluateComparative( return result; } + const tracedEvaluators = options.evaluators.map((evaluator) => + traceable( + async ( + runs: Run[], + example: Example + ): Promise => { + const evaluatorRun = getCurrentRunTree(); + const result = await evaluator(runs, example); + + // sanitise the payload before sending to LangSmith + evaluatorRun.inputs = { runs: runs, example: example }; + evaluatorRun.outputs = result; + + return { + ...result, + source_run_id: result.source_run_id ?? 
evaluatorRun.id, + }; + }, + { + project_name: "evaluators", + name: evaluator.name || "evaluator", + } + ) + ); + const promises = Object.entries(runMapByExampleId).flatMap( ([exampleId, runs]) => { const example = exampleMap[exampleId]; if (!example) throw new Error(`Example ${exampleId} not found.`); - return options.evaluators.map((evaluator) => + return tracedEvaluators.map((evaluator) => caller.call( evaluateAndSubmitFeedback, runs, From 6ef79f382e7ce34493fc68e8ffab25e91577c7a4 Mon Sep 17 00:00:00 2001 From: infra Date: Sat, 11 May 2024 12:58:02 -0400 Subject: [PATCH 018/373] feat: bump docker compose to 0.3.5 --- python/langsmith/cli/.env.example | 11 ++- python/langsmith/cli/docker-compose.yaml | 91 +++++++++++++++++------- python/langsmith/cli/main.py | 14 ++-- 3 files changed, 80 insertions(+), 36 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 7e32c3802..f719a7470 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,5 +1,5 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. -_LANGSMITH_IMAGE_VERSION=0.2.17 +_LANGSMITH_IMAGE_VERSION=0.3.5 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key OPENAI_API_KEY=your-openai-api-key # Needed for Online Evals and Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 @@ -10,4 +10,11 @@ POSTGRES_DATABASE_URI=postgres:postgres@langchain-db:5432/postgres # Change to y REDIS_DATABASE_URI=redis://langchain-redis:6379 # Change to your Redis URI if using external Redis. Otherwise, leave it as is LOG_LEVEL=warning # Change to your desired log level MAX_ASYNC_JOBS_PER_WORKER=10 # Change to your desired maximum async jobs per worker. 
We recommend 10/suggest spinning up more replicas of the queue worker if you need more throughput -ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3} # Change the PG pool size based off your pg instance/requirements. +ASYNCPG_POOL_MAX_SIZE=3 # Change the PG pool size based off your pg instance/requirements. +CLICKHOUSE_HOST=langchain-clickhouse +CLICKHOUSE_USER=default +CLICKHOUSE_DB=default +CLICKHOUSE_PORT=8123 +CLICKHOUSE_TLS=false +CLICKHOUSE_PASSWORD=password# Change to your Clickhouse password if needed +CLICKHOUSE_NATIVE_PORT=9000 diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index e83631175..5f433f782 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.3.4} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,10 +16,11 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker + - GO_ENDPOINT=http://langchain-platform-backend:1986 - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY} - LOG_LEVEL=${LOG_LEVEL:-warning} @@ -29,9 +30,12 @@ services: - API_KEY_SALT=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} - - INGESTION_QUEUE=default - - 
ADHOC_QUEUE=default - - RUN_RULES_QUEUE=default + - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} ports: - 1984:1984 depends_on: @@ -44,10 +48,35 @@ services: postgres-setup: condition: service_completed_successfully restart: always + langchain-platform-backend: + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} + environment: + - PORT=1986 + - LANGCHAIN_ENV=local_docker + - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - LOG_LEVEL=${LOG_LEVEL:-warning} + - AUTH_TYPE=${AUTH_TYPE:-none} + - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} + - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} + - API_KEY_SALT=${API_KEY_SALT} + - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} + - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + ports: + - 1986:1986 + depends_on: + langchain-db: + condition: service_healthy + langchain-redis: + condition: service_healthy + clickhouse-setup: + condition: service_completed_successfully + postgres-setup: + condition: service_completed_successfully + restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.} environment: - - PORT=1984 - LANGCHAIN_ENV=local_docker - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY} @@ -60,9 +89,12 @@ services: - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} - MAX_ASYNC_JOBS_PER_WORKER=${MAX_ASYNC_JOBS_PER_WORKER:-10} - ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3} - - INGESTION_QUEUE=default - - ADHOC_QUEUE=default - - RUN_RULES_QUEUE=default + - 
CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} command: - "saq" - "app.workers.queues.single_queue_worker.settings" @@ -116,9 +148,9 @@ services: user: "101:101" restart: always environment: - - CLICKHOUSE_DB=default - - CLICKHOUSE_USER=default - - CLICKHOUSE_PASSWORD=password + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} volumes: - langchain-clickhouse-data:/var/lib/clickhouse - ./users.xml:/etc/clickhouse-server/users.d/users.xml @@ -131,30 +163,26 @@ services: timeout: 2s retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} depends_on: langchain-clickhouse: condition: service_healthy restart: "on-failure:10" environment: - - PORT=1984 - - LANGCHAIN_ENV=local_docker - - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - - OPENAI_API_KEY=${OPENAI_API_KEY} - - LOG_LEVEL=${LOG_LEVEL:-warning} - - AUTH_TYPE=${AUTH_TYPE:-none} - - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} entrypoint: [ "bash", "-c", - "migrate -source file://clickhouse/migrations -database 
'clickhouse://langchain-clickhouse:9000?username=default&password=password&database=default&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", + "migrate -source file://clickhouse/migrations -database 'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.17} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} depends_on: langchain-db: condition: service_healthy @@ -163,11 +191,20 @@ services: - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} - OPENAI_API_KEY=${OPENAI_API_KEY} - LOG_LEVEL=${LOG_LEVEL:-warning} + - AUTH_TYPE=${AUTH_TYPE:-none} - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - API_KEY_SALT=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + - MAX_ASYNC_JOBS_PER_WORKER=${MAX_ASYNC_JOBS_PER_WORKER:-10} + - ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3} + - CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse} + - CLICKHOUSE_USER=${CLICKHOUSE_USER:-default} + - CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password} + - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} + - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} + - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} restart: "on-failure:10" entrypoint: [ diff --git a/python/langsmith/cli/main.py b/python/langsmith/cli/main.py index 15bc3fc02..52cee0c83 100644 --- a/python/langsmith/cli/main.py +++ b/python/langsmith/cli/main.py @@ -101,12 +101,12 @@ def _start_local(self) -> None: def pull( self, *, - version: str = "0.2.17", + version: str = "0.3.5", ) -> None: """Pull the latest LangSmith images. Args: - version: The LangSmith version to use for LangSmith. 
Defaults to 0.2.17 + version: The LangSmith version to use for LangSmith. Defaults to 0.3.5 """ os.environ["_LANGSMITH_IMAGE_VERSION"] = version subprocess.run( @@ -123,7 +123,7 @@ def start( *, openai_api_key: Optional[str] = None, langsmith_license_key: str, - version: str = "0.2.17", + version: str = "0.3.5", ) -> None: """Run the LangSmith server locally. @@ -251,8 +251,8 @@ def main() -> None: ) server_start_parser.add_argument( "--version", - default="0.2.17", - help="The LangSmith version to use for LangSmith. Defaults to 0.2.17.", + default="0.3.5", + help="The LangSmith version to use for LangSmith. Defaults to 0.3.5.", ) server_start_parser.set_defaults( func=lambda args: server_command.start( @@ -279,8 +279,8 @@ def main() -> None: ) server_pull_parser.add_argument( "--version", - default="0.2.17", - help="The LangSmith version to use for LangSmith. Defaults to 0.2.17.", + default="0.3.5", + help="The LangSmith version to use for LangSmith. Defaults to 0.3.5.", ) server_pull_parser.set_defaults( func=lambda args: server_command.pull(version=args.version) From 7c14918c24ac1bb932a47c4a743d2a4eb9d3e3e7 Mon Sep 17 00:00:00 2001 From: infra Date: Sat, 11 May 2024 12:59:24 -0400 Subject: [PATCH 019/373] feat: bump docker compose to 0.3.5 --- python/langsmith/cli/docker-compose.yaml | 14 +++++++------- python/pyproject.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 5f433f782..b2435167d 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.3.4} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.3.5} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} + image: 
langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,7 +16,7 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker @@ -49,7 +49,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker @@ -75,7 +75,7 @@ services: condition: service_completed_successfully restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.2.} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} environment: - LANGCHAIN_ENV=local_docker - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} @@ -163,7 +163,7 @@ services: timeout: 2s retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} depends_on: langchain-clickhouse: condition: service_healthy @@ -182,7 +182,7 @@ services: "migrate -source file://clickhouse/migrations -database 'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.4} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} depends_on: langchain-db: condition: service_healthy diff --git a/python/pyproject.toml b/python/pyproject.toml index beb690473..71b3ed0f8 100644 --- 
a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.56" +version = "0.1.57" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 6a6b03502cce51d1e229e9912d87338ba311ab18 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Mon, 13 May 2024 09:19:02 -0700 Subject: [PATCH 020/373] Make traceable not throw error in deno by using node: imports --- js/src/traceable.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/traceable.ts b/js/src/traceable.ts index cfec78c76..11f2c64e5 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -1,4 +1,4 @@ -import { AsyncLocalStorage } from "async_hooks"; +import { AsyncLocalStorage } from "node:async_hooks"; import { RunTree, From 5ee33b48247f05d7e0c1b1d6da996062e2bb22ab Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 13 May 2024 04:36:22 +0200 Subject: [PATCH 021/373] chore(js): 0.1.24-rc.0 --- js/package.json | 2 +- js/src/index.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/js/package.json b/js/package.json index 09191b908..bbdfb6551 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.23", + "version": "0.1.24-rc.0", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index e1f22e0e3..318784b0f 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -11,4 +11,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.23"; +export const __version__ = "0.1.24-rc.0"; From 9cbbe1c02622c0cd37ccdfd5bd2856b21e66972b Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 13 May 2024 23:22:51 +0200 Subject: [PATCH 022/373] Release as 0.1.24 --- 
js/package.json | 2 +- js/src/index.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/js/package.json b/js/package.json index bbdfb6551..54c216f15 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.24-rc.0", + "version": "0.1.24", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index 318784b0f..3f1da1513 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -11,4 +11,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.24-rc.0"; +export const __version__ = "0.1.24"; From 943bdfd020274e7bd2b804b0bdd84f944e14c6c2 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 01:02:55 +0200 Subject: [PATCH 023/373] fix(js): evaluator runs traced in experiment project than in evaluators project --- js/src/evaluation/_runner.ts | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 9bc850c5e..c2cc7408d 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -510,7 +510,6 @@ class _ExperimentManager { evaluators: Array, currentResults: ExperimentResultRow, fields: { - experimentName: string; client: Client; } ): Promise { @@ -519,7 +518,7 @@ class _ExperimentManager { try { const options = { reference_example_id: example.id, - project_name: fields.experimentName, + project_name: "evaluators", metadata: { example_version: example.modified_at ? 
new Date(example.modified_at).toISOString() @@ -567,7 +566,6 @@ class _ExperimentManager { if (maxConcurrency === 0) { for await (const currentResults of this.getResults()) { yield this._runEvaluators(evaluators, currentResults, { - experimentName: this.experimentName, client: this.client, }); } @@ -579,7 +577,6 @@ class _ExperimentManager { for await (const currentResults of this.getResults()) { futures.push( caller.call(this._runEvaluators, evaluators, currentResults, { - experimentName: this.experimentName, client: this.client, }) ); From b462ea33b3dd6776eb90dd99d4808800fc91512c Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 01:14:25 +0200 Subject: [PATCH 024/373] fix(js): make sure evaluator run ID is propagated properly --- js/src/evaluation/evaluator.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/js/src/evaluation/evaluator.ts b/js/src/evaluation/evaluator.ts index 4e07fa9dc..a9b6e096b 100644 --- a/js/src/evaluation/evaluator.ts +++ b/js/src/evaluation/evaluator.ts @@ -177,7 +177,12 @@ export class DynamicRunEvaluator any> const wrappedTraceableFunc: TraceableFunction = traceable( this.func, - { project_name: "evaluators", name: "evaluator", ...options } + { + project_name: "evaluators", + name: "evaluator", + id: sourceRunId, + ...options, + } ); const result = (await wrappedTraceableFunc( From 500414b488a907bc17a6c90a5979175a56bdc184 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 02:17:19 +0200 Subject: [PATCH 025/373] fix(js): e2e tests failing for limit --- js/src/tests/client.int.test.ts | 37 ++++++++++++++++++++------------- js/src/tests/utils.ts | 2 +- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index a69218736..bc357c4ab 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -568,16 +568,25 @@ test.concurrent( test.concurrent("list runs limit arg works", async () => { 
const client = new Client(); - const projectName = "test-limit-runs-listRuns-endpoint"; - const runsArr: Array = []; + const projectName = `test-limit-runs-${uuidv4().substring(0, 4)}`; const limit = 6; + // delete the project just in case + if (await client.hasProject({ projectName })) { + await client.deleteProject({ projectName }); + } + try { + const runsArr: Array = []; // create a fresh project with 10 runs --default amount created by createRunsFactory - await client.createProject({ - projectName, - }); - await Promise.all(createRunsFactory(projectName).map(client.createRun)); + await client.createProject({ projectName }); + await Promise.all( + createRunsFactory(projectName).map(async (payload) => { + if (!payload.id) payload.id = uuidv4(); + await client.createRun(payload); + await waitUntilRunFound(client, payload.id); + }) + ); let iters = 0; for await (const run of client.listRuns({ limit, projectName })) { @@ -590,20 +599,18 @@ test.concurrent("list runs limit arg works", async () => { ); } } - } catch (e: any) { - // cleanup by deleting the project - const projectExists = await client.hasProject({ projectName }); - if (projectExists) { - await client.deleteProject({ projectName }); - } - // Error thrown by test, rethrow + expect(runsArr.length).toBe(limit); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } catch (e: any) { if (e.message.startsWith("More runs returned than expected.")) { throw e; } else { console.error(e); } + } finally { + if (await client.hasProject({ projectName })) { + await client.deleteProject({ projectName }); + } } - - expect(runsArr.length).toBe(limit); }); diff --git a/js/src/tests/utils.ts b/js/src/tests/utils.ts index 1e01279ee..faf458542 100644 --- a/js/src/tests/utils.ts +++ b/js/src/tests/utils.ts @@ -152,7 +152,7 @@ export function createRunsFactory( return Array.from({ length: count }).map((_, idx) => ({ id: uuidv4(), name: `${idx}-${faker.lorem.words()}`, - run_type: 
faker.helpers.arrayElement(["tool", "chain", "llm", "runnable"]), + run_type: faker.helpers.arrayElement(["tool", "chain", "llm", "retriever"]), inputs: { question: faker.lorem.sentence(), }, From 1f76fa2314d83aa2d30f45eccc5ca1adbc0e923f Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 02:53:08 +0200 Subject: [PATCH 026/373] chore[js]: bump JS to 0.1.25 --- js/package.json | 4 ++-- js/src/index.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index 129f40588..aaf31120c 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.24", + "version": "0.1.25", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -197,4 +197,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/src/index.ts b/js/src/index.ts index 3f1da1513..1c0711e24 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -11,4 +11,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.24"; +export const __version__ = "0.1.25"; From ce38ed68a78de5d93c3171e300f4214c5c67461d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 15:50:31 +0200 Subject: [PATCH 027/373] fix[js]: print stack trace when error occur in evaluate --- js/package.json | 2 +- js/src/evaluation/_runner.ts | 4 ++++ js/src/utils/error.ts | 23 +++++++++++++++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 js/src/utils/error.ts diff --git a/js/package.json b/js/package.json index aaf31120c..ade1bc5d0 100644 --- a/js/package.json +++ b/js/package.json @@ -197,4 +197,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index c2cc7408d..8a3b45a84 100644 --- 
a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -6,6 +6,7 @@ import { assertUuid } from "../utils/_uuid.js"; import { AsyncCaller } from "../utils/async_caller.js"; import { atee } from "../utils/atee.js"; import { getLangChainEnvVarsMetadata } from "../utils/env.js"; +import { printErrorStackTrace } from "../utils/error.js"; import { randomName } from "./_random_name.js"; import { EvaluationResult, @@ -538,6 +539,7 @@ class _ExperimentManager { console.error( `Error running evaluator ${evaluator.evaluateRun.name} on run ${run.id}: ${e}` ); + printErrorStackTrace(e); } } @@ -635,6 +637,7 @@ class _ExperimentManager { evaluator.name }: ${JSON.stringify(e, null, 2)}` ); + printErrorStackTrace(e); } } @@ -835,6 +838,7 @@ async function _forward( await wrappedFn(example.inputs); } catch (e) { console.error(`Error running target function: ${e}`); + printErrorStackTrace(e); } if (!run) { diff --git a/js/src/utils/error.ts b/js/src/utils/error.ts new file mode 100644 index 000000000..8739cb091 --- /dev/null +++ b/js/src/utils/error.ts @@ -0,0 +1,23 @@ +function getErrorStackTrace(e: unknown) { + if (typeof e !== "object" || e == null) return undefined; + if (!("stack" in e) || typeof e.stack !== "string") return undefined; + + let stack = e.stack; + + const prevLine = `${e}`; + if (stack.startsWith(prevLine)) { + stack = stack.slice(prevLine.length); + } + + if (stack.startsWith("\n")) { + stack = stack.slice(1); + } + + return stack; +} + +export function printErrorStackTrace(e: unknown) { + const stack = getErrorStackTrace(e); + if (stack == null) return; + console.error(stack); +} From ff0e30e22e7981046187b4b228661290f4f4d28e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 15:59:33 +0200 Subject: [PATCH 028/373] chore[ts]: expose RetrieverOutput type --- js/src/index.ts | 1 + js/src/schemas.ts | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/js/src/index.ts b/js/src/index.ts index 1c0711e24..c3b377c1d 100644 --- 
a/js/src/index.ts +++ b/js/src/index.ts @@ -6,6 +6,7 @@ export type { TracerSession, Run, Feedback, + RetrieverOutput, } from "./schemas.js"; export { RunTree, type RunTreeConfig } from "./run_trees.js"; diff --git a/js/src/schemas.ts b/js/src/schemas.ts index ca53e301b..f4f6e05cf 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -380,3 +380,14 @@ export interface ComparativeExperiment { experiments_info?: Array>; feedback_stats?: Record; } + +/** + * Represents the expected output schema returned by traceable + * or by run tree output for LangSmith to correctly display + * documents in the UI + */ +export type RetrieverOutput = Array<{ + page_content: string; + type: "Document"; + metadata?: KVMap; +}>; From 6a2252b02eeb0a7599a675e5acfd1cf257a8c3d5 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 16:39:00 +0200 Subject: [PATCH 029/373] feat[js]: add view URL to the experiment when running evaluate --- js/src/client.ts | 15 +++++++++++++++ js/src/evaluation/_runner.ts | 14 +++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index bcad8eaca..cfbd869da 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -1597,6 +1597,21 @@ export class Client { return `${this.getHostUrl()}/o/${tenantId}/projects/p/${project.id}`; } + public async getDatasetUrl({ + datasetId, + datasetName, + }: { + datasetId?: string; + datasetName?: string; + }) { + if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide either datasetName or datasetId"); + } + const dataset = await this.readDataset({ datasetId, datasetName }); + const tenantId = await this._getTenantId(); + return `${this.getHostUrl()}/o/${tenantId}/datasets/${dataset.id}`; + } + private async _getTenantId(): Promise { if (this._tenantId !== null) { return this._tenantId; diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index c2cc7408d..087582278 100644 --- 
a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -319,16 +319,24 @@ class _ExperimentManager { return project; } - _printExperimentStart(): void { - // @TODO log with experiment URL + protected async _printExperimentStart(): Promise { console.log(`Starting evaluation of experiment: ${this.experimentName}`); + + const firstExample = this._examples?.[0]; + const datasetId = firstExample?.dataset_id; + if (!datasetId || !this._experiment) return; + + const datasetUrl = await this.client.getDatasetUrl({ datasetId }); + const compareUrl = `${datasetUrl}/compare?selectedSessions=${this._experiment.id}`; + + console.log(`View results at ${compareUrl}`); } async start(): Promise<_ExperimentManager> { const examples = await this.getExamples(); const firstExample = examples[0]; const project = await this._getProject(firstExample); - this._printExperimentStart(); + await this._printExperimentStart(project); return new _ExperimentManager({ examples, experiment: project, From fb41a5106a48c3a86636f0ffde042af051f91f45 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 17:10:32 +0200 Subject: [PATCH 030/373] fix tsc --- js/src/evaluation/_runner.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 087582278..3c5a12799 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -336,7 +336,7 @@ class _ExperimentManager { const examples = await this.getExamples(); const firstExample = examples[0]; const project = await this._getProject(firstExample); - await this._printExperimentStart(project); + await this._printExperimentStart(); return new _ExperimentManager({ examples, experiment: project, From ddfb162f50d284e978b25a96b4eaef8dbc39cba1 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Tue, 14 May 2024 14:03:35 -0700 Subject: [PATCH 031/373] ignore me --- 
python/langsmith/_internal/_embedding_distance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/_internal/_embedding_distance.py b/python/langsmith/_internal/_embedding_distance.py index 4eb9fc83b..dff2d1f00 100644 --- a/python/langsmith/_internal/_embedding_distance.py +++ b/python/langsmith/_internal/_embedding_distance.py @@ -15,7 +15,7 @@ from typing_extensions import TypedDict if TYPE_CHECKING: - import numpy as np + import numpy as np # type: ignore logger = logging.getLogger(__name__) From 536fc5c097ac566062f6a8480c7f4aff1370b085 Mon Sep 17 00:00:00 2001 From: infra Date: Wed, 15 May 2024 02:26:45 -0400 Subject: [PATCH 032/373] fix: use 0.5.0 image --- python/langsmith/cli/.env.example | 2 +- python/langsmith/cli/docker-compose.yaml | 14 +++++++------- python/langsmith/cli/main.py | 14 +++++++------- python/pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index f719a7470..e078ec3cd 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,5 +1,5 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. 
-_LANGSMITH_IMAGE_VERSION=0.3.5 +_LANGSMITH_IMAGE_VERSION=0.5.0 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key OPENAI_API_KEY=your-openai-api-key # Needed for Online Evals and Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index b2435167d..5c1df727c 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.3.5} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.5.0} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,7 +16,7 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker @@ -49,7 +49,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker @@ -75,7 +75,7 @@ services: condition: service_completed_successfully restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} environment: - LANGCHAIN_ENV=local_docker - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} @@ -163,7 +163,7 @@ services: timeout: 2s 
retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} depends_on: langchain-clickhouse: condition: service_healthy @@ -182,7 +182,7 @@ services: "migrate -source file://clickhouse/migrations -database 'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.3.5} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} depends_on: langchain-db: condition: service_healthy diff --git a/python/langsmith/cli/main.py b/python/langsmith/cli/main.py index 52cee0c83..737952825 100644 --- a/python/langsmith/cli/main.py +++ b/python/langsmith/cli/main.py @@ -101,12 +101,12 @@ def _start_local(self) -> None: def pull( self, *, - version: str = "0.3.5", + version: str = "0.5.0", ) -> None: """Pull the latest LangSmith images. Args: - version: The LangSmith version to use for LangSmith. Defaults to 0.3.5 + version: The LangSmith version to use for LangSmith. Defaults to 0.5.0 """ os.environ["_LANGSMITH_IMAGE_VERSION"] = version subprocess.run( @@ -123,7 +123,7 @@ def start( *, openai_api_key: Optional[str] = None, langsmith_license_key: str, - version: str = "0.3.5", + version: str = "0.5.0", ) -> None: """Run the LangSmith server locally. @@ -251,8 +251,8 @@ def main() -> None: ) server_start_parser.add_argument( "--version", - default="0.3.5", - help="The LangSmith version to use for LangSmith. Defaults to 0.3.5.", + default="0.5.0", + help="The LangSmith version to use for LangSmith. 
Defaults to 0.5.0.", ) server_start_parser.set_defaults( func=lambda args: server_command.start( @@ -279,8 +279,8 @@ def main() -> None: ) server_pull_parser.add_argument( "--version", - default="0.3.5", - help="The LangSmith version to use for LangSmith. Defaults to 0.3.5.", + default="0.5.0", + help="The LangSmith version to use for LangSmith. Defaults to 0.5.0.", ) server_pull_parser.set_defaults( func=lambda args: server_command.pull(version=args.version) diff --git a/python/pyproject.toml b/python/pyproject.toml index 71b3ed0f8..a5ba8021f 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.57" +version = "0.1.58" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 891dc8c53b94e91105ac2c9864bcc83353c69ac7 Mon Sep 17 00:00:00 2001 From: infra Date: Wed, 15 May 2024 03:23:51 -0400 Subject: [PATCH 033/373] add .env comments --- python/langsmith/cli/.env.example | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index e078ec3cd..03dc0ff8b 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -11,10 +11,10 @@ REDIS_DATABASE_URI=redis://langchain-redis:6379 # Change to your Redis URI if us LOG_LEVEL=warning # Change to your desired log level MAX_ASYNC_JOBS_PER_WORKER=10 # Change to your desired maximum async jobs per worker. We recommend 10/suggest spinning up more replicas of the queue worker if you need more throughput ASYNCPG_POOL_MAX_SIZE=3 # Change the PG pool size based off your pg instance/requirements. 
-CLICKHOUSE_HOST=langchain-clickhouse -CLICKHOUSE_USER=default -CLICKHOUSE_DB=default -CLICKHOUSE_PORT=8123 -CLICKHOUSE_TLS=false -CLICKHOUSE_PASSWORD=password# Change to your Clickhouse password if needed -CLICKHOUSE_NATIVE_PORT=9000 +CLICKHOUSE_HOST=langchain-clickhouse # Change to your Clickhouse host if using external Clickhouse. Otherwise, leave it as is +CLICKHOUSE_USER=default # Change to your Clickhouse user if needed +CLICKHOUSE_DB=default # Change to your Clickhouse database if needed +CLICKHOUSE_PORT=8123 # Change to your Clickhouse port if needed +CLICKHOUSE_TLS=false # Change to true if you are using TLS to connect to Clickhouse. Otherwise, leave it as is +CLICKHOUSE_PASSWORD=password # Change to your Clickhouse password if needed +CLICKHOUSE_NATIVE_PORT=9000 # Change to your Clickhouse native port if needed From d5eb23f7edeebb294b65042f9d44fedf754eed7e Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 12:55:43 -0700 Subject: [PATCH 034/373] Add splits as param to list_examples --- python/langsmith/client.py | 4 +++ python/tests/integration_tests/test_client.py | 36 ++++++++++++++----- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 872f83530..21de7d799 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3094,6 +3094,7 @@ def list_examples( dataset_name: Optional[str] = None, example_ids: Optional[Sequence[ID_TYPE]] = None, as_of: Optional[Union[datetime.datetime, str]] = None, + splits: Optional[Sequence[str]] = None, inline_s3_urls: bool = True, limit: Optional[int] = None, metadata: Optional[dict] = None, @@ -3112,6 +3113,8 @@ def list_examples( timestamp to retrieve the examples as of. Response examples will only be those that were present at the time of the tagged (or timestamped) version. + splits (List[str], optional): A list of dataset splits to filter by. 
+ Returns examples only from the specified splits. inline_s3_urls (bool, optional): Whether to inline S3 URLs. Defaults to True. limit (int, optional): The maximum number of examples to return. @@ -3125,6 +3128,7 @@ def list_examples( "as_of": ( as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of ), + "splits": splits, "inline_s3_urls": inline_s3_urls, "limit": min(limit, 100) if limit is not None else 100, } diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 79c18c8ac..be04bef1f 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -109,25 +109,43 @@ def test_datasets(langchain_client: Client) -> None: def test_list_examples(langchain_client: Client) -> None: """Test list_examples.""" examples = [ - ("Shut up, idiot", "Toxic"), - ("You're a wonderful person", "Not toxic"), - ("This is the worst thing ever", "Toxic"), - ("I had a great day today", "Not toxic"), - ("Nobody likes you", "Toxic"), - ("This is unacceptable. I want to speak to the manager.", "Not toxic"), + ("Shut up, idiot", "Toxic", {"dataset_split": "train"}), + ("You're a wonderful person", "Not toxic", {"dataset_split": "test"}), + ("This is the worst thing ever", "Toxic", {"dataset_split": "train"}), + ("I had a great day today", "Not toxic", {"dataset_split": "test"}), + ("Nobody likes you", "Toxic", {"dataset_split": "train"}), + ("This is unacceptable. 
I want to speak to the manager.", "Not toxic", {}), ] dataset_name = "__test_list_examples" + uuid4().hex[:4] dataset = langchain_client.create_dataset(dataset_name=dataset_name) - inputs, outputs = zip( - *[({"text": text}, {"label": label}) for text, label in examples] + inputs, outputs, metadata = zip( + *[ + ({"text": text}, {"label": label}, metadata) + for text, label, metadata in examples + ] ) langchain_client.create_examples( - inputs=inputs, outputs=outputs, dataset_id=dataset.id + inputs=inputs, outputs=outputs, metadata=metadata, dataset_id=dataset.id ) example_list = list(langchain_client.list_examples(dataset_id=dataset.id)) assert len(example_list) == len(examples) + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["train"]) + ) + assert len(example_list) == 3 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["test"]) + ) + assert len(example_list) == 2 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["train", "test"]) + ) + assert len(example_list) == 5 + langchain_client.create_example( inputs={"text": "What's up!"}, outputs={"label": "Not toxic"}, From 09040c7855a2fa9a7ef6148b52f9ffc21e780bf7 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 15:00:25 -0700 Subject: [PATCH 035/373] add typescript support, add to update/create examples --- js/src/client.ts | 12 +++++++++++ js/src/schemas.ts | 2 ++ js/src/tests/client.int.test.ts | 38 ++++++++++++++++++++++++++++++++- python/langsmith/client.py | 7 +++++- python/langsmith/schemas.py | 2 ++ 5 files changed, 59 insertions(+), 2 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index cfbd869da..df427edbb 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -239,6 +239,7 @@ export type CreateExampleOptions = { exampleId?: string; metadata?: KVMap; + split?: string; }; type AutoBatchQueueItem = { @@ -1989,6 +1990,7 @@ export 
class Client { createdAt, exampleId, metadata, + split, }: CreateExampleOptions ): Promise { let datasetId_ = datasetId; @@ -2009,6 +2011,7 @@ export class Client { created_at: createdAt_?.toISOString(), id: exampleId, metadata, + split, }; const response = await this.caller.call(fetch, `${this.apiUrl}/examples`, { @@ -2033,6 +2036,7 @@ export class Client { inputs: Array; outputs?: Array; metadata?: Array; + splits?: Array; sourceRunIds?: Array; exampleIds?: Array; datasetId?: string; @@ -2063,6 +2067,7 @@ export class Client { inputs: input, outputs: outputs ? outputs[idx] : undefined, metadata: metadata ? metadata[idx] : undefined, + split: props.splits ? props.splits[idx] : undefined, id: exampleIds ? exampleIds[idx] : undefined, source_run_id: sourceRunIds ? sourceRunIds[idx] : undefined, }; @@ -2130,6 +2135,7 @@ export class Client { datasetName, exampleIds, asOf, + splits, inlineS3Urls, metadata, }: { @@ -2137,6 +2143,7 @@ export class Client { datasetName?: string; exampleIds?: string[]; asOf?: string | Date; + splits?: string[]; inlineS3Urls?: boolean; metadata?: KVMap; } = {}): AsyncIterable { @@ -2167,6 +2174,11 @@ export class Client { params.append("id", id_); } } + if (splits !== undefined) { + for (const split of splits) { + params.append("splits", split); + } + } if (metadata !== undefined) { const serializedMetadata = JSON.stringify(metadata); params.append("metadata", serializedMetadata); diff --git a/js/src/schemas.ts b/js/src/schemas.ts index f4f6e05cf..4f46c905e 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -229,6 +229,7 @@ export interface RunUpdate { export interface ExampleCreate extends BaseExample { id?: string; created_at?: string; + split?: string; } export interface Example extends BaseExample { @@ -244,6 +245,7 @@ export interface ExampleUpdate { inputs?: KVMap; outputs?: KVMap; metadata?: KVMap; + split?: string; } export interface BaseDataset { name: string; diff --git a/js/src/tests/client.int.test.ts 
b/js/src/tests/client.int.test.ts index bc357c4ab..5f646a7b5 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -76,7 +76,7 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { const example = await client.createExample( { col1: "addedExampleCol1" }, { col2: "addedExampleCol2" }, - { datasetId: newDataset.id } + { datasetId: newDataset.id, split: "my_split" } ); const exampleValue = await client.readExample(example.id); expect(exampleValue.inputs.col1).toBe("addedExampleCol1"); @@ -88,13 +88,21 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { expect(examples.length).toBe(2); expect(examples.map((e) => e.id)).toContain(example.id); + const _examples = await toArray( + client.listExamples({ datasetId: newDataset.id, splits: ["my_split"] }) + ); + expect(_examples.length).toBe(2); + expect(_examples.map((e) => e.id)).toContain(example.id); + await client.updateExample(example.id, { inputs: { col1: "updatedExampleCol1" }, outputs: { col2: "updatedExampleCol2" }, + split: "my_split2", }); // Says 'example updated' or something similar const newExampleValue = await client.readExample(example.id); expect(newExampleValue.inputs.col1).toBe("updatedExampleCol1"); + expect(newExampleValue.metadata?.dataset_split).toBe("my_split2"); await client.deleteExample(example.id); const examples2 = await toArray( client.listExamples({ datasetId: newDataset.id }) @@ -481,6 +489,7 @@ test.concurrent( { output: "hi there 3" }, ], metadata: [{ key: "value 1" }, { key: "value 2" }, { key: "value 3" }], + splits: ["train", "test", "train"], datasetId: dataset.id, }); const initialExamplesList = await toArray( @@ -511,16 +520,19 @@ test.concurrent( ); expect(example1?.outputs?.output).toEqual("hi there 1"); expect(example1?.metadata?.key).toEqual("value 1"); + expect(example1?.metadata?.dataset_split).toEqual("train"); const example2 = examplesList2.find( (e) => e.inputs.input === "hello world 2" ); 
expect(example2?.outputs?.output).toEqual("hi there 2"); expect(example2?.metadata?.key).toEqual("value 2"); + expect(example2?.metadata?.dataset_split).toEqual("test"); const example3 = examplesList2.find( (e) => e.inputs.input === "hello world 3" ); expect(example3?.outputs?.output).toEqual("hi there 3"); expect(example3?.metadata?.key).toEqual("value 3"); + expect(example3?.metadata?.dataset_split).toEqual("train"); await client.createExample( { input: "hello world" }, @@ -560,6 +572,30 @@ test.concurrent( expect(examplesList3[0].metadata?.foo).toEqual("bar"); expect(examplesList3[0].metadata?.baz).toEqual("qux"); + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + splits: ["train"], + }) + ); + expect(examplesList3.length).toEqual(2); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + splits: ["test"], + }) + ); + expect(examplesList3.length).toEqual(1); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + splits: ["train", "test"], + }) + ); + expect(examplesList3.length).toEqual(3); + await client.deleteDataset({ datasetId: dataset.id }); }, 180_000 diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 21de7d799..0b5e29278 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -2936,6 +2936,7 @@ def create_examples( inputs: Sequence[Mapping[str, Any]], outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, metadata: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, + splits: Optional[Sequence[Optional[str]]] = None, source_run_ids: Optional[Sequence[Optional[ID_TYPE]]] = None, ids: Optional[Sequence[Optional[ID_TYPE]]] = None, dataset_id: Optional[ID_TYPE] = None, @@ -2981,13 +2982,15 @@ def create_examples( "outputs": out_, "dataset_id": dataset_id, "metadata": metadata_, + "split": split_, "id": id_, "source_run_id": source_run_id_, } - for in_, out_, metadata_, id_, source_run_id_ in zip( + for in_, 
out_, metadata_, split_, id_, source_run_id_ in zip( inputs, outputs or [None] * len(inputs), metadata or [None] * len(inputs), + splits or [None] * len(inputs), ids or [None] * len(inputs), source_run_ids or [None] * len(inputs), ) @@ -3009,6 +3012,7 @@ def create_example( created_at: Optional[datetime.datetime] = None, outputs: Optional[Mapping[str, Any]] = None, metadata: Optional[Mapping[str, Any]] = None, + split: Optional[str] = None, example_id: Optional[ID_TYPE] = None, ) -> ls_schemas.Example: """Create a dataset example in the LangSmith API. @@ -3045,6 +3049,7 @@ def create_example( "outputs": outputs, "dataset_id": dataset_id, "metadata": metadata, + "split": split, } if created_at: data["created_at"] = created_at.isoformat() diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 78c849bbc..ee57ffd32 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -63,6 +63,7 @@ class ExampleCreate(ExampleBase): id: Optional[UUID] created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) + split: Optional[str] = None class Example(ExampleBase): @@ -105,6 +106,7 @@ class ExampleUpdate(BaseModel): inputs: Optional[Dict[str, Any]] = None outputs: Optional[Dict[str, Any]] = None metadata: Optional[Dict[str, Any]] = None + split: Optional[str] = None class Config: """Configuration class for the schema.""" From 10d4cc6068cd4986b0dbe10a325fbb3b972522e9 Mon Sep 17 00:00:00 2001 From: infra Date: Wed, 15 May 2024 18:09:43 -0400 Subject: [PATCH 036/373] fix: bump 0.5.1 --- python/langsmith/cli/.env.example | 2 +- python/langsmith/cli/docker-compose.yaml | 14 +++++++------- python/langsmith/cli/main.py | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 03dc0ff8b..757ca17c9 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,5 +1,5 @@ # Don't change this file. 
Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. -_LANGSMITH_IMAGE_VERSION=0.5.0 +_LANGSMITH_IMAGE_VERSION=0.5.1 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key OPENAI_API_KEY=your-openai-api-key # Needed for Online Evals and Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 5c1df727c..1dff7840b 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.5.0} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.5.1} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,7 +16,7 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker @@ -49,7 +49,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker @@ -75,7 +75,7 @@ services: condition: service_completed_successfully restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} 
environment: - LANGCHAIN_ENV=local_docker - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} @@ -163,7 +163,7 @@ services: timeout: 2s retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} depends_on: langchain-clickhouse: condition: service_healthy @@ -182,7 +182,7 @@ services: "migrate -source file://clickhouse/migrations -database 'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.0} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} depends_on: langchain-db: condition: service_healthy diff --git a/python/langsmith/cli/main.py b/python/langsmith/cli/main.py index 737952825..116437aa3 100644 --- a/python/langsmith/cli/main.py +++ b/python/langsmith/cli/main.py @@ -101,12 +101,12 @@ def _start_local(self) -> None: def pull( self, *, - version: str = "0.5.0", + version: str = "0.5.1", ) -> None: """Pull the latest LangSmith images. Args: - version: The LangSmith version to use for LangSmith. Defaults to 0.5.0 + version: The LangSmith version to use for LangSmith. Defaults to 0.5.1 """ os.environ["_LANGSMITH_IMAGE_VERSION"] = version subprocess.run( @@ -123,7 +123,7 @@ def start( *, openai_api_key: Optional[str] = None, langsmith_license_key: str, - version: str = "0.5.0", + version: str = "0.5.1", ) -> None: """Run the LangSmith server locally. @@ -251,8 +251,8 @@ def main() -> None: ) server_start_parser.add_argument( "--version", - default="0.5.0", - help="The LangSmith version to use for LangSmith. Defaults to 0.5.0.", + default="0.5.1", + help="The LangSmith version to use for LangSmith. 
Defaults to 0.5.1.", ) server_start_parser.set_defaults( func=lambda args: server_command.start( @@ -279,8 +279,8 @@ def main() -> None: ) server_pull_parser.add_argument( "--version", - default="0.5.0", - help="The LangSmith version to use for LangSmith. Defaults to 0.5.0.", + default="0.5.1", + help="The LangSmith version to use for LangSmith. Defaults to 0.5.1.", ) server_pull_parser.set_defaults( func=lambda args: server_command.pull(version=args.version) From 4e0aead60b262051d94e75595048014831ffb5ca Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 15:20:14 -0700 Subject: [PATCH 037/373] update python tests --- python/langsmith/client.py | 2 ++ python/tests/integration_tests/test_client.py | 31 ++++++++++++------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 0b5e29278..1a1ee0df7 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3164,6 +3164,7 @@ def update_example( inputs: Optional[Dict[str, Any]] = None, outputs: Optional[Mapping[str, Any]] = None, metadata: Optional[Dict] = None, + split: Optional[str] = None, dataset_id: Optional[ID_TYPE] = None, ) -> Dict[str, Any]: """Update a specific example. 
@@ -3191,6 +3192,7 @@ def update_example( outputs=outputs, dataset_id=dataset_id, metadata=metadata, + split=split, ) response = self.session.patch( f"{self.api_url}/examples/{_as_uuid(example_id, 'example_id')}", diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index be04bef1f..dd65b3678 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -109,24 +109,21 @@ def test_datasets(langchain_client: Client) -> None: def test_list_examples(langchain_client: Client) -> None: """Test list_examples.""" examples = [ - ("Shut up, idiot", "Toxic", {"dataset_split": "train"}), - ("You're a wonderful person", "Not toxic", {"dataset_split": "test"}), - ("This is the worst thing ever", "Toxic", {"dataset_split": "train"}), - ("I had a great day today", "Not toxic", {"dataset_split": "test"}), - ("Nobody likes you", "Toxic", {"dataset_split": "train"}), - ("This is unacceptable. I want to speak to the manager.", "Not toxic", {}), + ("Shut up, idiot", "Toxic", "train"), + ("You're a wonderful person", "Not toxic", "test"), + ("This is the worst thing ever", "Toxic", "train"), + ("I had a great day today", "Not toxic", "test"), + ("Nobody likes you", "Toxic", "train"), + ("This is unacceptable. 
I want to speak to the manager.", "Not toxic", None), ] dataset_name = "__test_list_examples" + uuid4().hex[:4] dataset = langchain_client.create_dataset(dataset_name=dataset_name) - inputs, outputs, metadata = zip( - *[ - ({"text": text}, {"label": label}, metadata) - for text, label, metadata in examples - ] + inputs, outputs, split = zip( + *[({"text": text}, {"label": label}, split) for text, label, split in examples] ) langchain_client.create_examples( - inputs=inputs, outputs=outputs, metadata=metadata, dataset_id=dataset.id + inputs=inputs, outputs=outputs, split=split, dataset_id=dataset.id ) example_list = list(langchain_client.list_examples(dataset_id=dataset.id)) assert len(example_list) == len(examples) @@ -146,6 +143,16 @@ def test_list_examples(langchain_client: Client) -> None: ) assert len(example_list) == 5 + langchain_client.update_example( + example_id=[ + example.id + for example in example_list + if example.metadata is not None + and example.metadata.get("dataset_split") == "test" + ][0], + split="train", + ) + langchain_client.create_example( inputs={"text": "What's up!"}, outputs={"label": "Not toxic"}, From 00b6c8e9288e5a5eeecc434a354bd2f48ecbabec Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 15:26:30 -0700 Subject: [PATCH 038/373] update docstring --- python/langsmith/client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 1a1ee0df7..9a822fd90 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3118,7 +3118,8 @@ def list_examples( timestamp to retrieve the examples as of. Response examples will only be those that were present at the time of the tagged (or timestamped) version. - splits (List[str], optional): A list of dataset splits to filter by. 
+ splits (List[str], optional): A list of dataset splits, which are + divisions of your dataset such as 'train', 'test', or 'validation'. Returns examples only from the specified splits. inline_s3_urls (bool, optional): Whether to inline S3 URLs. Defaults to True. From 0c90c7d785b96e2a870ef0450ff02e3294d67ee6 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 18:00:07 -0700 Subject: [PATCH 039/373] fix python integration test --- python/tests/integration_tests/test_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index dd65b3678..c037bfd65 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -119,11 +119,11 @@ def test_list_examples(langchain_client: Client) -> None: dataset_name = "__test_list_examples" + uuid4().hex[:4] dataset = langchain_client.create_dataset(dataset_name=dataset_name) - inputs, outputs, split = zip( + inputs, outputs, splits = zip( *[({"text": text}, {"label": label}, split) for text, label, split in examples] ) langchain_client.create_examples( - inputs=inputs, outputs=outputs, split=split, dataset_id=dataset.id + inputs=inputs, outputs=outputs, splits=splits, dataset_id=dataset.id ) example_list = list(langchain_client.list_examples(dataset_id=dataset.id)) assert len(example_list) == len(examples) From cdf3da3007c3058aa3b17754cff9060906eff4a9 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 18:05:36 -0700 Subject: [PATCH 040/373] fix js test --- js/src/tests/client.int.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 5f646a7b5..7637bb821 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -91,7 +91,7 @@ test.concurrent("Test 
LangSmith Client Dataset CRD", async () => { const _examples = await toArray( client.listExamples({ datasetId: newDataset.id, splits: ["my_split"] }) ); - expect(_examples.length).toBe(2); + expect(_examples.length).toBe(1); expect(_examples.map((e) => e.id)).toContain(example.id); await client.updateExample(example.id, { From 525c788cc1a6a9c02095faae56a9459a702e32cd Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 18:18:49 -0700 Subject: [PATCH 041/373] v0.1.26 --- js/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/package.json b/js/package.json index ade1bc5d0..5aa0b8b4a 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.25", + "version": "0.1.26", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ From ea5ea51517a922995862858f83b479d2a3dc62b9 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 18:21:05 -0700 Subject: [PATCH 042/373] bump python version --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index a5ba8021f..90f4db811 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.58" +version = "0.1.59" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 8f078ac2c66ead2fdf7121d9d288c7f636de39ee Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 18:23:34 -0700 Subject: [PATCH 043/373] Revert "v0.1.26" This reverts commit 525c788cc1a6a9c02095faae56a9459a702e32cd. 
--- js/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/package.json b/js/package.json index 5aa0b8b4a..ade1bc5d0 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.26", + "version": "0.1.25", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ From e5551e711a4252497c85f1a075cb4b61d2100476 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 15 May 2024 18:24:13 -0700 Subject: [PATCH 044/373] bump version correctly --- js/package.json | 4 ++-- js/src/index.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index ade1bc5d0..dc4f02c00 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.25", + "version": "0.1.26", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -197,4 +197,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/src/index.ts b/js/src/index.ts index c3b377c1d..01f8dbfa7 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.25"; +export const __version__ = "0.1.26"; From 8bfe97c70467d59939f194679a237521bf517314 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 16 May 2024 21:54:58 +0200 Subject: [PATCH 045/373] feat(ts): add invocation params handling in wrapOpenAI --- js/src/schemas.ts | 9 ++++++ js/src/tests/wrapped_openai.int.test.ts | 24 ++++++++------ js/src/traceable.ts | 43 +++++++++++++++++++++---- js/src/wrappers/openai.ts | 36 +++++++++++++++++++++ 4 files changed, 95 insertions(+), 17 deletions(-) diff --git 
a/js/src/schemas.ts b/js/src/schemas.ts index 4f46c905e..ee8a11036 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -393,3 +393,12 @@ export type RetrieverOutput = Array<{ type: "Document"; metadata?: KVMap; }>; + +export interface InvocationParamsSchema { + ls_provider?: string; + ls_model_name?: string; + ls_model_type: "chat"; + ls_temperature?: number; + ls_max_tokens?: number; + ls_stop?: string[]; +} diff --git a/js/src/tests/wrapped_openai.int.test.ts b/js/src/tests/wrapped_openai.int.test.ts index 95962b979..be80e895d 100644 --- a/js/src/tests/wrapped_openai.int.test.ts +++ b/js/src/tests/wrapped_openai.int.test.ts @@ -61,7 +61,7 @@ test.concurrent("chat.completions", async () => { stream: true, }); - const originalChoices = []; + const originalChoices: unknown[] = []; for await (const chunk of originalStream) { originalChoices.push(chunk.choices); } @@ -74,7 +74,7 @@ test.concurrent("chat.completions", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: unknown[] = []; for await (const chunk of patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -126,7 +126,7 @@ test.concurrent("chat.completions", async () => { } ); - const patchedChoices2 = []; + const patchedChoices2: unknown[] = []; for await (const chunk of patchedStreamWithMetadata) { patchedChoices2.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -246,7 +246,7 @@ test.concurrent("chat completions with tool calling", async () => { stream: true, }); - const originalChoices = []; + const originalChoices: any[] = []; for await (const chunk of originalStream) { originalChoices.push(chunk.choices); } @@ -264,7 +264,7 @@ test.concurrent("chat completions with tool calling", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: any[] = []; for await (const chunk of patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type 
check streamed output @@ -305,7 +305,7 @@ test.concurrent("chat completions with tool calling", async () => { } ); - const patchedChoices2 = []; + const patchedChoices2: any[] = []; for await (const chunk of patchedStream2) { patchedChoices2.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -322,6 +322,10 @@ test.concurrent("chat completions with tool calling", async () => { // eslint-disable-next-line @typescript-eslint/no-explicit-any expect(JSON.parse((call[2] as any).body).extra.metadata).toEqual({ thing1: "thing2", + ls_model_name: "gpt-3.5-turbo", + ls_model_type: "chat", + ls_provider: "openai", + ls_temperature: 0, }); } callSpy.mockClear(); @@ -364,7 +368,7 @@ test.concurrent("completions", async () => { stream: true, }); - const originalChoices = []; + const originalChoices: unknown[] = []; for await (const chunk of originalStream) { originalChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -380,7 +384,7 @@ test.concurrent("completions", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: unknown[] = []; for await (const chunk of patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -411,7 +415,7 @@ test.concurrent("completions", async () => { } ); - const patchedChoices2 = []; + const patchedChoices2: unknown[] = []; for await (const chunk of patchedStream2) { patchedChoices2.push(chunk.choices); // @ts-expect-error Should type check streamed output @@ -441,7 +445,7 @@ test.skip("with initialization time config", async () => { stream: true, }); - const patchedChoices = []; + const patchedChoices: unknown[] = []; for await (const chunk of patchedStream) { patchedChoices.push(chunk.choices); // @ts-expect-error Should type check streamed output diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 11f2c64e5..6da9f9650 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -7,7 +7,7 @@ import 
{ isRunTree, isRunnableConfigLike, } from "./run_trees.js"; -import { KVMap } from "./schemas.js"; +import { InvocationParamsSchema, KVMap } from "./schemas.js"; import { getEnvironmentVariable } from "./utils/env.js"; function isPromiseMethod( @@ -167,9 +167,12 @@ const handleRunOutputs = (rawOutputs: unknown): KVMap => { return { outputs: rawOutputs }; }; -const getTracingRunTree = ( +const getTracingRunTree = ( runTree: RunTree, - inputs: unknown[] + inputs: Args, + getInvocationParams: + | ((...args: Args) => InvocationParamsSchema | undefined) + | undefined ): RunTree | undefined => { const tracingEnabled_ = tracingIsEnabled(runTree.tracingEnabled); if (!tracingEnabled_) { @@ -177,6 +180,16 @@ const getTracingRunTree = ( } runTree.inputs = handleRunInputs(inputs); + + const invocationParams = getInvocationParams?.(...inputs); + if (invocationParams != null) { + runTree.extra ??= {}; + runTree.extra.metadata = { + ...runTree.extra.metadata, + ...invocationParams, + }; + } + return runTree; }; @@ -381,6 +394,18 @@ export function traceable any>( // eslint-disable-next-line @typescript-eslint/no-explicit-any aggregator?: (args: any[]) => any; argsConfigPath?: [number] | [number, string]; + + /** + * Extract invocation parameters from the arguments of the traced function. + * This is useful for LangSmith to properly track common metadata like + * provider, model name and temperature. + * + * @param args Arguments of the traced function + * @returns Key-value map of the invocation parameters, which will be merged with the existing metadata + */ + getInvocationParams?: ( + ...args: Parameters + ) => InvocationParamsSchema | undefined; } ) { type Inputs = Parameters; @@ -454,7 +479,8 @@ export function traceable any>( return [ getTracingRunTree( RunTree.fromRunnableConfig(firstArg, ensuredConfig), - restArgs + restArgs as Inputs, + config?.getInvocationParams ), restArgs as Inputs, ]; @@ -477,7 +503,8 @@ export function traceable any>( firstArg === ROOT ? 
new RunTree(ensuredConfig) : firstArg.createChild(ensuredConfig), - restArgs + restArgs as Inputs, + config?.getInvocationParams ); return [currentRunTree, [currentRunTree, ...restArgs] as Inputs]; @@ -490,7 +517,8 @@ export function traceable any>( return [ getTracingRunTree( prevRunFromStore.createChild(ensuredConfig), - processedArgs + processedArgs, + config?.getInvocationParams ), processedArgs as Inputs, ]; @@ -498,7 +526,8 @@ export function traceable any>( const currentRunTree = getTracingRunTree( new RunTree(ensuredConfig), - processedArgs + processedArgs, + config?.getInvocationParams ); return [currentRunTree, processedArgs as Inputs]; })(); diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index 03154d35a..e43c55775 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -223,6 +223,24 @@ export const wrapOpenAI = ( run_type: "llm", aggregator: chatAggregator, argsConfigPath: [1, "langsmithExtra"], + getInvocationParams: (payload: unknown) => { + if (typeof payload !== "object" || payload == null) return undefined; + // we can safely do so, as the types are not exported in TSC + const params = payload as OpenAI.ChatCompletionCreateParams; + + const ls_stop = + (typeof params.stop === "string" ? [params.stop] : params.stop) ?? + undefined; + + return { + ls_provider: "openai", + ls_model_type: "chat", + ls_model_name: params.model, + ls_max_tokens: params.max_tokens ?? undefined, + ls_temperature: params.temperature ?? undefined, + ls_stop, + }; + }, ...options, } ); @@ -234,6 +252,24 @@ export const wrapOpenAI = ( run_type: "llm", aggregator: textAggregator, argsConfigPath: [1, "langsmithExtra"], + getInvocationParams: (payload: unknown) => { + if (typeof payload !== "object" || payload == null) return undefined; + // we can safely do so, as the types are not exported in TSC + const params = payload as OpenAI.CompletionCreateParams; + + const ls_stop = + (typeof params.stop === "string" ? 
[params.stop] : params.stop) ?? + undefined; + + return { + ls_provider: "openai", + ls_model_type: "chat", + ls_model_name: params.model, + ls_max_tokens: params.max_tokens ?? undefined, + ls_temperature: params.temperature ?? undefined, + ls_stop, + }; + }, ...options, } ); From 42b5374989f1f96c896eec20030ed5aa59819a27 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 18:23:50 +0200 Subject: [PATCH 046/373] Add failing tests --- js/src/tests/traceable.test.ts | 248 +++++++++++++++++++++++++++++++-- 1 file changed, 237 insertions(+), 11 deletions(-) diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 94fb50367..9b39411b3 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -1,10 +1,15 @@ import type { RunTree, RunTreeConfig } from "../run_trees.js"; -import { ROOT, traceable } from "../traceable.js"; +import { ROOT, getCurrentRunTree, traceable } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; import { FakeChatModel } from "@langchain/core/utils/testing"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; +import { getLangchainCallbacks } from "../langchain.js"; +import { BaseMessage, HumanMessage } from "@langchain/core/messages"; +import { RunnableConfig, RunnableLambda } from "@langchain/core/runnables"; +import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; test("basic traceable implementation", async () => { const { client, callSpy } = mockClient(); @@ -649,7 +654,7 @@ describe("deferred input", () => { }); describe("langchain", () => { - test.skip("bound", async () => { + test("explicit traceable to langchain", async () => { const { client, callSpy } = mockClient(); const llm = new FakeChatModel({}); @@ -659,28 +664,249 @@ 
describe("langchain", () => { const parser = new StringOutputParser(); const chain = prompt.pipe(llm).pipe(parser); - const main = traceable(chain.invoke.bind(chain), { - client, - tracingEnabled: true, - }); + const main = traceable( + async (input: { text: string }) => { + const runTree = getCurrentRunTree(); + const callbacks = await getLangchainCallbacks(runTree); + + const response = await chain.invoke(input, { callbacks }); + return response; + }, + { + name: "main", + client, + tracingEnabled: true, + tags: ["welcome"], + metadata: { hello: "world" }, + } + ); const result = await main({ text: "Hello world" }); expect(result).toEqual("Hello world"); expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ nodes: [ - "bound invoke:0", + "main:0", + "RunnableSequence:1", + "ChatPromptTemplate:2", + "FakeChatModel:3", + "StrOutputParser:4", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["RunnableSequence:1", "ChatPromptTemplate:2"], + ["RunnableSequence:1", "FakeChatModel:3"], + ["RunnableSequence:1", "StrOutputParser:4"], + ], + data: { + "main:0": { + inputs: { text: "Hello world" }, + outputs: { outputs: "Hello world" }, + tags: ["welcome"], + extra: { metadata: { hello: "world" } }, + }, + }, + }); + }); + + test("explicit langchain to traceable", async () => { + const { client, callSpy } = mockClient(); + + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + + const addValueTraceable = traceable( + (msg: BaseMessage) => + new HumanMessage({ content: msg.content + " world" }), + { name: "add_negligible_value" } + ); + + const chain = prompt + .pipe(llm) + .pipe( + new RunnableLambda({ + func: (message: BaseMessage, config?: RunnableConfig) => + addValueTraceable( + config ?? 
{ callbacks: [] }, + message as HumanMessage + ), + }) + ) + .pipe(parser); + + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + const tracer = new LangChainTracer({ client }); + const response = await chain.invoke( + { text: "Hello" }, + { callbacks: [tracer] } + ); + + // callbacks are backgrounded by default + await awaitAllCallbacks(); + + expect(response).toEqual("Hello world"); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "RunnableSequence:0", "ChatPromptTemplate:1", "FakeChatModel:2", - "StringOutputParser:3", + "RunnableLambda:3", + "StrOutputParser:4", + ], + edges: [ + ["RunnableSequence:0", "ChatPromptTemplate:1"], + ["RunnableSequence:0", "FakeChatModel:2"], + ["RunnableSequence:0", "RunnableLambda:3"], + ["RunnableSequence:0", "StrOutputParser:4"], + ], + }); + }); + + test("explicit simple nested", async () => { + const { client, callSpy } = mockClient(); + + const wrappedModel = traceable( + async (value: { input: string }) => `Wrapped input: ${value.input}`, + { name: "wrappedModel" } + ); + + const lambda = new RunnableLambda({ + func: async (value: { input: string }, config?: RunnableConfig) => { + return await wrappedModel(config ?? { callbacks: [] }, value); + }, + }).withConfig({ runName: "lambda" }); + + const main = traceable( + async () => { + const runTree = getCurrentRunTree(); + const callbacks = await getLangchainCallbacks(runTree); + + return { + response: [ + await lambda.invoke({ input: "Are you ready?" 
}, { callbacks }), + ], + }; + }, + { name: "main", client, tracingEnabled: true } + ); + + const result = await main(); + await awaitAllCallbacks(); + + expect(result).toEqual({ + response: ["Wrapped input: Are you ready?"], + }); + + console.dir(callSpy.mock.calls, { depth: null }); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: ["main:0", "lambda:1", "wrappedModel:2"], + edges: [ + ["main:0", "lambda:1"], + ["lambda:1", "wrappedModel:2"], + ], + }); + }); + + test("explicit nested", async () => { + const { client, callSpy } = mockClient(); + + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + const model = prompt + .pipe(llm) + .pipe(parser) + .withConfig({ runName: "model" }); + + const wrappedModel = traceable( + async (value: { input: string }) => { + const runTree = getCurrentRunTree(); + const callbacks = await getLangchainCallbacks(runTree); + return model.invoke( + { text: `Wrapped input: ${value.input}` }, + { callbacks } + ); + }, + { name: "wrappedModel" } + ); + + const lambda = new RunnableLambda({ + func: async (value: { input: string }, config?: RunnableConfig) => { + console.log("lambda", config?.callbacks); + return await wrappedModel(config ?? { callbacks: [] }, value); + }, + }).withConfig({ runName: "lambda" }); + + const main = traceable( + async () => { + const runTree = getCurrentRunTree(); + console.log("main, runTree.id", runTree.id); + const callbacks = await getLangchainCallbacks(runTree); + + return { + response: [ + await lambda.invoke({ input: "Are you ready?" }, { callbacks }), + await lambda.invoke( + { input: "I said, Are. You. Ready?" 
}, + { callbacks } + ), + ], + }; + }, + { name: "main", client, tracingEnabled: true } + ); + + const result = await main(); + await awaitAllCallbacks(); + + expect(result).toEqual({ + response: [ + "Wrapped input: Are you ready?", + "Wrapped input: I said, Are. You. Ready?", + ], + }); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "lambda:1", + "wrappedModel:2", + "model:3", + "ChatPromptTemplate:4", + "FakeChatModel:5", + "StrOutputParser:6", + "lambda:7", + "wrappedModel:8", + "model:9", + "ChatPromptTemplate:10", + "FakeChatModel:11", + "StrOutputParser:12", ], edges: [ - ["bound invoke:0", "ChatPromptTemplate:1"], - ["ChatPromptTemplate:1", "FakeChatModel:2"], - ["FakeChatModel:2", "StringOutputParser:3"], + ["main:0", "lambda:1"], + ["lambda:1", "wrappedModel:2"], + ["wrappedModel:2", "model:3"], + ["model:3", "ChatPromptTemplate:4"], + ["model:3", "FakeChatModel:5"], + ["model:3", "StrOutputParser:6"], + ["main:0", "lambda:7"], + ["lambda:7", "wrappedModel:8"], + ["wrappedModel:8", "model:9"], + ["model:9", "ChatPromptTemplate:10"], + ["model:9", "FakeChatModel:11"], + ["model:9", "StrOutputParser:12"], ], }); }); + + test.skip("make sure disabled callback state work correctly", () => {}); }); describe("generator", () => { From 72d92ca38670139f1520c0f1a78c4408bd33270a Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 18:24:08 +0200 Subject: [PATCH 047/373] Make sure we're passing the custom client in fromRunnableConfig as well --- js/src/run_trees.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 267b22c5e..9a40e4d64 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -93,6 +93,7 @@ interface LangChainTracerLike extends TracerLike { name: "langchain_tracer"; projectName: string; getRun?: (id: string) => RunTree | undefined; + client: Client; } export class RunTree implements BaseRun { @@ -170,13 +171,16 @@ export class 
RunTree implements BaseRun { | undefined; let parentRun: RunTree | undefined; let projectName: string | undefined; + let client: Client | undefined; if (callbackManager) { const parentRunId = callbackManager?.getParentRunId?.() ?? ""; + console.log("obtaining parent run id", parentRunId) const langChainTracer = callbackManager?.handlers?.find( (handler: TracerLike) => handler?.name == "langchain_tracer" ) as LangChainTracerLike | undefined; parentRun = langChainTracer?.getRun?.(parentRunId); projectName = langChainTracer?.projectName; + client = langChainTracer?.client; } const dedupedTags = [ ...new Set((parentRun?.tags ?? []).concat(config?.tags ?? [])), @@ -189,6 +193,7 @@ export class RunTree implements BaseRun { name: props?.name ?? "", parent_run: parentRun, tags: dedupedTags, + client, extra: { metadata: dedupedMetadata, }, From e510cf6dac5a99bad54964712edf055251becdee Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 18:24:22 +0200 Subject: [PATCH 048/373] Add util method for converting RunTree to CallbackLike --- js/src/langchain.ts | 48 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 js/src/langchain.ts diff --git a/js/src/langchain.ts b/js/src/langchain.ts new file mode 100644 index 000000000..a0bb42211 --- /dev/null +++ b/js/src/langchain.ts @@ -0,0 +1,48 @@ +/* eslint-disable import/no-extraneous-dependencies */ +import { CallbackManager } from "@langchain/core/callbacks/manager"; +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; +import { RunTree } from "./run_trees.js"; +import { Run } from "./schemas.js"; + +// TODO: move this to langchain/smith +export async function getLangchainCallbacks(runTree: RunTree) { + // TODO: CallbackManager.configure() is only async due to LangChainTracer + // creationg being async, which is unnecessary. 
+ + const callbacks = await CallbackManager.configure(); + let langChainTracer = callbacks?.handlers.find( + (handler): handler is LangChainTracer => + handler?.name === "langchain_tracer" + ); + + if (!langChainTracer && runTree.tracingEnabled) { + langChainTracer = new LangChainTracer(); + callbacks?.addHandler(langChainTracer); + } + + const runMap = new Map(); + + const queue = [runTree]; + const visited = new Set(); + + while (queue.length > 0) { + const current = queue.shift(); + if (!current || visited.has(current.id)) continue; + visited.add(current.id); + + runMap.set(current.id, current); + if (current.child_runs) { + queue.push(...current.child_runs); + } + } + + if (callbacks != null) { + Object.assign(callbacks, { _parentRunId: runTree.id }); + } + + if (langChainTracer != null) { + Object.assign(langChainTracer, { runMap, client: runTree.client }); + } + + return callbacks; +} From 016904388b2b5f0a7a62bd03ff63a5129c6d3520 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 21:25:54 +0200 Subject: [PATCH 049/373] Load parent run as well --- js/src/langchain.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index a0bb42211..74a45a6ae 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -34,6 +34,10 @@ export async function getLangchainCallbacks(runTree: RunTree) { if (current.child_runs) { queue.push(...current.child_runs); } + + if (current.parent_run) { + queue.push(current.parent_run); + } } if (callbacks != null) { From 74ad7de2ebbd32662b68ac63786010a75089c0ff Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 21:55:27 +0200 Subject: [PATCH 050/373] Remove console.log --- js/src/run_trees.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 9a40e4d64..e9aa0d558 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -174,7 +174,6 @@ export class RunTree implements BaseRun { let client: Client | undefined; if 
(callbackManager) { const parentRunId = callbackManager?.getParentRunId?.() ?? ""; - console.log("obtaining parent run id", parentRunId) const langChainTracer = callbackManager?.handlers?.find( (handler: TracerLike) => handler?.name == "langchain_tracer" ) as LangChainTracerLike | undefined; From 9ed6a45d1ad914146ec62306f2cb8591ae64082d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 21:58:17 +0200 Subject: [PATCH 051/373] Move RunnableTraceable for testing purposes --- js/src/langchain.ts | 164 ++++++++++++++++++++++++++++++++- js/src/tests/traceable.test.ts | 153 +++++++++--------------------- 2 files changed, 208 insertions(+), 109 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 74a45a6ae..83ff2a174 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -1,8 +1,17 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { CallbackManager } from "@langchain/core/callbacks/manager"; +import { + CallbackManager, + CallbackManagerForChainRun, +} from "@langchain/core/callbacks/manager"; import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; -import { RunTree } from "./run_trees.js"; +import { RunTree, RunTreeConfig } from "./run_trees.js"; import { Run } from "./schemas.js"; +import { + Runnable, + RunnableConfig, + getCallbackManagerForConfig, +} from "@langchain/core/runnables"; +import { TraceableFunction, isTraceableFunction } from "./traceable.js"; // TODO: move this to langchain/smith export async function getLangchainCallbacks(runTree: RunTree) { @@ -50,3 +59,154 @@ export async function getLangchainCallbacks(runTree: RunTree) { return callbacks; } + +type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>; + +class CallbackManagerRunTree extends RunTree { + callbackManager: CallbackManager; + + activeCallbackManager: CallbackManagerForChainRun | undefined = undefined; + + constructor(config: RunTreeConfig, callbackManager: CallbackManager) { + super(config); + + 
this.callbackManager = callbackManager; + } + + public createChild(config: RunTreeConfig): RunTree { + const child = new CallbackManagerRunTree( + { + ...config, + parent_run: this, + project_name: this.project_name, + client: this.client, + }, + this.activeCallbackManager?.getChild() ?? this.callbackManager + ); + this.child_runs.push(child); + return child as RunTree; + } + + async postRun(): Promise { + // how it is translated in comparison to basic RunTree? + this.activeCallbackManager = await this.callbackManager.handleChainStart( + typeof this.serialized !== "object" && + this.serialized != null && + "lc" in this.serialized + ? this.serialized + : { + id: ["langchain", "smith", "CallbackManagerRunTree"], + lc: 1, + type: "not_implemented", + }, + this.inputs, + this.id, + this.run_type, + undefined, + undefined, + this.name + ); + } + + async patchRun(): Promise { + if (this.error) { + await this.activeCallbackManager?.handleChainError( + this.error, + this.id, + this.parent_run?.id, + undefined, + undefined + ); + } else { + await this.activeCallbackManager?.handleChainEnd( + this.outputs ?? {}, + this.id, + this.parent_run?.id, + undefined, + undefined + ); + } + } +} + +export class RunnableTraceable extends Runnable< + RunInput, + RunOutput +> { + lc_serializable = false; + + lc_namespace = ["langchain_core", "runnables"]; + + protected func: AnyTraceableFunction; + + constructor(fields: { func: AnyTraceableFunction }) { + super(fields); + + if (!isTraceableFunction(fields.func)) { + throw new Error( + "RunnableTraceable requires a function that is wrapped in traceable higher-order function" + ); + } + + this.func = fields.func; + } + + async invoke(input: RunInput, options?: Partial) { + const [config] = this._getOptionsList(options ?? {}, 1); + const callbackManager = await getCallbackManagerForConfig(config); + + const partialConfig = + "langsmith:traceable" in this.func + ? 
(this.func["langsmith:traceable"] as RunTreeConfig) + : { name: "" }; + + if (!callbackManager) throw new Error("CallbackManager not found"); + + const tracer = callbackManager?.handlers.find( + (handler): handler is LangChainTracer => + handler?.name === "langchain_tracer" + ); + + const runTree = new CallbackManagerRunTree( + { + ...partialConfig, + parent_run: callbackManager?._parentRunId + ? new RunTree({ name: "", id: callbackManager?._parentRunId }) + : undefined, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore mismatched client version + client: tracer?.client, + }, + callbackManager + ); + + if ( + typeof input === "object" && + input != null && + Object.keys(input).length === 1 + ) { + if ("args" in input && Array.isArray(input)) { + return (await this.func(runTree, ...input)) as RunOutput; + } + + if ( + "input" in input && + !( + typeof input === "object" && + input != null && + !Array.isArray(input) && + // eslint-disable-next-line no-instanceof/no-instanceof + !(input instanceof Date) + ) + ) { + try { + return (await this.func(runTree, input.input)) as RunOutput; + } catch (err) { + return (await this.func(runTree, input)) as RunOutput; + } + } + } + + return (await this.func(runTree, input)) as RunOutput; + } +} diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 9b39411b3..4b03b7c47 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -6,9 +6,8 @@ import { FakeChatModel } from "@langchain/core/utils/testing"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; -import { getLangchainCallbacks } from "../langchain.js"; +import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; import { BaseMessage, HumanMessage } from "@langchain/core/messages"; -import { RunnableConfig, RunnableLambda } 
from "@langchain/core/runnables"; import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; test("basic traceable implementation", async () => { @@ -726,15 +725,7 @@ describe("langchain", () => { const chain = prompt .pipe(llm) - .pipe( - new RunnableLambda({ - func: (message: BaseMessage, config?: RunnableConfig) => - addValueTraceable( - config ?? { callbacks: [] }, - message as HumanMessage - ), - }) - ) + .pipe(new RunnableTraceable({ func: addValueTraceable })) .pipe(parser); // eslint-disable-next-line @typescript-eslint/ban-ts-comment @@ -754,64 +745,18 @@ describe("langchain", () => { "RunnableSequence:0", "ChatPromptTemplate:1", "FakeChatModel:2", - "RunnableLambda:3", + "add_negligible_value:3", "StrOutputParser:4", ], edges: [ ["RunnableSequence:0", "ChatPromptTemplate:1"], ["RunnableSequence:0", "FakeChatModel:2"], - ["RunnableSequence:0", "RunnableLambda:3"], + ["RunnableSequence:0", "add_negligible_value:3"], ["RunnableSequence:0", "StrOutputParser:4"], ], }); }); - test("explicit simple nested", async () => { - const { client, callSpy } = mockClient(); - - const wrappedModel = traceable( - async (value: { input: string }) => `Wrapped input: ${value.input}`, - { name: "wrappedModel" } - ); - - const lambda = new RunnableLambda({ - func: async (value: { input: string }, config?: RunnableConfig) => { - return await wrappedModel(config ?? { callbacks: [] }, value); - }, - }).withConfig({ runName: "lambda" }); - - const main = traceable( - async () => { - const runTree = getCurrentRunTree(); - const callbacks = await getLangchainCallbacks(runTree); - - return { - response: [ - await lambda.invoke({ input: "Are you ready?" 
}, { callbacks }), - ], - }; - }, - { name: "main", client, tracingEnabled: true } - ); - - const result = await main(); - await awaitAllCallbacks(); - - expect(result).toEqual({ - response: ["Wrapped input: Are you ready?"], - }); - - console.dir(callSpy.mock.calls, { depth: null }); - - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: ["main:0", "lambda:1", "wrappedModel:2"], - edges: [ - ["main:0", "lambda:1"], - ["lambda:1", "wrappedModel:2"], - ], - }); - }); - test("explicit nested", async () => { const { client, callSpy } = mockClient(); @@ -820,40 +765,38 @@ describe("langchain", () => { ["human", "{text}"], ]); const parser = new StringOutputParser(); - const model = prompt + const chain = prompt .pipe(llm) .pipe(parser) - .withConfig({ runName: "model" }); - - const wrappedModel = traceable( - async (value: { input: string }) => { - const runTree = getCurrentRunTree(); - const callbacks = await getLangchainCallbacks(runTree); - return model.invoke( - { text: `Wrapped input: ${value.input}` }, - { callbacks } - ); - }, - { name: "wrappedModel" } - ); - - const lambda = new RunnableLambda({ - func: async (value: { input: string }, config?: RunnableConfig) => { - console.log("lambda", config?.callbacks); - return await wrappedModel(config ?? { callbacks: [] }, value); - }, - }).withConfig({ runName: "lambda" }); + .withConfig({ runName: "chain" }); + + const wrappedModel = new RunnableTraceable({ + func: traceable( + async (value: { input: string }) => { + const runTree = getCurrentRunTree(); + const callbacks = await getLangchainCallbacks(runTree); + + return chain.invoke( + { text: `Wrapped input: ${value.input}` }, + { callbacks } + ); + }, + { name: "wrappedModel" } + ), + }); const main = traceable( async () => { const runTree = getCurrentRunTree(); - console.log("main, runTree.id", runTree.id); const callbacks = await getLangchainCallbacks(runTree); return { response: [ - await lambda.invoke({ input: "Are you ready?" 
}, { callbacks }), - await lambda.invoke( + await wrappedModel.invoke( + { input: "Are you ready?" }, + { callbacks } + ), + await wrappedModel.invoke( { input: "I said, Are. You. Ready?" }, { callbacks } ), @@ -876,32 +819,28 @@ describe("langchain", () => { expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ nodes: [ "main:0", - "lambda:1", - "wrappedModel:2", - "model:3", - "ChatPromptTemplate:4", - "FakeChatModel:5", - "StrOutputParser:6", - "lambda:7", - "wrappedModel:8", - "model:9", - "ChatPromptTemplate:10", - "FakeChatModel:11", - "StrOutputParser:12", + "wrappedModel:1", + "chain:2", + "ChatPromptTemplate:3", + "FakeChatModel:4", + "StrOutputParser:5", + "wrappedModel:6", + "chain:7", + "ChatPromptTemplate:8", + "FakeChatModel:9", + "StrOutputParser:10", ], edges: [ - ["main:0", "lambda:1"], - ["lambda:1", "wrappedModel:2"], - ["wrappedModel:2", "model:3"], - ["model:3", "ChatPromptTemplate:4"], - ["model:3", "FakeChatModel:5"], - ["model:3", "StrOutputParser:6"], - ["main:0", "lambda:7"], - ["lambda:7", "wrappedModel:8"], - ["wrappedModel:8", "model:9"], - ["model:9", "ChatPromptTemplate:10"], - ["model:9", "FakeChatModel:11"], - ["model:9", "StrOutputParser:12"], + ["main:0", "wrappedModel:1"], + ["wrappedModel:1", "chain:2"], + ["chain:2", "ChatPromptTemplate:3"], + ["chain:2", "FakeChatModel:4"], + ["chain:2", "StrOutputParser:5"], + ["main:0", "wrappedModel:6"], + ["wrappedModel:6", "chain:7"], + ["chain:7", "ChatPromptTemplate:8"], + ["chain:7", "FakeChatModel:9"], + ["chain:7", "StrOutputParser:10"], ], }); }); From 012035e86fe70ae666b797df54bde0ba5f12b96e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 23:46:02 +0200 Subject: [PATCH 052/373] Infer tracingEnabled --- js/src/langchain.ts | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 83ff2a174..43b1164c1 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -18,7 +18,11 @@ 
export async function getLangchainCallbacks(runTree: RunTree) { // TODO: CallbackManager.configure() is only async due to LangChainTracer // creationg being async, which is unnecessary. - const callbacks = await CallbackManager.configure(); + let callbacks = await CallbackManager.configure(); + if (!callbacks && runTree.tracingEnabled) { + callbacks = new CallbackManager(); + } + let langChainTracer = callbacks?.handlers.find( (handler): handler is LangChainTracer => handler?.name === "langchain_tracer" @@ -169,9 +173,14 @@ export class RunnableTraceable extends Runnable< const runTree = new CallbackManagerRunTree( { + tracingEnabled: !!tracer, ...partialConfig, parent_run: callbackManager?._parentRunId - ? new RunTree({ name: "", id: callbackManager?._parentRunId }) + ? new RunTree({ + name: "", + id: callbackManager?._parentRunId, + tracingEnabled: !!tracer, + }) : undefined, // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore mismatched client version From cf2f022766f93d1404070370d48013547e554391 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 9 May 2024 23:47:39 +0200 Subject: [PATCH 053/373] Link nested langsmith found in @langchain/core --- js/package.json | 3 +++ js/yarn.lock | 15 ++++++--------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/js/package.json b/js/package.json index dc4f02c00..8a25469b0 100644 --- a/js/package.json +++ b/js/package.json @@ -116,6 +116,9 @@ "optional": true } }, + "resolutions": { + "langsmith": "link:." 
+ }, "lint-staged": { "**/*.{ts,tsx}": [ "prettier --write --ignore-unknown", diff --git a/js/yarn.lock b/js/yarn.lock index 4968faf24..721bf9f6e 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -3543,16 +3543,13 @@ kleur@^3.0.3: resolved "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== +"langsmith@link:.": + version "0.0.0" + uid "" + langsmith@~0.1.1: - version "0.1.3" - resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.3.tgz#b086c5dd0709c41da417bc8b672e8f4a03e80809" - integrity sha512-kQMS3QySeU0Qt9A71d9trUXbeKn33HfxpRc7hRjSB967zcdTAngh66NcqYqBflD3nOL4FK6LKmvfb3vbNDEoPg== - dependencies: - "@types/uuid" "^9.0.1" - commander "^10.0.1" - p-queue "^6.6.2" - p-retry "4" - uuid "^9.0.0" + version "0.0.0" + uid "" leven@^3.1.0: version "3.1.0" From 88feb02e8cda2af4705311134c901ee0bbcfb24e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 01:31:49 +0200 Subject: [PATCH 054/373] Use fromRunnableConfig, fix invalid parent run --- js/src/langchain.ts | 128 +++++---------------------------- js/src/run_trees.ts | 40 ++++++----- js/src/tests/traceable.test.ts | 2 - js/src/traceable.ts | 2 +- 4 files changed, 41 insertions(+), 131 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 43b1164c1..4cb5dcd02 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -1,10 +1,7 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { - CallbackManager, - CallbackManagerForChainRun, -} from "@langchain/core/callbacks/manager"; +import { CallbackManager } from "@langchain/core/callbacks/manager"; import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; -import { RunTree, RunTreeConfig } from "./run_trees.js"; +import { RunTree } from "./run_trees.js"; import { Run } from "./schemas.js"; import { Runnable, @@ -35,7 +32,16 @@ export async function getLangchainCallbacks(runTree: RunTree) { 
const runMap = new Map(); - const queue = [runTree]; + // find upward root run + let rootRun = runTree; + const rootVisited = new Set(); + while (rootRun.parent_run) { + if (rootVisited.has(rootRun.id)) break; + rootVisited.add(rootRun.id); + rootRun = rootRun.parent_run; + } + + const queue = [rootRun]; const visited = new Set(); while (queue.length > 0) { @@ -47,10 +53,6 @@ export async function getLangchainCallbacks(runTree: RunTree) { if (current.child_runs) { queue.push(...current.child_runs); } - - if (current.parent_run) { - queue.push(current.parent_run); - } } if (callbacks != null) { @@ -66,73 +68,6 @@ export async function getLangchainCallbacks(runTree: RunTree) { type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>; -class CallbackManagerRunTree extends RunTree { - callbackManager: CallbackManager; - - activeCallbackManager: CallbackManagerForChainRun | undefined = undefined; - - constructor(config: RunTreeConfig, callbackManager: CallbackManager) { - super(config); - - this.callbackManager = callbackManager; - } - - public createChild(config: RunTreeConfig): RunTree { - const child = new CallbackManagerRunTree( - { - ...config, - parent_run: this, - project_name: this.project_name, - client: this.client, - }, - this.activeCallbackManager?.getChild() ?? this.callbackManager - ); - this.child_runs.push(child); - return child as RunTree; - } - - async postRun(): Promise { - // how it is translated in comparison to basic RunTree? - this.activeCallbackManager = await this.callbackManager.handleChainStart( - typeof this.serialized !== "object" && - this.serialized != null && - "lc" in this.serialized - ? 
this.serialized - : { - id: ["langchain", "smith", "CallbackManagerRunTree"], - lc: 1, - type: "not_implemented", - }, - this.inputs, - this.id, - this.run_type, - undefined, - undefined, - this.name - ); - } - - async patchRun(): Promise { - if (this.error) { - await this.activeCallbackManager?.handleChainError( - this.error, - this.id, - this.parent_run?.id, - undefined, - undefined - ); - } else { - await this.activeCallbackManager?.handleChainEnd( - this.outputs ?? {}, - this.id, - this.parent_run?.id, - undefined, - undefined - ); - } - } -} - export class RunnableTraceable extends Runnable< RunInput, RunOutput @@ -157,37 +92,6 @@ export class RunnableTraceable extends Runnable< async invoke(input: RunInput, options?: Partial) { const [config] = this._getOptionsList(options ?? {}, 1); - const callbackManager = await getCallbackManagerForConfig(config); - - const partialConfig = - "langsmith:traceable" in this.func - ? (this.func["langsmith:traceable"] as RunTreeConfig) - : { name: "" }; - - if (!callbackManager) throw new Error("CallbackManager not found"); - - const tracer = callbackManager?.handlers.find( - (handler): handler is LangChainTracer => - handler?.name === "langchain_tracer" - ); - - const runTree = new CallbackManagerRunTree( - { - tracingEnabled: !!tracer, - ...partialConfig, - parent_run: callbackManager?._parentRunId - ? 
new RunTree({ - name: "", - id: callbackManager?._parentRunId, - tracingEnabled: !!tracer, - }) - : undefined, - // eslint-disable-next-line @typescript-eslint/ban-ts-comment - // @ts-ignore mismatched client version - client: tracer?.client, - }, - callbackManager - ); if ( typeof input === "object" && @@ -195,7 +99,7 @@ export class RunnableTraceable extends Runnable< Object.keys(input).length === 1 ) { if ("args" in input && Array.isArray(input)) { - return (await this.func(runTree, ...input)) as RunOutput; + return (await this.func(config, ...input)) as RunOutput; } if ( @@ -209,13 +113,13 @@ export class RunnableTraceable extends Runnable< ) ) { try { - return (await this.func(runTree, input.input)) as RunOutput; + return (await this.func(config, input.input)) as RunOutput; } catch (err) { - return (await this.func(runTree, input)) as RunOutput; + return (await this.func(config, input)) as RunOutput; } } } - return (await this.func(runTree, input)) as RunOutput; + return (await this.func(config, input)) as RunOutput; } } diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index e9aa0d558..eb07870ce 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -158,7 +158,7 @@ export class RunTree implements BaseRun { } static fromRunnableConfig( - config: RunnableConfigLike, + parentConfig: RunnableConfigLike, props: { name: string; tags?: string[]; @@ -166,39 +166,47 @@ export class RunTree implements BaseRun { } ): RunTree { // We only handle the callback manager case for now - const callbackManager = config?.callbacks as + const callbackManager = parentConfig?.callbacks as | CallbackManagerLike | undefined; let parentRun: RunTree | undefined; let projectName: string | undefined; let client: Client | undefined; + if (callbackManager) { const parentRunId = callbackManager?.getParentRunId?.() ?? 
""; const langChainTracer = callbackManager?.handlers?.find( (handler: TracerLike) => handler?.name == "langchain_tracer" ) as LangChainTracerLike | undefined; + parentRun = langChainTracer?.getRun?.(parentRunId); projectName = langChainTracer?.projectName; client = langChainTracer?.client; } - const dedupedTags = [ - ...new Set((parentRun?.tags ?? []).concat(config?.tags ?? [])), - ]; - const dedupedMetadata = { - ...parentRun?.extra?.metadata, - ...config?.metadata, - }; - const rt = new RunTree({ - name: props?.name ?? "", - parent_run: parentRun, - tags: dedupedTags, + + const parentRunTree = new RunTree({ + name: parentRun?.name ?? "", + id: parentRun?.id, client, + // TODO: handle tracing enabled + tracingEnabled: true, + project_name: projectName, + tags: [ + ...new Set((parentRun?.tags ?? []).concat(parentConfig?.tags ?? [])), + ], extra: { - metadata: dedupedMetadata, + metadata: { + ...parentRun?.extra?.metadata, + ...parentConfig?.metadata, + }, }, - project_name: projectName, }); - return rt; + + return parentRunTree.createChild({ + name: props?.name ?? 
"", + tags: props.tags, + metadata: props.metadata, + }); } private static getDefaultConfig(): object { diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 4b03b7c47..d4820c62f 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -844,8 +844,6 @@ describe("langchain", () => { ], }); }); - - test.skip("make sure disabled callback state work correctly", () => {}); }); describe("generator", () => { diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 6da9f9650..1d01217cb 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -486,7 +486,7 @@ export function traceable any>( ]; } - // legacy CallbackManagerRunTree used in runOnDataset + // deprecated: legacy CallbackManagerRunTree used in runOnDataset // override ALS and do not pass-through the run tree if ( isRunTree(firstArg) && From ee7fa33bf8e0d3ec3fc64f5f3bc9a012f84cef56 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 01:47:05 +0200 Subject: [PATCH 055/373] Cleanup --- js/src/langchain.ts | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 4cb5dcd02..0c7f9cc60 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -3,18 +3,12 @@ import { CallbackManager } from "@langchain/core/callbacks/manager"; import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; import { RunTree } from "./run_trees.js"; import { Run } from "./schemas.js"; -import { - Runnable, - RunnableConfig, - getCallbackManagerForConfig, -} from "@langchain/core/runnables"; +import { Runnable, RunnableConfig } from "@langchain/core/runnables"; import { TraceableFunction, isTraceableFunction } from "./traceable.js"; -// TODO: move this to langchain/smith export async function getLangchainCallbacks(runTree: RunTree) { // TODO: CallbackManager.configure() is only async due to LangChainTracer - // creationg being async, which is unnecessary. 
- + // factory being unnecessarily async. let callbacks = await CallbackManager.configure(); if (!callbacks && runTree.tracingEnabled) { callbacks = new CallbackManager(); @@ -66,6 +60,7 @@ export async function getLangchainCallbacks(runTree: RunTree) { return callbacks; } +// eslint-disable-next-line @typescript-eslint/no-explicit-any type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>; export class RunnableTraceable extends Runnable< @@ -93,6 +88,8 @@ export class RunnableTraceable extends Runnable< async invoke(input: RunInput, options?: Partial) { const [config] = this._getOptionsList(options ?? {}, 1); + // TODO: move this code to the runOnDataset / evaluate function instead? + // seems a bit too magical to be here if ( typeof input === "object" && input != null && From c2f5d3dac76c7498e9408ef4c44f98599309a8c4 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 18:42:31 +0200 Subject: [PATCH 056/373] Inline getCurrentRunTree --- js/src/langchain.ts | 11 +++++++++-- js/src/tests/traceable.test.ts | 18 ++++++------------ 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 0c7f9cc60..cfd746b1c 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -4,9 +4,16 @@ import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; import { RunTree } from "./run_trees.js"; import { Run } from "./schemas.js"; import { Runnable, RunnableConfig } from "@langchain/core/runnables"; -import { TraceableFunction, isTraceableFunction } from "./traceable.js"; +import { + TraceableFunction, + getCurrentRunTree, + isTraceableFunction, +} from "./traceable.js"; + +export async function getLangchainCallbacks() { + const runTree: RunTree | undefined = getCurrentRunTree(); + if (!runTree) return undefined; -export async function getLangchainCallbacks(runTree: RunTree) { // TODO: CallbackManager.configure() is only async due to LangChainTracer // factory being unnecessarily 
async. let callbacks = await CallbackManager.configure(); diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index d4820c62f..fd879b6f6 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -665,11 +665,9 @@ describe("langchain", () => { const main = traceable( async (input: { text: string }) => { - const runTree = getCurrentRunTree(); - const callbacks = await getLangchainCallbacks(runTree); - - const response = await chain.invoke(input, { callbacks }); - return response; + return chain.invoke(input, { + callbacks: await getLangchainCallbacks(), + }); }, { name: "main", @@ -773,8 +771,7 @@ describe("langchain", () => { const wrappedModel = new RunnableTraceable({ func: traceable( async (value: { input: string }) => { - const runTree = getCurrentRunTree(); - const callbacks = await getLangchainCallbacks(runTree); + const callbacks = await getLangchainCallbacks(); return chain.invoke( { text: `Wrapped input: ${value.input}` }, @@ -787,18 +784,15 @@ describe("langchain", () => { const main = traceable( async () => { - const runTree = getCurrentRunTree(); - const callbacks = await getLangchainCallbacks(runTree); - return { response: [ await wrappedModel.invoke( { input: "Are you ready?" }, - { callbacks } + { callbacks: await getLangchainCallbacks() } ), await wrappedModel.invoke( { input: "I said, Are. You. Ready?" 
}, - { callbacks } + { callbacks: await getLangchainCallbacks() } ), ], }; From 2efbc8f1a9da1fe2c129e6d0c6226b3b676d5314 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 10 May 2024 21:13:20 +0200 Subject: [PATCH 057/373] Fix lint --- js/src/tests/traceable.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index fd879b6f6..84b74e0bb 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -1,5 +1,5 @@ import type { RunTree, RunTreeConfig } from "../run_trees.js"; -import { ROOT, getCurrentRunTree, traceable } from "../traceable.js"; +import { ROOT, traceable } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; import { FakeChatModel } from "@langchain/core/utils/testing"; From 3e08b0d946ecbddff6a46b429d8699272362559d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 13 May 2024 04:06:51 +0200 Subject: [PATCH 058/373] Split env detection to separate file --- js/src/env.ts | 16 ++++++++++++++++ js/src/traceable.ts | 20 ++------------------ 2 files changed, 18 insertions(+), 18 deletions(-) create mode 100644 js/src/env.ts diff --git a/js/src/env.ts b/js/src/env.ts new file mode 100644 index 000000000..ee7a233d3 --- /dev/null +++ b/js/src/env.ts @@ -0,0 +1,16 @@ +import { getEnvironmentVariable } from "./utils/env.js"; + +export const isTracingEnabled = (tracingEnabled?: boolean): boolean => { + if (tracingEnabled !== undefined) { + return tracingEnabled; + } + const envVars = [ + "LANGSMITH_TRACING_V2", + "LANGCHAIN_TRACING_V2", + "LANGSMITH_TRACING", + "LANGCHAIN_TRACING", + ]; + return Boolean( + envVars.find((envVar) => getEnvironmentVariable(envVar) === "true") + ); +}; diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 1d01217cb..7448b368f 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -8,7 +8,7 @@ import { isRunnableConfigLike, 
} from "./run_trees.js"; import { InvocationParamsSchema, KVMap } from "./schemas.js"; -import { getEnvironmentVariable } from "./utils/env.js"; +import { isTracingEnabled } from "./env.js"; function isPromiseMethod( x: string | symbol @@ -128,21 +128,6 @@ const isReadableStream = (x: unknown): x is ReadableStream => "getReader" in x && typeof x.getReader === "function"; -const tracingIsEnabled = (tracingEnabled?: boolean): boolean => { - if (tracingEnabled !== undefined) { - return tracingEnabled; - } - const envVars = [ - "LANGSMITH_TRACING_V2", - "LANGCHAIN_TRACING_V2", - "LANGSMITH_TRACING", - "LANGCHAIN_TRACING", - ]; - return Boolean( - envVars.find((envVar) => getEnvironmentVariable(envVar) === "true") - ); -}; - const handleRunInputs = (rawInputs: unknown[]): KVMap => { const firstInput = rawInputs[0]; @@ -174,8 +159,7 @@ const getTracingRunTree = ( | ((...args: Args) => InvocationParamsSchema | undefined) | undefined ): RunTree | undefined => { - const tracingEnabled_ = tracingIsEnabled(runTree.tracingEnabled); - if (!tracingEnabled_) { + if (!isTracingEnabled(runTree.tracingEnabled)) { return undefined; } From 716cab37f2871631dc297e9d3e73e4b750e131bc Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 13 May 2024 04:20:43 +0200 Subject: [PATCH 059/373] Add optional @langchain/core dependency for backwards compatibility, expose LangChain handoff functions outside --- js/.gitignore | 4 ++++ js/package.json | 19 ++++++++++++++++++- js/scripts/create-entrypoints.js | 1 + js/src/langchain.ts | 26 ++++++++++++++++++++++---- js/src/tests/traceable.test.ts | 4 ++-- js/tsconfig.json | 1 + 6 files changed, 48 insertions(+), 7 deletions(-) diff --git a/js/.gitignore b/js/.gitignore index bf7ee31ef..6b720735c 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -51,6 +51,10 @@ Chinook_Sqlite.sql /schemas.js /schemas.d.ts /schemas.d.cts +/langchain.cjs +/langchain.js +/langchain.d.ts +/langchain.d.cts /wrappers.cjs /wrappers.js /wrappers.d.ts diff --git 
a/js/package.json b/js/package.json index 8a25469b0..83c861c6f 100644 --- a/js/package.json +++ b/js/package.json @@ -25,6 +25,10 @@ "schemas.js", "schemas.d.ts", "schemas.d.cts", + "langchain.cjs", + "langchain.js", + "langchain.d.ts", + "langchain.d.cts", "wrappers.cjs", "wrappers.js", "wrappers.d.ts", @@ -109,11 +113,15 @@ "typescript": "^5.4.5" }, "peerDependencies": { - "openai": "*" + "openai": "*", + "@langchain/core": "*" }, "peerDependenciesMeta": { "openai": { "optional": true + }, + "@langchain/core": { + "optional": true } }, "resolutions": { @@ -180,6 +188,15 @@ "import": "./schemas.js", "require": "./schemas.cjs" }, + "./langchain": { + "types": { + "import": "./langchain.d.ts", + "require": "./langchain.d.cts", + "default": "./langchain.d.ts" + }, + "import": "./langchain.js", + "require": "./langchain.cjs" + }, "./wrappers": { "types": { "import": "./wrappers.d.ts", diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 3f6307303..1605188f1 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -12,6 +12,7 @@ const entrypoints = { traceable: "traceable", evaluation: "evaluation/index", schemas: "schemas", + langchain: "langchain", wrappers: "wrappers/index", "wrappers/openai": "wrappers/openai", }; diff --git a/js/src/langchain.ts b/js/src/langchain.ts index cfd746b1c..3168b79c3 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -1,17 +1,27 @@ -/* eslint-disable import/no-extraneous-dependencies */ import { CallbackManager } from "@langchain/core/callbacks/manager"; import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; +import { Runnable, RunnableConfig } from "@langchain/core/runnables"; + import { RunTree } from "./run_trees.js"; import { Run } from "./schemas.js"; -import { Runnable, RunnableConfig } from "@langchain/core/runnables"; import { TraceableFunction, getCurrentRunTree, isTraceableFunction, } from "./traceable.js"; -export async function 
getLangchainCallbacks() { - const runTree: RunTree | undefined = getCurrentRunTree(); +/** + * Converts the current run tree active within a traceable-wrapped function + * into a LangChain compatible callback manager. This is useful to handoff tracing + * from LangSmith to LangChain Runnables and LLMs. + * + * @param {RunTree | undefined} currentRunTree Current RunTree from within a traceable-wrapped function. If not provided, the current run tree will be inferred from AsyncLocalStorage. + * @returns {CallbackManager | undefined} Callback manager used by LangChain Runnable objects. + */ +export async function getLangchainCallbacks( + currentRunTree?: RunTree | undefined +) { + const runTree: RunTree | undefined = currentRunTree ?? getCurrentRunTree(); if (!runTree) return undefined; // TODO: CallbackManager.configure() is only async due to LangChainTracer @@ -70,6 +80,10 @@ export async function getLangchainCallbacks() { // eslint-disable-next-line @typescript-eslint/no-explicit-any type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>; +/** + * RunnableTraceable is a Runnable that wraps a traceable function. + * This allows adding Langsmith traced functions into LangChain sequences. 
+ */ export class RunnableTraceable extends Runnable< RunInput, RunOutput @@ -126,4 +140,8 @@ export class RunnableTraceable extends Runnable< return (await this.func(config, input)) as RunOutput; } + + static from(func: AnyTraceableFunction) { + return new RunnableTraceable({ func }); + } } diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 84b74e0bb..d9912756a 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -6,9 +6,9 @@ import { FakeChatModel } from "@langchain/core/utils/testing"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; -import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; import { BaseMessage, HumanMessage } from "@langchain/core/messages"; import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; +import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; test("basic traceable implementation", async () => { const { client, callSpy } = mockClient(); @@ -723,7 +723,7 @@ describe("langchain", () => { const chain = prompt .pipe(llm) - .pipe(new RunnableTraceable({ func: addValueTraceable })) + .pipe(RunnableTraceable.from(addValueTraceable)) .pipe(parser); // eslint-disable-next-line @typescript-eslint/ban-ts-comment diff --git a/js/tsconfig.json b/js/tsconfig.json index 5beb3ad27..02278dc07 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -37,6 +37,7 @@ "src/traceable.ts", "src/evaluation/index.ts", "src/schemas.ts", + "src/langchain.ts", "src/wrappers/index.ts", "src/wrappers/openai.ts" ] From c1e438123b6eafa15358b4a4c300f9692bd58e75 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 13 May 2024 18:04:28 +0200 Subject: [PATCH 060/373] Make sure we pass project name and example ID --- js/src/langchain.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff 
--git a/js/src/langchain.ts b/js/src/langchain.ts index 3168b79c3..1427fbe83 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -71,7 +71,12 @@ export async function getLangchainCallbacks( } if (langChainTracer != null) { - Object.assign(langChainTracer, { runMap, client: runTree.client }); + Object.assign(langChainTracer, { + runMap, + client: runTree.client, + projectName: runTree.project_name || langChainTracer.projectName, + exampleId: runTree.reference_example_id || langChainTracer.exampleId, + }); } return callbacks; From dab84690b188c1854ddfd968fcdbaf4787cde75c Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 23:40:36 +0200 Subject: [PATCH 061/373] Remove the output parsing stuff --- js/src/langchain.ts | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 1427fbe83..505e05424 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -113,36 +113,6 @@ export class RunnableTraceable extends Runnable< async invoke(input: RunInput, options?: Partial) { const [config] = this._getOptionsList(options ?? {}, 1); - - // TODO: move this code to the runOnDataset / evaluate function instead? 
- // seems a bit too magical to be here - if ( - typeof input === "object" && - input != null && - Object.keys(input).length === 1 - ) { - if ("args" in input && Array.isArray(input)) { - return (await this.func(config, ...input)) as RunOutput; - } - - if ( - "input" in input && - !( - typeof input === "object" && - input != null && - !Array.isArray(input) && - // eslint-disable-next-line no-instanceof/no-instanceof - !(input instanceof Date) - ) - ) { - try { - return (await this.func(config, input.input)) as RunOutput; - } catch (err) { - return (await this.func(config, input)) as RunOutput; - } - } - } - return (await this.func(config, input)) as RunOutput; } From 5b1db1263cb842285f2c173f7c8e37ae443a3a7a Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 14 May 2024 23:57:23 +0200 Subject: [PATCH 062/373] Split context in a separate entrypoint --- js/.gitignore | 4 ++++ js/package.json | 13 ++++++++++ js/scripts/create-entrypoints.js | 1 + js/src/traceable.ts | 25 ++++--------------- js/src/traceable/context.ts | 41 ++++++++++++++++++++++++++++++++ js/tsconfig.json | 3 ++- 6 files changed, 66 insertions(+), 21 deletions(-) create mode 100644 js/src/traceable/context.ts diff --git a/js/.gitignore b/js/.gitignore index 6b720735c..99a36b823 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -63,6 +63,10 @@ Chinook_Sqlite.sql /wrappers/openai.js /wrappers/openai.d.ts /wrappers/openai.d.cts +/traceable/context.cjs +/traceable/context.js +/traceable/context.d.ts +/traceable/context.d.cts /index.cjs /index.js /index.d.ts diff --git a/js/package.json b/js/package.json index 83c861c6f..b803a29ea 100644 --- a/js/package.json +++ b/js/package.json @@ -37,6 +37,10 @@ "wrappers/openai.js", "wrappers/openai.d.ts", "wrappers/openai.d.cts", + "traceable/context.cjs", + "traceable/context.js", + "traceable/context.d.ts", + "traceable/context.d.cts", "index.cjs", "index.js", "index.d.ts", @@ -215,6 +219,15 @@ "import": "./wrappers/openai.js", "require": 
"./wrappers/openai.cjs" }, + "./traceable/context": { + "types": { + "import": "./traceable/context.d.ts", + "require": "./traceable/context.d.cts", + "default": "./traceable/context.d.ts" + }, + "import": "./traceable/context.js", + "require": "./traceable/context.cjs" + }, "./package.json": "./package.json" } } \ No newline at end of file diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 1605188f1..0374336da 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -15,6 +15,7 @@ const entrypoints = { langchain: "langchain", wrappers: "wrappers/index", "wrappers/openai": "wrappers/openai", + "traceable/context": "traceable/context", }; const updateJsonFile = (relativePath, updateFunction) => { const contents = fs.readFileSync(relativePath).toString(); diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 7448b368f..c06e3ced5 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -9,6 +9,7 @@ import { } from "./run_trees.js"; import { InvocationParamsSchema, KVMap } from "./schemas.js"; import { isTracingEnabled } from "./env.js"; +import { TraceableLocalStorageContext } from "./traceable/context.js"; function isPromiseMethod( x: string | symbol @@ -19,7 +20,9 @@ function isPromiseMethod( return false; } -const asyncLocalStorage = new AsyncLocalStorage(); +const asyncLocalStorage = TraceableLocalStorageContext.register( + new AsyncLocalStorage() +); export const ROOT = Symbol("langsmith:traceable:root"); @@ -677,25 +680,7 @@ export function traceable any>( return traceableFunc as TraceableFunction; } -/** - * Return the current run tree from within a traceable-wrapped function. - * Will throw an error if called outside of a traceable function. - * - * @returns The run tree for the given context. 
- */ -export function getCurrentRunTree(): RunTree { - const runTree = asyncLocalStorage.getStore(); - if (runTree === undefined) { - throw new Error( - [ - "Could not get the current run tree.", - "", - "Please make sure you are calling this method within a traceable function.", - ].join("\n") - ); - } - return runTree; -} +export { getCurrentRunTree } from "./traceable/context.js"; export function isTraceableFunction( x: unknown diff --git a/js/src/traceable/context.ts b/js/src/traceable/context.ts new file mode 100644 index 000000000..c646f3cef --- /dev/null +++ b/js/src/traceable/context.ts @@ -0,0 +1,41 @@ +import { type AsyncLocalStorage } from "node:async_hooks"; +import { RunTree } from "../run_trees.js"; + +export const TraceableLocalStorageContext = (() => { + let storage: AsyncLocalStorage; + + return { + register: (value: AsyncLocalStorage) => { + storage ??= value; + return storage; + }, + get storage() { + return storage; + }, + }; +})(); + +/** + * Return the current run tree from within a traceable-wrapped function. + * Will throw an error if called outside of a traceable function. + * + * @returns The run tree for the given context. 
+ */ +export const getCurrentRunTree = () => { + if (!TraceableLocalStorageContext.storage) { + throw new Error("Could not find the traceable storage context"); + } + + const runTree = TraceableLocalStorageContext.storage.getStore(); + if (runTree === undefined) { + throw new Error( + [ + "Could not get the current run tree.", + "", + "Please make sure you are calling this method within a traceable function.", + ].join("\n") + ); + } + + return runTree; +}; diff --git a/js/tsconfig.json b/js/tsconfig.json index 02278dc07..973744e15 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -39,7 +39,8 @@ "src/schemas.ts", "src/langchain.ts", "src/wrappers/index.ts", - "src/wrappers/openai.ts" + "src/wrappers/openai.ts", + "src/traceable/context.ts" ] } } From 04e63986928128149e4439ba626033e2545ebadc Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 00:29:27 +0200 Subject: [PATCH 063/373] Remove type dependency on AsyncLocalStorage --- js/src/traceable/context.ts | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/js/src/traceable/context.ts b/js/src/traceable/context.ts index c646f3cef..f622da37e 100644 --- a/js/src/traceable/context.ts +++ b/js/src/traceable/context.ts @@ -1,11 +1,16 @@ -import { type AsyncLocalStorage } from "node:async_hooks"; import { RunTree } from "../run_trees.js"; +interface AsyncStorageLike { + getStore: () => RunTree | undefined; + + run: (context: RunTree | undefined, fn: () => void) => void; +} + export const TraceableLocalStorageContext = (() => { - let storage: AsyncLocalStorage; + let storage: AsyncStorageLike; return { - register: (value: AsyncLocalStorage) => { + register: (value: AsyncStorageLike) => { storage ??= value; return storage; }, From 06b1799a6411b008d00a1591ba1b2660039bbb62 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 00:33:51 +0200 Subject: [PATCH 064/373] use singletons instead --- js/.gitignore | 8 ++++---- js/package.json | 20 +++++++++---------- 
js/scripts/create-entrypoints.js | 2 +- .../context.ts => singletons/traceable.ts} | 0 js/src/traceable.ts | 4 ++-- js/tsconfig.json | 2 +- 6 files changed, 18 insertions(+), 18 deletions(-) rename js/src/{traceable/context.ts => singletons/traceable.ts} (100%) diff --git a/js/.gitignore b/js/.gitignore index 99a36b823..ae5b5a591 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -63,10 +63,10 @@ Chinook_Sqlite.sql /wrappers/openai.js /wrappers/openai.d.ts /wrappers/openai.d.cts -/traceable/context.cjs -/traceable/context.js -/traceable/context.d.ts -/traceable/context.d.cts +/singletons/traceable.cjs +/singletons/traceable.js +/singletons/traceable.d.ts +/singletons/traceable.d.cts /index.cjs /index.js /index.d.ts diff --git a/js/package.json b/js/package.json index b803a29ea..606a1ca9c 100644 --- a/js/package.json +++ b/js/package.json @@ -37,10 +37,10 @@ "wrappers/openai.js", "wrappers/openai.d.ts", "wrappers/openai.d.cts", - "traceable/context.cjs", - "traceable/context.js", - "traceable/context.d.ts", - "traceable/context.d.cts", + "singletons/traceable.cjs", + "singletons/traceable.js", + "singletons/traceable.d.ts", + "singletons/traceable.d.cts", "index.cjs", "index.js", "index.d.ts", @@ -219,14 +219,14 @@ "import": "./wrappers/openai.js", "require": "./wrappers/openai.cjs" }, - "./traceable/context": { + "./singletons/traceable": { "types": { - "import": "./traceable/context.d.ts", - "require": "./traceable/context.d.cts", - "default": "./traceable/context.d.ts" + "import": "./singletons/traceable.d.ts", + "require": "./singletons/traceable.d.cts", + "default": "./singletons/traceable.d.ts" }, - "import": "./traceable/context.js", - "require": "./traceable/context.cjs" + "import": "./singletons/traceable.js", + "require": "./singletons/traceable.cjs" }, "./package.json": "./package.json" } diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 0374336da..60fc42692 100644 --- a/js/scripts/create-entrypoints.js +++ 
b/js/scripts/create-entrypoints.js @@ -15,7 +15,7 @@ const entrypoints = { langchain: "langchain", wrappers: "wrappers/index", "wrappers/openai": "wrappers/openai", - "traceable/context": "traceable/context", + "singletons/traceable": "singletons/traceable", }; const updateJsonFile = (relativePath, updateFunction) => { const contents = fs.readFileSync(relativePath).toString(); diff --git a/js/src/traceable/context.ts b/js/src/singletons/traceable.ts similarity index 100% rename from js/src/traceable/context.ts rename to js/src/singletons/traceable.ts diff --git a/js/src/traceable.ts b/js/src/traceable.ts index c06e3ced5..cbdd8c9aa 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -9,7 +9,7 @@ import { } from "./run_trees.js"; import { InvocationParamsSchema, KVMap } from "./schemas.js"; import { isTracingEnabled } from "./env.js"; -import { TraceableLocalStorageContext } from "./traceable/context.js"; +import { TraceableLocalStorageContext } from "./singletons/traceable.js"; function isPromiseMethod( x: string | symbol @@ -680,7 +680,7 @@ export function traceable any>( return traceableFunc as TraceableFunction; } -export { getCurrentRunTree } from "./traceable/context.js"; +export { getCurrentRunTree } from "./singletons/traceable.js"; export function isTraceableFunction( x: unknown diff --git a/js/tsconfig.json b/js/tsconfig.json index 973744e15..2a7d03325 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -40,7 +40,7 @@ "src/langchain.ts", "src/wrappers/index.ts", "src/wrappers/openai.ts", - "src/traceable/context.ts" + "src/singletons/traceable.ts" ] } } From dcf569e6a0ae8d108b54be6fa595fd8cddfb5123 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 03:11:44 +0200 Subject: [PATCH 065/373] Move main logic to singletons --- js/src/singletons/traceable.ts | 99 +++++++++++++++++++++++++++++++- js/src/traceable.ts | 100 +++++---------------------------- 2 files changed, 111 insertions(+), 88 deletions(-) diff --git 
a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index f622da37e..af8f6ca42 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -1,4 +1,4 @@ -import { RunTree } from "../run_trees.js"; +import { RunTree, RunnableConfigLike } from "../run_trees.js"; interface AsyncStorageLike { getStore: () => RunTree | undefined; @@ -44,3 +44,100 @@ export const getCurrentRunTree = () => { return runTree; }; + +export const ROOT = Symbol("langsmith:traceable:root"); + +type SmartPromise = T extends AsyncGenerator + ? T + : T extends Promise + ? T + : Promise; + +type WrapArgReturnPair = Pair extends [ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + infer Args extends any[], + infer Return +] + ? Args extends [RunTree, ...infer RestArgs] + ? { + ( + runTree: RunTree | typeof ROOT, + ...args: RestArgs + ): SmartPromise; + (config: RunnableConfigLike, ...args: RestArgs): SmartPromise; + } + : { + (...args: Args): SmartPromise; + (runTree: RunTree, ...rest: Args): SmartPromise; + (config: RunnableConfigLike, ...args: Args): SmartPromise; + } + : never; + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( + x: infer I +) => void + ? I + : never; + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export type TraceableFunction any> = + // function overloads are represented as intersections rather than unions + // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 + Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + (...args: infer A4): infer R4; + (...args: infer A5): infer R5; + } + ? 
UnionToIntersection< + WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]> + > + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + (...args: infer A4): infer R4; + } + ? UnionToIntersection< + WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]> + > + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + } + ? UnionToIntersection> + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + } + ? UnionToIntersection> + : Func extends { + (...args: infer A1): infer R1; + } + ? UnionToIntersection> + : never; + +export function isTraceableFunction( + x: unknown + // eslint-disable-next-line @typescript-eslint/no-explicit-any +): x is TraceableFunction { + return typeof x === "function" && "langsmith:traceable" in x; +} + +function isKVMap(x: unknown): x is Record { + if (typeof x !== "object" || x == null) { + return false; + } + + const prototype = Object.getPrototypeOf(x); + return ( + (prototype === null || + prototype === Object.prototype || + Object.getPrototypeOf(prototype) === null) && + !(Symbol.toStringTag in x) && + !(Symbol.iterator in x) + ); +} diff --git a/js/src/traceable.ts b/js/src/traceable.ts index cbdd8c9aa..ece357efe 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -9,7 +9,11 @@ import { } from "./run_trees.js"; import { InvocationParamsSchema, KVMap } from "./schemas.js"; import { isTracingEnabled } from "./env.js"; -import { TraceableLocalStorageContext } from "./singletons/traceable.js"; +import { + ROOT, + TraceableFunction, + TraceableLocalStorageContext, +} from "./singletons/traceable.js"; function isPromiseMethod( x: string | symbol @@ -24,83 +28,6 @@ const asyncLocalStorage = TraceableLocalStorageContext.register( new AsyncLocalStorage() ); -export const ROOT = Symbol("langsmith:traceable:root"); - -export type RunTreeLike = RunTree; - -type 
SmartPromise = T extends AsyncGenerator - ? T - : T extends Promise - ? T - : Promise; - -type WrapArgReturnPair = Pair extends [ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - infer Args extends any[], - infer Return -] - ? Args extends [RunTreeLike, ...infer RestArgs] - ? { - ( - runTree: RunTreeLike | typeof ROOT, - ...args: RestArgs - ): SmartPromise; - (config: RunnableConfigLike, ...args: RestArgs): SmartPromise; - } - : { - (...args: Args): SmartPromise; - (runTree: RunTreeLike, ...rest: Args): SmartPromise; - (config: RunnableConfigLike, ...args: Args): SmartPromise; - } - : never; - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( - x: infer I -) => void - ? I - : never; - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export type TraceableFunction any> = - // function overloads are represented as intersections rather than unions - // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 - Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - (...args: infer A4): infer R4; - (...args: infer A5): infer R5; - } - ? UnionToIntersection< - WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]> - > - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - (...args: infer A4): infer R4; - } - ? UnionToIntersection< - WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]> - > - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - } - ? UnionToIntersection> - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - } - ? UnionToIntersection> - : Func extends { - (...args: infer A1): infer R1; - } - ? 
UnionToIntersection> - : never; - const isAsyncIterable = (x: unknown): x is AsyncIterable => x != null && typeof x === "object" && @@ -399,7 +326,7 @@ export function traceable any>( const { aggregator, argsConfigPath, ...runTreeConfig } = config ?? {}; const traceableFunc = ( - ...args: Inputs | [RunTreeLike, ...Inputs] | [RunnableConfigLike, ...Inputs] + ...args: Inputs | [RunTree, ...Inputs] | [RunnableConfigLike, ...Inputs] ) => { let ensuredConfig: RunTreeConfig; try { @@ -680,14 +607,13 @@ export function traceable any>( return traceableFunc as TraceableFunction; } -export { getCurrentRunTree } from "./singletons/traceable.js"; - -export function isTraceableFunction( - x: unknown - // eslint-disable-next-line @typescript-eslint/no-explicit-any -): x is TraceableFunction { - return typeof x === "function" && "langsmith:traceable" in x; -} +export { + type TraceableFunction, + getCurrentRunTree, + isTraceableFunction, + ROOT, +} from "./singletons/traceable.js"; +export type RunTreeLike = RunTree; function isKVMap(x: unknown): x is Record { if (typeof x !== "object" || x == null) { From b74cd9856361ac2793a1399f3636fd722de7acd8 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 16:56:29 +0200 Subject: [PATCH 066/373] Use Symbol.for instead --- js/src/singletons/traceable.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index af8f6ca42..a8645179d 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -45,7 +45,7 @@ export const getCurrentRunTree = () => { return runTree; }; -export const ROOT = Symbol("langsmith:traceable:root"); +export const ROOT = Symbol.for("langsmith:traceable:root"); type SmartPromise = T extends AsyncGenerator ? 
T From 4f8bebeeff5770543ce8d7bcc5a1a0e1543f7d5e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 16:58:09 +0200 Subject: [PATCH 067/373] Split langchain tests out --- js/src/tests/traceable.test.ts | 188 ---------------------- js/src/tests/traceable_langchain.test.ts | 196 +++++++++++++++++++++++ 2 files changed, 196 insertions(+), 188 deletions(-) create mode 100644 js/src/tests/traceable_langchain.test.ts diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index d9912756a..38b05e4f7 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -652,194 +652,6 @@ describe("deferred input", () => { }); }); -describe("langchain", () => { - test("explicit traceable to langchain", async () => { - const { client, callSpy } = mockClient(); - - const llm = new FakeChatModel({}); - const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ - ["human", "{text}"], - ]); - const parser = new StringOutputParser(); - const chain = prompt.pipe(llm).pipe(parser); - - const main = traceable( - async (input: { text: string }) => { - return chain.invoke(input, { - callbacks: await getLangchainCallbacks(), - }); - }, - { - name: "main", - client, - tracingEnabled: true, - tags: ["welcome"], - metadata: { hello: "world" }, - } - ); - - const result = await main({ text: "Hello world" }); - expect(result).toEqual("Hello world"); - - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "main:0", - "RunnableSequence:1", - "ChatPromptTemplate:2", - "FakeChatModel:3", - "StrOutputParser:4", - ], - edges: [ - ["main:0", "RunnableSequence:1"], - ["RunnableSequence:1", "ChatPromptTemplate:2"], - ["RunnableSequence:1", "FakeChatModel:3"], - ["RunnableSequence:1", "StrOutputParser:4"], - ], - data: { - "main:0": { - inputs: { text: "Hello world" }, - outputs: { outputs: "Hello world" }, - tags: ["welcome"], - extra: { metadata: { hello: "world" } }, - }, - }, - }); - }); - - test("explicit 
langchain to traceable", async () => { - const { client, callSpy } = mockClient(); - - const llm = new FakeChatModel({}); - const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ - ["human", "{text}"], - ]); - const parser = new StringOutputParser(); - - const addValueTraceable = traceable( - (msg: BaseMessage) => - new HumanMessage({ content: msg.content + " world" }), - { name: "add_negligible_value" } - ); - - const chain = prompt - .pipe(llm) - .pipe(RunnableTraceable.from(addValueTraceable)) - .pipe(parser); - - // eslint-disable-next-line @typescript-eslint/ban-ts-comment - // @ts-ignore client might be of different type - const tracer = new LangChainTracer({ client }); - const response = await chain.invoke( - { text: "Hello" }, - { callbacks: [tracer] } - ); - - // callbacks are backgrounded by default - await awaitAllCallbacks(); - - expect(response).toEqual("Hello world"); - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "RunnableSequence:0", - "ChatPromptTemplate:1", - "FakeChatModel:2", - "add_negligible_value:3", - "StrOutputParser:4", - ], - edges: [ - ["RunnableSequence:0", "ChatPromptTemplate:1"], - ["RunnableSequence:0", "FakeChatModel:2"], - ["RunnableSequence:0", "add_negligible_value:3"], - ["RunnableSequence:0", "StrOutputParser:4"], - ], - }); - }); - - test("explicit nested", async () => { - const { client, callSpy } = mockClient(); - - const llm = new FakeChatModel({}); - const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ - ["human", "{text}"], - ]); - const parser = new StringOutputParser(); - const chain = prompt - .pipe(llm) - .pipe(parser) - .withConfig({ runName: "chain" }); - - const wrappedModel = new RunnableTraceable({ - func: traceable( - async (value: { input: string }) => { - const callbacks = await getLangchainCallbacks(); - - return chain.invoke( - { text: `Wrapped input: ${value.input}` }, - { callbacks } - ); - }, - { name: "wrappedModel" } - ), - }); - - const main 
= traceable( - async () => { - return { - response: [ - await wrappedModel.invoke( - { input: "Are you ready?" }, - { callbacks: await getLangchainCallbacks() } - ), - await wrappedModel.invoke( - { input: "I said, Are. You. Ready?" }, - { callbacks: await getLangchainCallbacks() } - ), - ], - }; - }, - { name: "main", client, tracingEnabled: true } - ); - - const result = await main(); - await awaitAllCallbacks(); - - expect(result).toEqual({ - response: [ - "Wrapped input: Are you ready?", - "Wrapped input: I said, Are. You. Ready?", - ], - }); - - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "main:0", - "wrappedModel:1", - "chain:2", - "ChatPromptTemplate:3", - "FakeChatModel:4", - "StrOutputParser:5", - "wrappedModel:6", - "chain:7", - "ChatPromptTemplate:8", - "FakeChatModel:9", - "StrOutputParser:10", - ], - edges: [ - ["main:0", "wrappedModel:1"], - ["wrappedModel:1", "chain:2"], - ["chain:2", "ChatPromptTemplate:3"], - ["chain:2", "FakeChatModel:4"], - ["chain:2", "StrOutputParser:5"], - ["main:0", "wrappedModel:6"], - ["wrappedModel:6", "chain:7"], - ["chain:7", "ChatPromptTemplate:8"], - ["chain:7", "FakeChatModel:9"], - ["chain:7", "StrOutputParser:10"], - ], - }); - }); -}); - describe("generator", () => { function gatherAll(iterator: Iterator) { const chunks: unknown[] = []; diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts new file mode 100644 index 000000000..7b49b0707 --- /dev/null +++ b/js/src/tests/traceable_langchain.test.ts @@ -0,0 +1,196 @@ +import { traceable } from "../traceable.js"; +import { getAssumedTreeFromCalls } from "./utils/tree.js"; +import { mockClient } from "./utils/mock_client.js"; +import { FakeChatModel } from "@langchain/core/utils/testing"; +import { ChatPromptTemplate } from "@langchain/core/prompts"; +import { StringOutputParser } from "@langchain/core/output_parsers"; +import { LangChainTracer } from 
"@langchain/core/tracers/tracer_langchain"; +import { BaseMessage, HumanMessage } from "@langchain/core/messages"; +import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; +import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; + +test("explicit traceable to langchain", async () => { + const { client, callSpy } = mockClient(); + + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + const chain = prompt.pipe(llm).pipe(parser); + + const main = traceable( + async (input: { text: string }) => { + return chain.invoke(input, { + callbacks: await getLangchainCallbacks(), + }); + }, + { + name: "main", + client, + tracingEnabled: true, + tags: ["welcome"], + metadata: { hello: "world" }, + } + ); + + const result = await main({ text: "Hello world" }); + expect(result).toEqual("Hello world"); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "RunnableSequence:1", + "ChatPromptTemplate:2", + "FakeChatModel:3", + "StrOutputParser:4", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["RunnableSequence:1", "ChatPromptTemplate:2"], + ["RunnableSequence:1", "FakeChatModel:3"], + ["RunnableSequence:1", "StrOutputParser:4"], + ], + data: { + "main:0": { + inputs: { text: "Hello world" }, + outputs: { outputs: "Hello world" }, + tags: ["welcome"], + extra: { metadata: { hello: "world" } }, + }, + }, + }); + + test("explicit langchain to traceable", async () => { + const { client, callSpy } = mockClient(); + + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + + const addValueTraceable = traceable( + (msg: BaseMessage) => + new HumanMessage({ content: msg.content + " world" }), + { name: "add_negligible_value" } + ); + + const chain = prompt + 
.pipe(llm) + .pipe(RunnableTraceable.from(addValueTraceable)) + .pipe(parser); + + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + const tracer = new LangChainTracer({ client }); + const response = await chain.invoke( + { text: "Hello" }, + { callbacks: [tracer] } + ); + + // callbacks are backgrounded by default + await awaitAllCallbacks(); + + expect(response).toEqual("Hello world"); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "RunnableSequence:0", + "ChatPromptTemplate:1", + "FakeChatModel:2", + "add_negligible_value:3", + "StrOutputParser:4", + ], + edges: [ + ["RunnableSequence:0", "ChatPromptTemplate:1"], + ["RunnableSequence:0", "FakeChatModel:2"], + ["RunnableSequence:0", "add_negligible_value:3"], + ["RunnableSequence:0", "StrOutputParser:4"], + ], + }); + }); + + test("explicit nested", async () => { + const { client, callSpy } = mockClient(); + + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + const chain = prompt + .pipe(llm) + .pipe(parser) + .withConfig({ runName: "chain" }); + + const wrappedModel = new RunnableTraceable({ + func: traceable( + async (value: { input: string }) => { + const callbacks = await getLangchainCallbacks(); + + return chain.invoke( + { text: `Wrapped input: ${value.input}` }, + { callbacks } + ); + }, + { name: "wrappedModel" } + ), + }); + + const main = traceable( + async () => { + return { + response: [ + await wrappedModel.invoke( + { input: "Are you ready?" }, + { callbacks: await getLangchainCallbacks() } + ), + await wrappedModel.invoke( + { input: "I said, Are. You. Ready?" 
}, + { callbacks: await getLangchainCallbacks() } + ), + ], + }; + }, + { name: "main", client, tracingEnabled: true } + ); + + const result = await main(); + await awaitAllCallbacks(); + + expect(result).toEqual({ + response: [ + "Wrapped input: Are you ready?", + "Wrapped input: I said, Are. You. Ready?", + ], + }); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "wrappedModel:1", + "chain:2", + "ChatPromptTemplate:3", + "FakeChatModel:4", + "StrOutputParser:5", + "wrappedModel:6", + "chain:7", + "ChatPromptTemplate:8", + "FakeChatModel:9", + "StrOutputParser:10", + ], + edges: [ + ["main:0", "wrappedModel:1"], + ["wrappedModel:1", "chain:2"], + ["chain:2", "ChatPromptTemplate:3"], + ["chain:2", "FakeChatModel:4"], + ["chain:2", "StrOutputParser:5"], + ["main:0", "wrappedModel:6"], + ["wrappedModel:6", "chain:7"], + ["chain:7", "ChatPromptTemplate:8"], + ["chain:7", "FakeChatModel:9"], + ["chain:7", "StrOutputParser:10"], + ], + }); + }); +}); From 157685f4f63cb543f81ca71ed1f8ce428df04ce8 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 17:09:54 +0200 Subject: [PATCH 068/373] Add tests verifying stream and batch --- js/src/tests/traceable_langchain.test.ts | 311 ++++++++++++++--------- 1 file changed, 195 insertions(+), 116 deletions(-) diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index 7b49b0707..b3056ac53 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -9,9 +9,7 @@ import { BaseMessage, HumanMessage } from "@langchain/core/messages"; import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; -test("explicit traceable to langchain", async () => { - const { client, callSpy } = mockClient(); - +describe("to langchain", () => { const llm = new FakeChatModel({}); const prompt = 
ChatPromptTemplate.fromMessages<{ text: string }>([ ["human", "{text}"], @@ -19,49 +17,133 @@ test("explicit traceable to langchain", async () => { const parser = new StringOutputParser(); const chain = prompt.pipe(llm).pipe(parser); - const main = traceable( - async (input: { text: string }) => { - return chain.invoke(input, { - callbacks: await getLangchainCallbacks(), - }); - }, - { - name: "main", - client, - tracingEnabled: true, - tags: ["welcome"], - metadata: { hello: "world" }, + test("invoke", async () => { + const { client, callSpy } = mockClient(); + + const main = traceable( + async (input: { text: string }) => { + return chain.invoke(input, { + callbacks: await getLangchainCallbacks(), + }); + }, + { + name: "main", + client, + tracingEnabled: true, + tags: ["welcome"], + metadata: { hello: "world" }, + } + ); + + const result = await main({ text: "Hello world" }); + expect(result).toEqual("Hello world"); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "RunnableSequence:1", + "ChatPromptTemplate:2", + "FakeChatModel:3", + "StrOutputParser:4", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["RunnableSequence:1", "ChatPromptTemplate:2"], + ["RunnableSequence:1", "FakeChatModel:3"], + ["RunnableSequence:1", "StrOutputParser:4"], + ], + data: { + "main:0": { + inputs: { text: "Hello world" }, + outputs: { outputs: "Hello world" }, + tags: ["welcome"], + extra: { metadata: { hello: "world" } }, + }, + }, + }); + }); + + test("to langchain stream", async () => { + const { client, callSpy } = mockClient(); + + const main = traceable( + async function* main(input: { text: string }) { + for await (const token of await chain.stream(input, { + callbacks: await getLangchainCallbacks(), + })) { + yield token; + } + }, + { client, tracingEnabled: true } + ); + + const result = []; + for await (const token of main({ text: "Hello world" })) { + result.push(token); } - ); - const result = await main({ text: 
"Hello world" }); - expect(result).toEqual("Hello world"); + expect(result).toEqual(["Hello world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "RunnableSequence:1", + "ChatPromptTemplate:2", + "FakeChatModel:3", + "StrOutputParser:4", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["RunnableSequence:1", "ChatPromptTemplate:2"], + ["RunnableSequence:1", "FakeChatModel:3"], + ["RunnableSequence:1", "StrOutputParser:4"], + ], + }); + }); - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "main:0", - "RunnableSequence:1", - "ChatPromptTemplate:2", - "FakeChatModel:3", - "StrOutputParser:4", - ], - edges: [ - ["main:0", "RunnableSequence:1"], - ["RunnableSequence:1", "ChatPromptTemplate:2"], - ["RunnableSequence:1", "FakeChatModel:3"], - ["RunnableSequence:1", "StrOutputParser:4"], - ], - data: { - "main:0": { - inputs: { text: "Hello world" }, - outputs: { outputs: "Hello world" }, - tags: ["welcome"], - extra: { metadata: { hello: "world" } }, + test("to langchain batch", async () => { + const { client, callSpy } = mockClient(); + + const main = traceable( + async (input: { texts: string[] }) => { + return chain.batch( + input.texts.map((text) => ({ text })), + { callbacks: await getLangchainCallbacks() } + ); }, - }, + { name: "main", client, tracingEnabled: true } + ); + + const result = await main({ texts: ["Hello world", "Who are you?"] }); + + expect(result).toEqual(["Hello world", "Who are you?"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "RunnableSequence:1", + "RunnableSequence:2", + "ChatPromptTemplate:3", + "ChatPromptTemplate:4", + "FakeChatModel:5", + "FakeChatModel:6", + "StrOutputParser:7", + "StrOutputParser:8", + ], + edges: [ + ["main:0", "RunnableSequence:1"], + ["main:0", "RunnableSequence:2"], + ["RunnableSequence:1", "ChatPromptTemplate:3"], + ["RunnableSequence:2", "ChatPromptTemplate:4"], + 
["RunnableSequence:1", "FakeChatModel:5"], + ["RunnableSequence:2", "FakeChatModel:6"], + ["RunnableSequence:1", "StrOutputParser:7"], + ["RunnableSequence:2", "StrOutputParser:8"], + ], + }); }); +}); - test("explicit langchain to traceable", async () => { +describe("to traceable", () => { + test("invoke", async () => { const { client, callSpy } = mockClient(); const llm = new FakeChatModel({}); @@ -109,88 +191,85 @@ test("explicit traceable to langchain", async () => { ], }); }); +}); - test("explicit nested", async () => { - const { client, callSpy } = mockClient(); +test("explicit nested", async () => { + const { client, callSpy } = mockClient(); - const llm = new FakeChatModel({}); - const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ - ["human", "{text}"], - ]); - const parser = new StringOutputParser(); - const chain = prompt - .pipe(llm) - .pipe(parser) - .withConfig({ runName: "chain" }); - - const wrappedModel = new RunnableTraceable({ - func: traceable( - async (value: { input: string }) => { - const callbacks = await getLangchainCallbacks(); - - return chain.invoke( - { text: `Wrapped input: ${value.input}` }, - { callbacks } - ); - }, - { name: "wrappedModel" } - ), - }); + const llm = new FakeChatModel({}); + const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([ + ["human", "{text}"], + ]); + const parser = new StringOutputParser(); + const chain = prompt.pipe(llm).pipe(parser).withConfig({ runName: "chain" }); - const main = traceable( - async () => { - return { - response: [ - await wrappedModel.invoke( - { input: "Are you ready?" }, - { callbacks: await getLangchainCallbacks() } - ), - await wrappedModel.invoke( - { input: "I said, Are. You. Ready?" 
}, - { callbacks: await getLangchainCallbacks() } - ), - ], - }; + const wrappedModel = new RunnableTraceable({ + func: traceable( + async (value: { input: string }) => { + const callbacks = await getLangchainCallbacks(); + + return chain.invoke( + { text: `Wrapped input: ${value.input}` }, + { callbacks } + ); }, - { name: "main", client, tracingEnabled: true } - ); + { name: "wrappedModel" } + ), + }); - const result = await main(); - await awaitAllCallbacks(); + const main = traceable( + async () => { + return { + response: [ + await wrappedModel.invoke( + { input: "Are you ready?" }, + { callbacks: await getLangchainCallbacks() } + ), + await wrappedModel.invoke( + { input: "I said, Are. You. Ready?" }, + { callbacks: await getLangchainCallbacks() } + ), + ], + }; + }, + { name: "main", client, tracingEnabled: true } + ); - expect(result).toEqual({ - response: [ - "Wrapped input: Are you ready?", - "Wrapped input: I said, Are. You. Ready?", - ], - }); + const result = await main(); + await awaitAllCallbacks(); - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "main:0", - "wrappedModel:1", - "chain:2", - "ChatPromptTemplate:3", - "FakeChatModel:4", - "StrOutputParser:5", - "wrappedModel:6", - "chain:7", - "ChatPromptTemplate:8", - "FakeChatModel:9", - "StrOutputParser:10", - ], - edges: [ - ["main:0", "wrappedModel:1"], - ["wrappedModel:1", "chain:2"], - ["chain:2", "ChatPromptTemplate:3"], - ["chain:2", "FakeChatModel:4"], - ["chain:2", "StrOutputParser:5"], - ["main:0", "wrappedModel:6"], - ["wrappedModel:6", "chain:7"], - ["chain:7", "ChatPromptTemplate:8"], - ["chain:7", "FakeChatModel:9"], - ["chain:7", "StrOutputParser:10"], - ], - }); + expect(result).toEqual({ + response: [ + "Wrapped input: Are you ready?", + "Wrapped input: I said, Are. You. 
Ready?", + ], + }); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "main:0", + "wrappedModel:1", + "chain:2", + "ChatPromptTemplate:3", + "FakeChatModel:4", + "StrOutputParser:5", + "wrappedModel:6", + "chain:7", + "ChatPromptTemplate:8", + "FakeChatModel:9", + "StrOutputParser:10", + ], + edges: [ + ["main:0", "wrappedModel:1"], + ["wrappedModel:1", "chain:2"], + ["chain:2", "ChatPromptTemplate:3"], + ["chain:2", "FakeChatModel:4"], + ["chain:2", "StrOutputParser:5"], + ["main:0", "wrappedModel:6"], + ["wrappedModel:6", "chain:7"], + ["chain:7", "ChatPromptTemplate:8"], + ["chain:7", "FakeChatModel:9"], + ["chain:7", "StrOutputParser:10"], + ], }); }); From a8e6e3bbdf1d2e94888ae46fd520070d65d6a255 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 17:11:31 +0200 Subject: [PATCH 069/373] Fix build --- js/src/package.json | 1 + js/src/singletons/traceable.ts | 15 --------------- js/src/tests/traceable.test.ts | 7 ------- 3 files changed, 1 insertion(+), 22 deletions(-) create mode 100644 js/src/package.json diff --git a/js/src/package.json b/js/src/package.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/js/src/package.json @@ -0,0 +1 @@ +{} diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index a8645179d..4d0744f2b 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -126,18 +126,3 @@ export function isTraceableFunction( ): x is TraceableFunction { return typeof x === "function" && "langsmith:traceable" in x; } - -function isKVMap(x: unknown): x is Record { - if (typeof x !== "object" || x == null) { - return false; - } - - const prototype = Object.getPrototypeOf(x); - return ( - (prototype === null || - prototype === Object.prototype || - Object.getPrototypeOf(prototype) === null) && - !(Symbol.toStringTag in x) && - !(Symbol.iterator in x) - ); -} diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts 
index 38b05e4f7..a580ba414 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -2,13 +2,6 @@ import type { RunTree, RunTreeConfig } from "../run_trees.js"; import { ROOT, traceable } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; -import { FakeChatModel } from "@langchain/core/utils/testing"; -import { ChatPromptTemplate } from "@langchain/core/prompts"; -import { StringOutputParser } from "@langchain/core/output_parsers"; -import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; -import { BaseMessage, HumanMessage } from "@langchain/core/messages"; -import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; -import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; test("basic traceable implementation", async () => { const { client, callSpy } = mockClient(); From 8c1f1ba3bef6d2b96d1348ea26fbc114e27c6031 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 17:12:44 +0200 Subject: [PATCH 070/373] Remove hard resolutions --- js/package.json | 3 --- 1 file changed, 3 deletions(-) diff --git a/js/package.json b/js/package.json index 606a1ca9c..ce050f5c5 100644 --- a/js/package.json +++ b/js/package.json @@ -128,9 +128,6 @@ "optional": true } }, - "resolutions": { - "langsmith": "link:." 
- }, "lint-staged": { "**/*.{ts,tsx}": [ "prettier --write --ignore-unknown", From ed8c5586c98992bc0c2bc6bb906ae1d33e689061 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 15 May 2024 17:30:17 +0200 Subject: [PATCH 071/373] Remove package.json --- js/src/package.json | 1 - 1 file changed, 1 deletion(-) delete mode 100644 js/src/package.json diff --git a/js/src/package.json b/js/src/package.json deleted file mode 100644 index 0967ef424..000000000 --- a/js/src/package.json +++ /dev/null @@ -1 +0,0 @@ -{} From 24a99c852d14a83b9ec4aa8e60d7be0892b3951b Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 16 May 2024 23:17:51 +0200 Subject: [PATCH 072/373] Use env vars in run trees --- js/src/run_trees.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index eb07870ce..d78bb9c8f 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -6,6 +6,7 @@ import { getRuntimeEnvironment, } from "./utils/env.js"; import { Client } from "./client.js"; +import { isTracingEnabled } from "./env.js"; const warnedMessages: Record = {}; @@ -188,8 +189,7 @@ export class RunTree implements BaseRun { name: parentRun?.name ?? "", id: parentRun?.id, client, - // TODO: handle tracing enabled - tracingEnabled: true, + tracingEnabled: isTracingEnabled(), project_name: projectName, tags: [ ...new Set((parentRun?.tags ?? []).concat(parentConfig?.tags ?? 
[])), From 3886649e5aafaca46fb099bcd5e3f5a43f514c2e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 16 May 2024 23:26:40 +0200 Subject: [PATCH 073/373] Cleanup --- js/src/singletons/traceable.ts | 125 +++++++++------------------------ js/src/singletons/types.ts | 75 ++++++++++++++++++++ js/src/traceable.ts | 60 +++++++--------- 3 files changed, 133 insertions(+), 127 deletions(-) create mode 100644 js/src/singletons/types.ts diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index 4d0744f2b..6319b99df 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -1,24 +1,42 @@ -import { RunTree, RunnableConfigLike } from "../run_trees.js"; +import { RunTree } from "../run_trees.js"; +import { TraceableFunction } from "./types.js"; -interface AsyncStorageLike { +interface AsyncLocalStorageInterface { getStore: () => RunTree | undefined; run: (context: RunTree | undefined, fn: () => void) => void; } -export const TraceableLocalStorageContext = (() => { - let storage: AsyncStorageLike; +class MockAsyncLocalStorage implements AsyncLocalStorageInterface { + getStore() { + return undefined; + } + + run(_: RunTree | undefined, callback: () => void): void { + return callback(); + } +} + +class AsyncLocalStorageProvider { + private asyncLocalStorage: AsyncLocalStorageInterface = + new MockAsyncLocalStorage(); - return { - register: (value: AsyncStorageLike) => { - storage ??= value; - return storage; - }, - get storage() { - return storage; - }, - }; -})(); + private hasBeenInitialized = false; + + getInstance(): AsyncLocalStorageInterface { + return this.asyncLocalStorage; + } + + initializeGlobalInstance(instance: AsyncLocalStorageInterface) { + if (!this.hasBeenInitialized) { + this.hasBeenInitialized = true; + this.asyncLocalStorage = instance; + } + } +} + +export const AsyncLocalStorageProviderSingleton = + new AsyncLocalStorageProvider(); /** * Return the current run tree from within a traceable-wrapped 
function. @@ -27,11 +45,7 @@ export const TraceableLocalStorageContext = (() => { * @returns The run tree for the given context. */ export const getCurrentRunTree = () => { - if (!TraceableLocalStorageContext.storage) { - throw new Error("Could not find the traceable storage context"); - } - - const runTree = TraceableLocalStorageContext.storage.getStore(); + const runTree = AsyncLocalStorageProviderSingleton.getInstance().getStore(); if (runTree === undefined) { throw new Error( [ @@ -47,79 +61,6 @@ export const getCurrentRunTree = () => { export const ROOT = Symbol.for("langsmith:traceable:root"); -type SmartPromise = T extends AsyncGenerator - ? T - : T extends Promise - ? T - : Promise; - -type WrapArgReturnPair = Pair extends [ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - infer Args extends any[], - infer Return -] - ? Args extends [RunTree, ...infer RestArgs] - ? { - ( - runTree: RunTree | typeof ROOT, - ...args: RestArgs - ): SmartPromise; - (config: RunnableConfigLike, ...args: RestArgs): SmartPromise; - } - : { - (...args: Args): SmartPromise; - (runTree: RunTree, ...rest: Args): SmartPromise; - (config: RunnableConfigLike, ...args: Args): SmartPromise; - } - : never; - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( - x: infer I -) => void - ? I - : never; - -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export type TraceableFunction any> = - // function overloads are represented as intersections rather than unions - // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 - Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - (...args: infer A4): infer R4; - (...args: infer A5): infer R5; - } - ? 
UnionToIntersection< - WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]> - > - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - (...args: infer A4): infer R4; - } - ? UnionToIntersection< - WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]> - > - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - (...args: infer A3): infer R3; - } - ? UnionToIntersection> - : Func extends { - (...args: infer A1): infer R1; - (...args: infer A2): infer R2; - } - ? UnionToIntersection> - : Func extends { - (...args: infer A1): infer R1; - } - ? UnionToIntersection> - : never; - export function isTraceableFunction( x: unknown // eslint-disable-next-line @typescript-eslint/no-explicit-any diff --git a/js/src/singletons/types.ts b/js/src/singletons/types.ts new file mode 100644 index 000000000..1ebe0eb19 --- /dev/null +++ b/js/src/singletons/types.ts @@ -0,0 +1,75 @@ +import { RunTree, RunnableConfigLike } from "../run_trees.js"; +import { ROOT } from "./traceable.js"; + +type SmartPromise = T extends AsyncGenerator + ? T + : T extends Promise + ? T + : Promise; +type WrapArgReturnPair = Pair extends [ + // eslint-disable-next-line @typescript-eslint/no-explicit-any + infer Args extends any[], + infer Return +] + ? Args extends [RunTree, ...infer RestArgs] + ? { + ( + runTree: RunTree | typeof ROOT, + ...args: RestArgs + ): SmartPromise; + (config: RunnableConfigLike, ...args: RestArgs): SmartPromise; + } + : { + (...args: Args): SmartPromise; + (runTree: RunTree, ...rest: Args): SmartPromise; + (config: RunnableConfigLike, ...args: Args): SmartPromise; + } + : never; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( + x: infer I +) => void + ? 
I + : never; +// eslint-disable-next-line @typescript-eslint/no-explicit-any + +export type TraceableFunction any> = + // function overloads are represented as intersections rather than unions + // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 + Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + (...args: infer A4): infer R4; + (...args: infer A5): infer R5; + } + ? UnionToIntersection< + WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]> + > + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + (...args: infer A4): infer R4; + } + ? UnionToIntersection< + WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]> + > + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + (...args: infer A3): infer R3; + } + ? UnionToIntersection> + : Func extends { + (...args: infer A1): infer R1; + (...args: infer A2): infer R2; + } + ? UnionToIntersection> + : Func extends { + (...args: infer A1): infer R1; + } + ? 
UnionToIntersection> + : never; + +export type RunTreeLike = RunTree; diff --git a/js/src/traceable.ts b/js/src/traceable.ts index ece357efe..82f86b616 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -11,9 +11,13 @@ import { InvocationParamsSchema, KVMap } from "./schemas.js"; import { isTracingEnabled } from "./env.js"; import { ROOT, - TraceableFunction, - TraceableLocalStorageContext, + AsyncLocalStorageProviderSingleton, } from "./singletons/traceable.js"; +import { TraceableFunction } from "./singletons/types.js"; + +AsyncLocalStorageProviderSingleton.initializeGlobalInstance( + new AsyncLocalStorage() +); function isPromiseMethod( x: string | symbol @@ -24,9 +28,20 @@ function isPromiseMethod( return false; } -const asyncLocalStorage = TraceableLocalStorageContext.register( - new AsyncLocalStorage() -); +function isKVMap(x: unknown): x is Record { + if (typeof x !== "object" || x == null) { + return false; + } + + const prototype = Object.getPrototypeOf(x); + return ( + (prototype === null || + prototype === Object.prototype || + Object.getPrototypeOf(prototype) === null) && + !(Symbol.toStringTag in x) && + !(Symbol.iterator in x) + ); +} const isAsyncIterable = (x: unknown): x is AsyncIterable => x != null && @@ -34,14 +49,14 @@ const isAsyncIterable = (x: unknown): x is AsyncIterable => // eslint-disable-next-line @typescript-eslint/no-explicit-any typeof (x as any)[Symbol.asyncIterator] === "function"; -const GeneratorFunction = function* () {}.constructor; - const isIteratorLike = (x: unknown): x is Iterator => x != null && typeof x === "object" && "next" in x && typeof x.next === "function"; +const GeneratorFunction = function* () {}.constructor; + const isGenerator = (x: unknown): x is Generator => // eslint-disable-next-line no-instanceof/no-instanceof x != null && typeof x === "function" && x instanceof GeneratorFunction; @@ -379,6 +394,8 @@ export function traceable any>( }; } + const asyncLocalStorage = 
AsyncLocalStorageProviderSingleton.getInstance(); + // TODO: deal with possible nested promises and async iterables const processedArgs = args as unknown as Inputs; for (let i = 0; i < processedArgs.length; i++) { @@ -608,36 +625,9 @@ export function traceable any>( } export { - type TraceableFunction, getCurrentRunTree, isTraceableFunction, ROOT, } from "./singletons/traceable.js"; -export type RunTreeLike = RunTree; -function isKVMap(x: unknown): x is Record { - if (typeof x !== "object" || x == null) { - return false; - } - - const prototype = Object.getPrototypeOf(x); - return ( - (prototype === null || - prototype === Object.prototype || - Object.getPrototypeOf(prototype) === null) && - !(Symbol.toStringTag in x) && - !(Symbol.iterator in x) - ); -} - -export function wrapFunctionAndEnsureTraceable< - Func extends (...args: any[]) => any ->(target: Func, options: Partial, name = "target") { - if (typeof target === "function") { - return traceable(target, { - ...options, - name, - }); - } - throw new Error("Target must be runnable function"); -} +export type { RunTreeLike, TraceableFunction } from "./singletons/types.js"; From 0c7105b1568390f6a98a354038a8f556dbfa4ea2 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 16 May 2024 23:28:36 +0200 Subject: [PATCH 074/373] Update yarn.lock --- js/package.json | 2 +- js/yarn.lock | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/js/package.json b/js/package.json index ce050f5c5..efe1cec5d 100644 --- a/js/package.json +++ b/js/package.json @@ -227,4 +227,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/yarn.lock b/js/yarn.lock index 721bf9f6e..9da4ebf12 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -3543,13 +3543,16 @@ kleur@^3.0.3: resolved "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -"langsmith@link:.": - version 
"0.0.0" - uid "" - langsmith@~0.1.1: - version "0.0.0" - uid "" + version "0.1.25" + resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.25.tgz#3d06b6fc62abb1a6fc16540d40ddb48bd795f128" + integrity sha512-Hft4Y1yoMgFgCUXVQklRZ7ndmLQ/6FmRZE9P3u5BRdMq5Fa0hpg8R7jd7bLLBXkAjqcFvWo0AGhpb8MMY5FAiA== + dependencies: + "@types/uuid" "^9.0.1" + commander "^10.0.1" + p-queue "^6.6.2" + p-retry "4" + uuid "^9.0.0" leven@^3.1.0: version "3.1.0" From d8ba1839b8722efc63984edddbc6b05ec1d5369f Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 17 May 2024 03:08:21 +0200 Subject: [PATCH 075/373] Code Review --- js/src/env.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/js/src/env.ts b/js/src/env.ts index ee7a233d3..2847b6e73 100644 --- a/js/src/env.ts +++ b/js/src/env.ts @@ -10,7 +10,5 @@ export const isTracingEnabled = (tracingEnabled?: boolean): boolean => { "LANGSMITH_TRACING", "LANGCHAIN_TRACING", ]; - return Boolean( - envVars.find((envVar) => getEnvironmentVariable(envVar) === "true") - ); + return !!envVars.find((envVar) => getEnvironmentVariable(envVar) === "true"); }; From ef193130f89177bc4b74e7c7a963734591ff8e98 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 17 May 2024 03:44:10 +0200 Subject: [PATCH 076/373] Inherit tracing enable if tracer is found --- js/src/run_trees.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index d78bb9c8f..91c812928 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -174,6 +174,8 @@ export class RunTree implements BaseRun { let projectName: string | undefined; let client: Client | undefined; + let tracingEnabled = isTracingEnabled(); + if (callbackManager) { const parentRunId = callbackManager?.getParentRunId?.() ?? 
""; const langChainTracer = callbackManager?.handlers?.find( @@ -183,13 +185,14 @@ export class RunTree implements BaseRun { parentRun = langChainTracer?.getRun?.(parentRunId); projectName = langChainTracer?.projectName; client = langChainTracer?.client; + tracingEnabled = tracingEnabled || !!langChainTracer; } const parentRunTree = new RunTree({ name: parentRun?.name ?? "", id: parentRun?.id, client, - tracingEnabled: isTracingEnabled(), + tracingEnabled, project_name: projectName, tags: [ ...new Set((parentRun?.tags ?? []).concat(parentConfig?.tags ?? [])), From f0657e540dc277d2cb0acddb438feb8cc7010e95 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 17 May 2024 03:07:18 +0200 Subject: [PATCH 077/373] fix(handoff): support streaming returned generators --- js/src/langchain.ts | 44 +++++++++++- js/src/run_trees.ts | 11 +++ js/src/tests/traceable_langchain.test.ts | 89 +++++++++++++++++++++++- js/src/traceable.ts | 64 +++-------------- js/src/utils/asserts.ts | 51 ++++++++++++++ 5 files changed, 201 insertions(+), 58 deletions(-) create mode 100644 js/src/utils/asserts.ts diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 505e05424..fc51d561f 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -1,6 +1,11 @@ import { CallbackManager } from "@langchain/core/callbacks/manager"; import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; -import { Runnable, RunnableConfig } from "@langchain/core/runnables"; +import { + Runnable, + RunnableConfig, + patchConfig, + getCallbackManagerForConfig, +} from "@langchain/core/runnables"; import { RunTree } from "./run_trees.js"; import { Run } from "./schemas.js"; @@ -9,6 +14,7 @@ import { getCurrentRunTree, isTraceableFunction, } from "./traceable.js"; +import { isAsyncIterable, isIteratorLike } from "./utils/asserts.js"; /** * Converts the current run tree active within a traceable-wrapped function @@ -113,7 +119,41 @@ export class RunnableTraceable extends Runnable< async 
invoke(input: RunInput, options?: Partial) { const [config] = this._getOptionsList(options ?? {}, 1); - return (await this.func(config, input)) as RunOutput; + + const callbacks = await getCallbackManagerForConfig(config); + + return (await this.func( + patchConfig(config, { callbacks }), + input + )) as RunOutput; + } + + async *_streamIterator( + input: RunInput, + options?: Partial + ): AsyncGenerator { + const result = await this.invoke(input, options); + + if (isAsyncIterable(result)) { + const iterator = result[Symbol.asyncIterator](); + while (true) { + const { done, value } = await iterator.next(); + if (done) break; + yield value as RunOutput; + } + return; + } + + if (isIteratorLike(result)) { + while (true) { + const state: IteratorResult = result.next(); + if (state.done) break; + yield state.value as RunOutput; + } + return; + } + + yield result; } static from(func: AnyTraceableFunction) { diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 91c812928..639925b3c 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -188,6 +188,17 @@ export class RunTree implements BaseRun { tracingEnabled = tracingEnabled || !!langChainTracer; } + if (!parentRun) { + return new RunTree({ + name: props.name, + client, + tracingEnabled: isTracingEnabled(), + project_name: projectName, + tags: props.tags, + metadata: props.metadata, + }); + } + const parentRunTree = new RunTree({ name: parentRun?.name ?? 
"", id: parentRun?.id, diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index b3056ac53..779d4d7bf 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -63,7 +63,7 @@ describe("to langchain", () => { }); }); - test("to langchain stream", async () => { + test("stream", async () => { const { client, callSpy } = mockClient(); const main = traceable( @@ -100,7 +100,7 @@ describe("to langchain", () => { }); }); - test("to langchain batch", async () => { + test("batch", async () => { const { client, callSpy } = mockClient(); const main = traceable( @@ -191,6 +191,91 @@ describe("to traceable", () => { ], }); }); + + test("array stream", async () => { + const { client, callSpy } = mockClient(); + + const source = RunnableTraceable.from( + traceable(function (input: { text: string }) { + return input.text.split(" "); + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + { callbacks: [new LangChainTracer({ client })] } + )) { + tokens.push(chunk); + } + + expect(tokens).toEqual([["Hello", "world"]]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); + + test("generator stream", async () => { + const { client, callSpy } = mockClient(); + + const source = RunnableTraceable.from( + traceable(function* (input: { text: string }) { + const chunks = input.text.split(" "); + for (const chunk of chunks) { + yield chunk; + } + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + { callbacks: [new LangChainTracer({ client })] } + )) { + tokens.push(chunk); + } + + 
expect(tokens).toEqual(["Hello", "world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); + + test("async generator stream", async () => { + const { client, callSpy } = mockClient(); + const source = RunnableTraceable.from( + traceable(async function* (input: { text: string }) { + const chunks = input.text.split(" "); + for (const chunk of chunks) { + yield chunk; + } + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + callbacks: [new LangChainTracer({ client })], + } + )) { + tokens.push(chunk); + } + + expect(tokens).toEqual(["Hello", "world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); }); test("explicit nested", async () => { diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 82f86b616..173f1cba3 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -14,65 +14,20 @@ import { AsyncLocalStorageProviderSingleton, } from "./singletons/traceable.js"; import { TraceableFunction } from "./singletons/types.js"; +import { + isKVMap, + isReadableStream, + isAsyncIterable, + isIteratorLike, + isThenable, + isGenerator, + isPromiseMethod, +} from "./utils/asserts.js"; AsyncLocalStorageProviderSingleton.initializeGlobalInstance( new AsyncLocalStorage() ); -function isPromiseMethod( - x: string | symbol -): x is "then" | "catch" | "finally" { - if (x === "then" || x === "catch" || x === "finally") { - return true; - } - return false; -} - -function isKVMap(x: unknown): x is Record { - if (typeof x !== "object" || x == null) { - return false; - } - - const prototype = Object.getPrototypeOf(x); - return ( - (prototype === null || - prototype === Object.prototype || - Object.getPrototypeOf(prototype) === null) && - !(Symbol.toStringTag in x) 
&& - !(Symbol.iterator in x) - ); -} - -const isAsyncIterable = (x: unknown): x is AsyncIterable => - x != null && - typeof x === "object" && - // eslint-disable-next-line @typescript-eslint/no-explicit-any - typeof (x as any)[Symbol.asyncIterator] === "function"; - -const isIteratorLike = (x: unknown): x is Iterator => - x != null && - typeof x === "object" && - "next" in x && - typeof x.next === "function"; - -const GeneratorFunction = function* () {}.constructor; - -const isGenerator = (x: unknown): x is Generator => - // eslint-disable-next-line no-instanceof/no-instanceof - x != null && typeof x === "function" && x instanceof GeneratorFunction; - -const isThenable = (x: unknown): x is Promise => - x != null && - typeof x === "object" && - "then" in x && - typeof x.then === "function"; - -const isReadableStream = (x: unknown): x is ReadableStream => - x != null && - typeof x === "object" && - "getReader" in x && - typeof x.getReader === "function"; - const handleRunInputs = (rawInputs: unknown[]): KVMap => { const firstInput = rawInputs[0]; @@ -83,6 +38,7 @@ const handleRunInputs = (rawInputs: unknown[]): KVMap => { if (rawInputs.length > 1) { return { args: rawInputs }; } + if (isKVMap(firstInput)) { return firstInput; } diff --git a/js/src/utils/asserts.ts b/js/src/utils/asserts.ts new file mode 100644 index 000000000..55bc260db --- /dev/null +++ b/js/src/utils/asserts.ts @@ -0,0 +1,51 @@ +export function isPromiseMethod( + x: string | symbol +): x is "then" | "catch" | "finally" { + if (x === "then" || x === "catch" || x === "finally") { + return true; + } + return false; +} + +export function isKVMap(x: unknown): x is Record { + if (typeof x !== "object" || x == null) { + return false; + } + + const prototype = Object.getPrototypeOf(x); + return ( + (prototype === null || + prototype === Object.prototype || + Object.getPrototypeOf(prototype) === null) && + !(Symbol.toStringTag in x) && + !(Symbol.iterator in x) + ); +} +export const isAsyncIterable = (x: 
unknown): x is AsyncIterable => + x != null && + typeof x === "object" && + // eslint-disable-next-line @typescript-eslint/no-explicit-any + typeof (x as any)[Symbol.asyncIterator] === "function"; + +export const isIteratorLike = (x: unknown): x is Iterator => + x != null && + typeof x === "object" && + "next" in x && + typeof x.next === "function"; + +const GeneratorFunction = function* () {}.constructor; +export const isGenerator = (x: unknown): x is Generator => + // eslint-disable-next-line no-instanceof/no-instanceof + x != null && typeof x === "function" && x instanceof GeneratorFunction; + +export const isThenable = (x: unknown): x is Promise => + x != null && + typeof x === "object" && + "then" in x && + typeof x.then === "function"; + +export const isReadableStream = (x: unknown): x is ReadableStream => + x != null && + typeof x === "object" && + "getReader" in x && + typeof x.getReader === "function"; From 65a80d6bfe1094fd1765b0eb433522857069d16d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 17 May 2024 04:01:19 +0200 Subject: [PATCH 078/373] Fix tests --- js/src/langchain.ts | 1 - js/src/run_trees.ts | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index fc51d561f..9aa0825dc 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -119,7 +119,6 @@ export class RunnableTraceable extends Runnable< async invoke(input: RunInput, options?: Partial) { const [config] = this._getOptionsList(options ?? 
{}, 1); - const callbacks = await getCallbackManagerForConfig(config); return (await this.func( diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 639925b3c..4ecf82173 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -190,18 +190,18 @@ export class RunTree implements BaseRun { if (!parentRun) { return new RunTree({ - name: props.name, client, - tracingEnabled: isTracingEnabled(), + tracingEnabled, project_name: projectName, + name: props.name, tags: props.tags, metadata: props.metadata, }); } const parentRunTree = new RunTree({ - name: parentRun?.name ?? "", - id: parentRun?.id, + name: parentRun.name, + id: parentRun.id, client, tracingEnabled, project_name: projectName, @@ -217,7 +217,7 @@ export class RunTree implements BaseRun { }); return parentRunTree.createChild({ - name: props?.name ?? "", + name: props.name, tags: props.tags, metadata: props.metadata, }); From 27294a25341cbad47dc1b0d92f39c25fc7b87dc8 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 16 May 2024 19:53:31 -0700 Subject: [PATCH 079/373] feat(datasets): Allow multiple splits per example DO NOT LAND UNTIL THE CORRESPONDING PR IN LANGCHAINPLUS HAS MADE IT TO PROD --- js/src/client.ts | 4 ++-- js/src/schemas.ts | 4 ++-- js/src/tests/client.int.test.ts | 23 ++++++++++++++----- python/langsmith/client.py | 15 +++++++++--- python/langsmith/schemas.py | 4 ++-- python/tests/integration_tests/test_client.py | 19 +++++++++++++-- 6 files changed, 52 insertions(+), 17 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index df427edbb..995d59668 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -239,7 +239,7 @@ export type CreateExampleOptions = { exampleId?: string; metadata?: KVMap; - split?: string; + split?: string | string[]; }; type AutoBatchQueueItem = { @@ -2036,7 +2036,7 @@ export class Client { inputs: Array; outputs?: Array; metadata?: Array; - splits?: Array; + splits?: Array>; sourceRunIds?: Array; exampleIds?: 
Array; datasetId?: string; diff --git a/js/src/schemas.ts b/js/src/schemas.ts index ee8a11036..f33ee1f80 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -229,7 +229,7 @@ export interface RunUpdate { export interface ExampleCreate extends BaseExample { id?: string; created_at?: string; - split?: string; + split?: string | string[]; } export interface Example extends BaseExample { @@ -245,7 +245,7 @@ export interface ExampleUpdate { inputs?: KVMap; outputs?: KVMap; metadata?: KVMap; - split?: string; + split?: string | string[]; } export interface BaseDataset { name: string; diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 7637bb821..17891e7ae 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -97,12 +97,22 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { await client.updateExample(example.id, { inputs: { col1: "updatedExampleCol1" }, outputs: { col2: "updatedExampleCol2" }, - split: "my_split2", + split: ["my_split2"], }); // Says 'example updated' or something similar const newExampleValue = await client.readExample(example.id); expect(newExampleValue.inputs.col1).toBe("updatedExampleCol1"); - expect(newExampleValue.metadata?.dataset_split).toBe("my_split2"); + expect(newExampleValue.metadata?.dataset_split).toBe(["my_split2"]); + + await client.updateExample(example.id, { + inputs: { col1: "updatedExampleCol3" }, + outputs: { col2: "updatedExampleCol4" }, + split: "my_split3", + }); + // Says 'example updated' or something similar + const newExampleValue2 = await client.readExample(example.id); + expect(newExampleValue2.inputs.col1).toBe("updatedExampleCol3"); + expect(newExampleValue2.metadata?.dataset_split).toBe(["my_split3"]); await client.deleteExample(example.id); const examples2 = await toArray( client.listExamples({ datasetId: newDataset.id }) @@ -489,7 +499,7 @@ test.concurrent( { output: "hi there 3" }, ], metadata: [{ key: "value 1" }, { key: "value 2" 
}, { key: "value 3" }], - splits: ["train", "test", "train"], + splits: ["train", "test", ["train", "validation"]], datasetId: dataset.id, }); const initialExamplesList = await toArray( @@ -520,19 +530,20 @@ test.concurrent( ); expect(example1?.outputs?.output).toEqual("hi there 1"); expect(example1?.metadata?.key).toEqual("value 1"); - expect(example1?.metadata?.dataset_split).toEqual("train"); + expect(example1?.metadata?.dataset_split).toEqual(["train"]); const example2 = examplesList2.find( (e) => e.inputs.input === "hello world 2" ); expect(example2?.outputs?.output).toEqual("hi there 2"); expect(example2?.metadata?.key).toEqual("value 2"); - expect(example2?.metadata?.dataset_split).toEqual("test"); + expect(example2?.metadata?.dataset_split).toEqual(["test"]); const example3 = examplesList2.find( (e) => e.inputs.input === "hello world 3" ); expect(example3?.outputs?.output).toEqual("hi there 3"); expect(example3?.metadata?.key).toEqual("value 3"); - expect(example3?.metadata?.dataset_split).toEqual("train"); + expect(example3?.metadata?.dataset_split).toContain("train"); + expect(example3?.metadata?.dataset_split).toContain("validation"); await client.createExample( { input: "hello world" }, diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 9a822fd90..f63a47f65 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -2936,7 +2936,7 @@ def create_examples( inputs: Sequence[Mapping[str, Any]], outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, metadata: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, - splits: Optional[Sequence[Optional[str]]] = None, + splits: Optional[Sequence[Optional[str | List[str]]]] = None, source_run_ids: Optional[Sequence[Optional[ID_TYPE]]] = None, ids: Optional[Sequence[Optional[ID_TYPE]]] = None, dataset_id: Optional[ID_TYPE] = None, @@ -2953,6 +2953,9 @@ def create_examples( The output values for the examples. 
metadata : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None The metadata for the examples. + splits : Optional[Sequence[Optional[str | List[str]]]], default=None + The splits for the examples, which are divisions + of your dataset such as 'train', 'test', or 'validation'. source_run_ids : Optional[Sequence[Optional[ID_TYPE]]], default=None The IDs of the source runs associated with the examples. ids : Optional[Sequence[ID_TYPE]], default=None @@ -3012,7 +3015,7 @@ def create_example( created_at: Optional[datetime.datetime] = None, outputs: Optional[Mapping[str, Any]] = None, metadata: Optional[Mapping[str, Any]] = None, - split: Optional[str] = None, + split: Optional[str | List[str]] = None, example_id: Optional[ID_TYPE] = None, ) -> ls_schemas.Example: """Create a dataset example in the LangSmith API. @@ -3034,6 +3037,9 @@ The output values for the example. metadata : Mapping[str, Any] or None, default=None The metadata for the example. + split : str or List[str] or None, default=None + The splits for the example, which are divisions + of your dataset such as 'train', 'test', or 'validation'. exemple_id : UUID or None, default=None The ID of the example to create. If not provided, a new example will be created. @@ -3165,7 +3171,7 @@ def update_example( inputs: Optional[Dict[str, Any]] = None, outputs: Optional[Mapping[str, Any]] = None, metadata: Optional[Dict] = None, - split: Optional[str] = None, + split: Optional[str | List[str]] = None, dataset_id: Optional[ID_TYPE] = None, ) -> Dict[str, Any]: """Update a specific example. @@ -3180,6 +3186,9 @@ The output values to update. metadata : Dict or None, default=None The metadata to update. + split : str or List[str] or None, default=None + The dataset split to update, such as + 'train', 'test', or 'validation'. dataset_id : UUID or None, default=None The ID of the dataset to update. 
diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index ee57ffd32..9f1893a85 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -63,7 +63,7 @@ class ExampleCreate(ExampleBase): id: Optional[UUID] created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) - split: Optional[str] = None + split: Optional[str | List[str]] = None class Example(ExampleBase): @@ -106,7 +106,7 @@ class ExampleUpdate(BaseModel): inputs: Optional[Dict[str, Any]] = None outputs: Optional[Dict[str, Any]] = None metadata: Optional[Dict[str, Any]] = None - split: Optional[str] = None + split: Optional[str | List[str]] = None class Config: """Configuration class for the schema.""" diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index c037bfd65..a1540f44c 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -109,9 +109,9 @@ def test_datasets(langchain_client: Client) -> None: def test_list_examples(langchain_client: Client) -> None: """Test list_examples.""" examples = [ - ("Shut up, idiot", "Toxic", "train"), + ("Shut up, idiot", "Toxic", ["train", "validation"]), ("You're a wonderful person", "Not toxic", "test"), - ("This is the worst thing ever", "Toxic", "train"), + ("This is the worst thing ever", "Toxic", ["train"]), ("I had a great day today", "Not toxic", "test"), ("Nobody likes you", "Toxic", "train"), ("This is unacceptable. 
I want to speak to the manager.", "Not toxic", None), @@ -133,6 +133,11 @@ def test_list_examples(langchain_client: Client) -> None: ) assert len(example_list) == 3 + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["validation"]) + ) + assert len(example_list) == 1 + example_list = list( langchain_client.list_examples(dataset_id=dataset.id, splits=["test"]) ) @@ -153,6 +158,16 @@ def test_list_examples(langchain_client: Client) -> None: split="train", ) + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["test"]) + ) + assert len(example_list) == 2 + + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, splits=["train"]) + ) + assert len(example_list) == 4 + langchain_client.create_example( inputs={"text": "What's up!"}, outputs={"label": "Not toxic"}, From 97393b226c721eac6b6e2853c81c7d91cdfb3062 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Fri, 17 May 2024 18:13:01 -0700 Subject: [PATCH 080/373] add the splits to metadata in evaluate --- js/src/evaluation/_runner.ts | 16 ++++ js/src/tests/client.int.test.ts | 4 +- js/src/tests/evaluate.int.test.ts | 85 ++++++++++++++++++- python/langsmith/evaluation/_runner.py | 18 ++++ python/tests/integration_tests/test_client.py | 4 +- 5 files changed, 122 insertions(+), 5 deletions(-) diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index cdced3c67..f3f981dae 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -694,6 +694,21 @@ class _ExperimentManager { ).date; } + async _getDatasetSplits(): Promise { + const examples = await this.getExamples(); + const allSplits = examples.reduce((acc, ex) => { + if (ex.metadata && ex.metadata.dataset_split) { + if (Array.isArray(ex.metadata.dataset_split)) { + ex.metadata.dataset_split.forEach((split) => acc.add(split)); + } else if (typeof ex.metadata.dataset_split === "string") { + 
acc.add(ex.metadata.dataset_split); + } + } + return acc; + }, new Set()); + return allSplits.size ? Array.from(allSplits) : undefined; + } + async _end(): Promise { const experiment = this._experiment; if (!experiment) { @@ -701,6 +716,7 @@ class _ExperimentManager { } const projectMetadata = await this._getExperimentMetadata(); projectMetadata["dataset_version"] = await this._getDatasetVersion(); + projectMetadata["dataset_splits"] = await this._getDatasetSplits(); // Update revision_id if not already set if (!projectMetadata["revision_id"]) { projectMetadata["revision_id"] = await getDefaultRevisionId(); diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 17891e7ae..0b87522e9 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -102,7 +102,7 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { // Says 'example updated' or something similar const newExampleValue = await client.readExample(example.id); expect(newExampleValue.inputs.col1).toBe("updatedExampleCol1"); - expect(newExampleValue.metadata?.dataset_split).toBe(["my_split2"]); + expect(newExampleValue.metadata?.dataset_split).toStrictEqual(["my_split2"]); await client.updateExample(example.id, { inputs: { col1: "updatedExampleCol3" }, @@ -112,7 +112,7 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { // Says 'example updated' or something similar const newExampleValue2 = await client.readExample(example.id); expect(newExampleValue2.inputs.col1).toBe("updatedExampleCol3"); - expect(newExampleValue2.metadata?.dataset_split).toBe(["my_split3"]); + expect(newExampleValue2.metadata?.dataset_split).toStrictEqual(["my_split3"]); await client.deleteExample(example.id); const examples2 = await toArray( client.listExamples({ datasetId: newDataset.id }) diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 198f66473..5e1321d06 100644 --- a/js/src/tests/evaluate.int.test.ts +++ 
b/js/src/tests/evaluate.int.test.ts @@ -1,6 +1,6 @@ import { EvaluationResult } from "../evaluation/evaluator.js"; import { evaluate } from "../evaluation/_runner.js"; -import { Example, Run } from "../schemas.js"; +import { Example, Run, TracerSession } from "../schemas.js"; import { Client } from "../index.js"; import { afterAll, beforeAll } from "@jest/globals"; import { RunnableLambda } from "@langchain/core/runnables"; @@ -30,6 +30,13 @@ afterAll(async () => { await client.deleteDataset({ datasetName: TESTING_DATASET_NAME, }); + try { + await client.deleteDataset({ + datasetName: "my_splits_ds2", + }); + } catch (_) { + //pass + } }); test("evaluate can evaluate", async () => { @@ -351,6 +358,82 @@ test("can pass multiple evaluators", async () => { ); }); +test("split info saved correctly", async () => { + const client = new Client(); + // create a new dataset + await client.createDataset("my_splits_ds2", { + description: + "For testing purposed. Is created & deleted for each test run.", + }); + // create examples + await client.createExamples({ + inputs: [{ input: 1 }, { input: 2 }, { input: 3 }], + outputs: [{ output: 2 }, { output: 3 }, { output: 4 }], + splits: [["test"], ["train"], ["validation", "test"]], + datasetName: "my_splits_ds2", + }); + + const targetFunc = (input: Record) => { + console.log("__input__", input); + return { + foo: input.input + 1, + }; + }; + await evaluate(targetFunc, { + data: client.listExamples({ datasetName: "my_splits_ds2" }), + description: "splits info saved correctly", + }); + + const exp = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + let myExp: TracerSession | null = null; + for await (const session of exp) { + myExp = session; + } + expect(myExp?.extra?.metadata?.dataset_splits.sort()).toEqual( + ["test", "train", "validation"].sort() + ); + + await evaluate(targetFunc, { + data: client.listExamples({ + datasetName: "my_splits_ds2", + splits: ["test"], + }), + description: "splits info saved 
correctly", + }); + + const exp2 = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + let myExp2: TracerSession | null = null; + for await (const session of exp2) { + if (myExp2 === null || session.start_time > myExp2.start_time) { + myExp2 = session; + } + } + + expect(myExp2?.extra?.metadata?.dataset_splits.sort()).toEqual( + ["test", "validation"].sort() + ); + + await evaluate(targetFunc, { + data: client.listExamples({ + datasetName: "my_splits_ds2", + splits: ["train"], + }), + description: "splits info saved correctly", + }); + + const exp3 = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + let myExp3: TracerSession | null = null; + for await (const session of exp3) { + if (myExp3 === null || session.start_time > myExp3.start_time) { + myExp3 = session; + } + } + + expect(myExp3?.extra?.metadata?.dataset_splits.sort()).toEqual( + ["train"].sort() + ); +}); + test("can pass multiple summary evaluators", async () => { const targetFunc = (input: Record) => { console.log("__input__", input); diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index 3c07ed165..27910b90b 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -1322,6 +1322,23 @@ def _get_dataset_version(self) -> Optional[str]: max_modified_at = max(modified_at) if modified_at else None return max_modified_at.isoformat() if max_modified_at else None + def _get_dataset_splits(self) -> Optional[list[str]]: + examples = list(self.examples) + splits = set() + for example in examples: + if ( + example.metadata + and example.metadata.get("dataset_split") + and isinstance(example.metadata["dataset_split"], list) + ): + for split in example.metadata["dataset_split"]: + if isinstance(split, str): + splits.add(split) + else: + splits.add("base") + + return list(splits) + def _end(self) -> None: experiment = self._experiment if experiment is None: @@ -1329,6 +1346,7 @@ def _end(self) -> None: 
project_metadata = self._get_experiment_metadata() project_metadata["dataset_version"] = self._get_dataset_version() + project_metadata["dataset_splits"] = self._get_dataset_splits() self.client.update_project( experiment.id, end_time=datetime.datetime.now(datetime.timezone.utc), diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index a1540f44c..9107cc9f9 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -153,7 +153,7 @@ def test_list_examples(langchain_client: Client) -> None: example.id for example in example_list if example.metadata is not None - and example.metadata.get("dataset_split") == "test" + and "test" in example.metadata.get("dataset_split", []) ][0], split="train", ) @@ -161,7 +161,7 @@ def test_list_examples(langchain_client: Client) -> None: example_list = list( langchain_client.list_examples(dataset_id=dataset.id, splits=["test"]) ) - assert len(example_list) == 2 + assert len(example_list) == 1 example_list = list( langchain_client.list_examples(dataset_id=dataset.id, splits=["train"]) From 78bf1e16b887ad812c0f4f59c94e674ae5a2107c Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Fri, 17 May 2024 18:26:53 -0700 Subject: [PATCH 081/373] fix lint --- python/langsmith/schemas.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 9f1893a85..758530e03 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -63,7 +63,7 @@ class ExampleCreate(ExampleBase): id: Optional[UUID] created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) - split: Optional[str | List[str]] = None + split: Optional[Union[str, List[str]]] = None class Example(ExampleBase): @@ -106,7 +106,7 @@ class ExampleUpdate(BaseModel): inputs: Optional[Dict[str, Any]] = None outputs: Optional[Dict[str, Any]] = None 
metadata: Optional[Dict[str, Any]] = None - split: Optional[str | List[str]] = None + split: Optional[Union[str, List[str]]] = None class Config: """Configuration class for the schema.""" From a82b1d7b62c1f7f58bd7f5dd3150f34b436991fd Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 20 May 2024 12:12:16 +0200 Subject: [PATCH 082/373] Add missing user python dependencies for doctest --- .github/workflows/integration_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index f28d73ede..e862470f7 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -42,7 +42,7 @@ jobs: - name: Install dependencies run: | poetry install --with dev - poetry run pip install -U langchain + poetry run pip install -U langchain langchain_anthropic langchain_openai rapidfuzz - name: Run Python integration tests uses: ./.github/actions/python-integration-tests with: From 98af510c8cb41d60eab856c8694c495c4c6a48e3 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 20 May 2024 16:52:31 +0200 Subject: [PATCH 083/373] fix(js): populate this._experiment when creating a new project --- js/src/evaluation/_runner.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index f3f981dae..0948d6e7a 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -306,6 +306,7 @@ class _ExperimentManager { metadata: projectMetadata, description: this._description, }); + this._experiment = project; } catch (e) { if (String(e).includes("already exists")) { throw e; From 7b9f3168b6dd14871724511cfc918fcfafb94eae Mon Sep 17 00:00:00 2001 From: infra Date: Mon, 20 May 2024 08:39:35 -0700 Subject: [PATCH 084/373] fix: bump 0.5.7 --- python/langsmith/cli/.env.example | 2 +- python/langsmith/cli/docker-compose.yaml | 14 +++++++------- python/langsmith/cli/main.py | 14 +++++++------- 
python/pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 757ca17c9..eb9d9120f 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,5 +1,5 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. -_LANGSMITH_IMAGE_VERSION=0.5.1 +_LANGSMITH_IMAGE_VERSION=0.5.7 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key OPENAI_API_KEY=your-openai-api-key # Needed for Online Evals and Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 1dff7840b..58922da51 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.5.1} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.5.7} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,7 +16,7 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker @@ -49,7 +49,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} + image: 
langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker @@ -75,7 +75,7 @@ services: condition: service_completed_successfully restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} environment: - LANGCHAIN_ENV=local_docker - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} @@ -163,7 +163,7 @@ services: timeout: 2s retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} depends_on: langchain-clickhouse: condition: service_healthy @@ -182,7 +182,7 @@ services: "migrate -source file://clickhouse/migrations -database 'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.1} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} depends_on: langchain-db: condition: service_healthy diff --git a/python/langsmith/cli/main.py b/python/langsmith/cli/main.py index 116437aa3..f6240ef06 100644 --- a/python/langsmith/cli/main.py +++ b/python/langsmith/cli/main.py @@ -101,12 +101,12 @@ def _start_local(self) -> None: def pull( self, *, - version: str = "0.5.1", + version: str = "0.5.7", ) -> None: """Pull the latest LangSmith images. Args: - version: The LangSmith version to use for LangSmith. Defaults to 0.5.1 + version: The LangSmith version to use for LangSmith. 
Defaults to 0.5.7 """ os.environ["_LANGSMITH_IMAGE_VERSION"] = version subprocess.run( @@ -123,7 +123,7 @@ def start( *, openai_api_key: Optional[str] = None, langsmith_license_key: str, - version: str = "0.5.1", + version: str = "0.5.7", ) -> None: """Run the LangSmith server locally. @@ -251,8 +251,8 @@ def main() -> None: ) server_start_parser.add_argument( "--version", - default="0.5.1", - help="The LangSmith version to use for LangSmith. Defaults to 0.5.1.", + default="0.5.7", + help="The LangSmith version to use for LangSmith. Defaults to 0.5.7.", ) server_start_parser.set_defaults( func=lambda args: server_command.start( @@ -279,8 +279,8 @@ def main() -> None: ) server_pull_parser.add_argument( "--version", - default="0.5.1", - help="The LangSmith version to use for LangSmith. Defaults to 0.5.1.", + default="0.5.7", + help="The LangSmith version to use for LangSmith. Defaults to 0.5.7.", ) server_pull_parser.set_defaults( func=lambda args: server_command.pull(version=args.version) diff --git a/python/pyproject.toml b/python/pyproject.toml index 90f4db811..976f30004 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.59" +version = "0.1.60" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From 6f56a9b6b76f8171db6969ed0da4c26ad862c45a Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 20 May 2024 18:29:30 +0200 Subject: [PATCH 085/373] chore(js): reexport TraceableFunction type from singleton --- js/src/singletons/traceable.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index 6319b99df..14c9168dc 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -67,3 +67,5 @@ export function isTraceableFunction( ): x is TraceableFunction { return typeof x === "function" && "langsmith:traceable" in x; } + +export type { TraceableFunction } from "./types.js"; From ccb56b8ff7d0f3c6412bedb7b7bd95c146d92ed7 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 20 May 2024 20:52:42 +0200 Subject: [PATCH 086/373] feat(python): append invocation params when using wrap_openai --- python/langsmith/run_helpers.py | 13 +++++++++++ python/langsmith/wrappers/_openai.py | 34 +++++++++++++++++++++++----- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 478d119d8..f00ebc4cd 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -237,6 +237,7 @@ def traceable( reduce_fn: Optional[Callable] = None, project_name: Optional[str] = None, process_inputs: Optional[Callable[[dict], dict]] = None, + invocation_params_fn: Optional[Callable[[dict], dict]] = None, ) -> Callable[[Callable[..., R]], SupportsLangsmithExtra[R]]: ... 
@@ -404,6 +405,7 @@ def manual_extra_function(x): project_name=kwargs.pop("project_name", None), run_type=run_type, process_inputs=kwargs.pop("process_inputs", None), + invocation_params_fn=kwargs.pop("invocation_params_fn", None), ) if kwargs: warnings.warn( @@ -921,6 +923,7 @@ class _ContainerInput(TypedDict, total=False): project_name: Optional[str] run_type: ls_client.RUN_TYPE_T process_inputs: Optional[Callable[[dict], dict]] + invocation_params_fn: Optional[Callable[[dict], dict]] def _container_end( @@ -1037,6 +1040,16 @@ def _setup_run( metadata_["ls_method"] = "traceable" extra_inner["metadata"] = metadata_ inputs = _get_inputs_safe(signature, *args, **kwargs) + invocation_params_fn = container_input.get("invocation_params_fn") + if invocation_params_fn: + try: + invocation_params = { + k: v for k, v in invocation_params_fn(inputs).items() if v is not None + } + if invocation_params and isinstance(invocation_params, dict): + metadata_.update(invocation_params) + except BaseException as e: + LOGGER.error(f"Failed to infer invocation params for {name_}: {e}") process_inputs = container_input.get("process_inputs") if process_inputs: try: diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 4fe214b13..dd5c59ec0 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -57,6 +57,23 @@ def _strip_not_given(d: dict) -> dict: return d +def _infer_invocation_params(model_type: str, kwargs: dict): + stripped = _strip_not_given(kwargs) + + stop = stripped.get("stop") + if stop and isinstance(stop, str): + stop = [stop] + + return { + "ls_provider": "openai", + "ls_model_type": model_type, + "ls_model_name": stripped.get("model", None), + "ls_temperature": stripped.get("temperature", None), + "ls_max_tokens": stripped.get("max_tokens", None), + "ls_stop": stop, + } + + def _reduce_choices(choices: List[Choice]) -> dict: reversed_choices = list(reversed(choices)) message: Dict[str, Any] = { 
@@ -97,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"][ - "name" - ] += chunk.function.name + message["tool_calls"][index]["function"]["name"] += ( + chunk.function.name + ) if chunk.function.arguments: - message["tool_calls"][index]["function"][ - "arguments" - ] += chunk.function.arguments + message["tool_calls"][index]["function"]["arguments"] += ( + chunk.function.arguments + ) return { "index": choices[0].index, "finish_reason": next( @@ -150,6 +167,7 @@ def _get_wrapper( name: str, reduce_fn: Callable, tracing_extra: Optional[TracingExtra] = None, + invocation_params_fn: Optional[Callable] = None, ) -> Callable: textra = tracing_extra or {} @@ -160,6 +178,7 @@ def create(*args, stream: bool = False, **kwargs): run_type="llm", reduce_fn=reduce_fn if stream else None, process_inputs=_strip_not_given, + invocation_params_fn=invocation_params_fn, **textra, ) @@ -173,6 +192,7 @@ async def acreate(*args, stream: bool = False, **kwargs): run_type="llm", reduce_fn=reduce_fn if stream else None, process_inputs=_strip_not_given, + invocation_params_fn=invocation_params_fn, **textra, ) if stream: @@ -208,11 +228,13 @@ def wrap_openai(client: C, *, tracing_extra: Optional[TracingExtra] = None) -> C "ChatOpenAI", _reduce_chat, tracing_extra=tracing_extra, + invocation_params_fn=functools.partial(_infer_invocation_params, "chat"), ) client.completions.create = _get_wrapper( # type: ignore[method-assign] client.completions.create, "OpenAI", _reduce_completions, tracing_extra=tracing_extra, + invocation_params_fn=functools.partial(_infer_invocation_params, "text"), ) return client From 0a9452e6d7e0440849fd8caaabbe6bf75aeb1c78 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 20 May 2024 21:02:45 +0200 Subject: [PATCH 087/373] Fix lint --- python/langsmith/wrappers/_openai.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git 
a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index dd5c59ec0..cd9c29385 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"]["name"] += ( - chunk.function.name - ) + message["tool_calls"][index]["function"][ + "name" + ] += chunk.function.name if chunk.function.arguments: - message["tool_calls"][index]["function"]["arguments"] += ( - chunk.function.arguments - ) + message["tool_calls"][index]["function"][ + "arguments" + ] += chunk.function.arguments return { "index": choices[0].index, "finish_reason": next( From efe2972ac584d9ca1c4b4a7b0ab5c5908622c820 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 21 May 2024 00:44:19 +0200 Subject: [PATCH 088/373] chore(js): make sure we use "type": "text" for invocation params tracking --- js/src/schemas.ts | 2 +- js/src/wrappers/openai.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/js/src/schemas.ts b/js/src/schemas.ts index f33ee1f80..6cba693ef 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -397,7 +397,7 @@ export type RetrieverOutput = Array<{ export interface InvocationParamsSchema { ls_provider?: string; ls_model_name?: string; - ls_model_type: "chat"; + ls_model_type: "chat" | "text"; ls_temperature?: number; ls_max_tokens?: number; ls_stop?: string[]; diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index e43c55775..0ea56f882 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -263,7 +263,7 @@ export const wrapOpenAI = ( return { ls_provider: "openai", - ls_model_type: "chat", + ls_model_type: "text", ls_model_name: params.model, ls_max_tokens: params.max_tokens ?? undefined, ls_temperature: params.temperature ?? 
undefined, From 73aaf443d5aba318a01e922e3724a512fd47cf0e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 21 May 2024 02:00:59 +0200 Subject: [PATCH 089/373] feat(js): move `getLangchainCallbacks` to package --- js/.gitignore | 4 + js/package.json | 25 ++- js/scripts/create-entrypoints.js | 1 + js/src/evaluation/langchain.ts | 69 ++++++++ js/src/langchain.ts | 6 + js/src/run_trees.ts | 10 +- js/src/tests/lcls_handoff.int.test.ts | 23 ++- js/src/utils/warn.ts | 8 + js/tsconfig.json | 1 + js/yarn.lock | 233 ++++++++++++-------------- 10 files changed, 234 insertions(+), 146 deletions(-) create mode 100644 js/src/evaluation/langchain.ts create mode 100644 js/src/utils/warn.ts diff --git a/js/.gitignore b/js/.gitignore index ae5b5a591..5864f6933 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -47,6 +47,10 @@ Chinook_Sqlite.sql /evaluation.js /evaluation.d.ts /evaluation.d.cts +/evaluation/langchain.cjs +/evaluation/langchain.js +/evaluation/langchain.d.ts +/evaluation/langchain.d.cts /schemas.cjs /schemas.js /schemas.d.ts diff --git a/js/package.json b/js/package.json index efe1cec5d..93b92e6cf 100644 --- a/js/package.json +++ b/js/package.json @@ -21,6 +21,10 @@ "evaluation.js", "evaluation.d.ts", "evaluation.d.cts", + "evaluation/langchain.cjs", + "evaluation/langchain.js", + "evaluation/langchain.d.ts", + "evaluation/langchain.d.cts", "schemas.cjs", "schemas.js", "schemas.d.ts", @@ -95,8 +99,9 @@ "@babel/preset-env": "^7.22.4", "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", - "@langchain/core": "^0.1.32", - "@langchain/langgraph": "^0.0.8", + "langchain": "^0.2.0", + "@langchain/core": "^0.2.0", + "@langchain/langgraph": "^0.0.19", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.59.8", @@ -118,16 +123,23 @@ }, "peerDependencies": { "openai": "*", + "langchain": "*", "@langchain/core": "*" }, "peerDependenciesMeta": { "openai": { "optional": true }, + "langchain": { + "optional": true + }, 
"@langchain/core": { "optional": true } }, + "resolutions": { + "@langchain/core": "0.2.0" + }, "lint-staged": { "**/*.{ts,tsx}": [ "prettier --write --ignore-unknown", @@ -180,6 +192,15 @@ "import": "./evaluation.js", "require": "./evaluation.cjs" }, + "./evaluation/langchain": { + "types": { + "import": "./evaluation/langchain.d.ts", + "require": "./evaluation/langchain.d.cts", + "default": "./evaluation/langchain.d.ts" + }, + "import": "./evaluation/langchain.js", + "require": "./evaluation/langchain.cjs" + }, "./schemas": { "types": { "import": "./schemas.d.ts", diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 60fc42692..1c571c14f 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -11,6 +11,7 @@ const entrypoints = { run_trees: "run_trees", traceable: "traceable", evaluation: "evaluation/index", + "evaluation/langchain": "evaluation/langchain", schemas: "schemas", langchain: "langchain", wrappers: "wrappers/index", diff --git a/js/src/evaluation/langchain.ts b/js/src/evaluation/langchain.ts new file mode 100644 index 000000000..87010c7ec --- /dev/null +++ b/js/src/evaluation/langchain.ts @@ -0,0 +1,69 @@ +import type { Run, Example } from "../schemas.js"; +import { type LoadEvaluatorOptions, loadEvaluator } from "langchain/evaluation"; +import { getLangchainCallbacks } from "../langchain.js"; + +function isStringifiable( + value: unknown +): value is string | number | boolean | bigint { + return ( + typeof value === "string" || + typeof value === "number" || + typeof value === "boolean" || + typeof value === "bigint" + ); +} + +// utility methods for extracting stringified values +// from unknown inputs and records +function getPrimitiveValue(value: unknown) { + if (isStringifiable(value)) return String(value); + if (!Array.isArray(value) && typeof value === "object" && value != null) { + const values = Object.values(value); + if (values.length === 1 && isStringifiable(values[0])) { + 
return String(values[0]); + } + } + return undefined; +} + +/** + * This utility function loads a LangChain string evaluator and returns a function + * which can be used by newer `evaluate` function. + * + * @param type Type of string evaluator, one of "criteria" or "labeled_criteria + * @param options Options for loading the evaluator + * @returns Evaluator consumable by `evaluate` + */ +export async function getLangchainStringEvaluator( + type: "criteria" | "labeled_criteria", + options: LoadEvaluatorOptions & { + formatEvaluatorInputs?: ( + run: Run, + example: Example + ) => { prediction: string; reference?: string; input?: string }; + } +) { + const evaluator = await loadEvaluator(type, options); + const feedbackKey = getPrimitiveValue(options.criteria) ?? type; + + const formatEvaluatorInputs = + options.formatEvaluatorInputs ?? + ((run: Run, example: Example) => { + const prediction = getPrimitiveValue(run.outputs); + const reference = getPrimitiveValue(example.outputs); + const input = getPrimitiveValue(example.inputs); + + if (prediction == null) throw new Error("Missing prediction"); + if (type === "criteria") return { prediction, input }; + return { prediction, reference, input }; + }); + + return async (run: Run, example: Example) => { + const score = await evaluator.evaluateStrings( + formatEvaluatorInputs(run, example), + { callbacks: await getLangchainCallbacks() } + ); + + return { key: feedbackKey, ...score }; + }; +} diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 505e05424..eb2a6d7f5 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -9,6 +9,7 @@ import { getCurrentRunTree, isTraceableFunction, } from "./traceable.js"; +import { warnOnce } from "./utils/warn.js"; /** * Converts the current run tree active within a traceable-wrapped function @@ -24,6 +25,11 @@ export async function getLangchainCallbacks( const runTree: RunTree | undefined = currentRunTree ?? 
getCurrentRunTree(); if (!runTree) return undefined; + warnOnce( + "Using `getLangchainCallbacks` with newer versions of LangChain might result in unexpected behavior. \n" + + "Consider upgrading LangChain to 0.2.x or higher." + ); + // TODO: CallbackManager.configure() is only async due to LangChainTracer // factory being unnecessarily async. let callbacks = await CallbackManager.configure(); diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 91c812928..c88c4c193 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -7,15 +7,7 @@ import { } from "./utils/env.js"; import { Client } from "./client.js"; import { isTracingEnabled } from "./env.js"; - -const warnedMessages: Record = {}; - -function warnOnce(message: string): void { - if (!warnedMessages[message]) { - console.warn(message); - warnedMessages[message] = true; - } -} +import { warnOnce } from "./utils/warn.js"; function stripNonAlphanumeric(input: string) { return input.replace(/[-:.]/g, ""); diff --git a/js/src/tests/lcls_handoff.int.test.ts b/js/src/tests/lcls_handoff.int.test.ts index ff8382a75..3a064a07f 100644 --- a/js/src/tests/lcls_handoff.int.test.ts +++ b/js/src/tests/lcls_handoff.int.test.ts @@ -35,19 +35,18 @@ test.concurrent( }; // Define the two nodes we will cycle between - workflow.addNode( - "agent", - new RunnableLambda({ - func: async () => new HumanMessage({ content: "Hello!" }), - }) - ); - workflow.addNode("action", new RunnableLambda({ func: myFunc })); + workflow + .addNode( + "agent", + new RunnableLambda({ + func: async () => new HumanMessage({ content: "Hello!" 
}), + }) + ) + .addNode("action", new RunnableLambda({ func: myFunc })) + .addEdge("__start__", "agent") + .addEdge("agent", "action") + .addEdge("action", "__end__"); - // Set the entrypoint as `agent` - // This means that this node is the first one called - workflow.setEntryPoint("agent"); - workflow.addEdge("agent", "action"); - workflow.setFinishPoint("action"); const app = workflow.compile(); const tracer = new LangChainTracer({ projectName }); const client = new Client({ diff --git a/js/src/utils/warn.ts b/js/src/utils/warn.ts new file mode 100644 index 000000000..b7bdc70f3 --- /dev/null +++ b/js/src/utils/warn.ts @@ -0,0 +1,8 @@ +const warnedMessages: Record = {}; + +export function warnOnce(message: string): void { + if (!warnedMessages[message]) { + console.warn(message); + warnedMessages[message] = true; + } +} diff --git a/js/tsconfig.json b/js/tsconfig.json index 2a7d03325..d2b424b45 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -36,6 +36,7 @@ "src/run_trees.ts", "src/traceable.ts", "src/evaluation/index.ts", + "src/evaluation/langchain.ts", "src/schemas.ts", "src/langchain.ts", "src/wrappers/index.ts", diff --git a/js/yarn.lock b/js/yarn.lock index 9da4ebf12..1b849910a 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -10,14 +10,7 @@ "@jridgewell/gen-mapping" "^0.3.0" "@jridgewell/trace-mapping" "^0.3.9" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.21.4": - version "7.21.4" - resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.21.4.tgz" - integrity sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g== - dependencies: - "@babel/highlight" "^7.18.6" - -"@babel/code-frame@^7.22.13": +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.21.4", "@babel/code-frame@^7.22.13": version "7.22.13" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.13.tgz#e3c1c099402598483b7a8c46a721d1038803755e" integrity 
sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w== @@ -51,17 +44,7 @@ json5 "^2.2.2" semver "^6.3.0" -"@babel/generator@^7.22.0", "@babel/generator@^7.7.2": - version "7.22.3" - resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.22.3.tgz" - integrity sha512-C17MW4wlk//ES/CJDL51kPNwl+qiBQyN7b9SKyVp11BLGFeSPoVaHrv+MNt8jwQFhQWowW88z1eeBx3pFz9v8A== - dependencies: - "@babel/types" "^7.22.3" - "@jridgewell/gen-mapping" "^0.3.2" - "@jridgewell/trace-mapping" "^0.3.17" - jsesc "^2.5.1" - -"@babel/generator@^7.23.0": +"@babel/generator@^7.22.0", "@babel/generator@^7.23.0", "@babel/generator@^7.7.2": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.23.0.tgz#df5c386e2218be505b34837acbcb874d7a983420" integrity sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g== @@ -132,25 +115,12 @@ resolve "^1.14.2" semver "^6.1.2" -"@babel/helper-environment-visitor@^7.18.9", "@babel/helper-environment-visitor@^7.22.1": - version "7.22.1" - resolved "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz" - integrity sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA== - -"@babel/helper-environment-visitor@^7.22.20": +"@babel/helper-environment-visitor@^7.18.9", "@babel/helper-environment-visitor@^7.22.1", "@babel/helper-environment-visitor@^7.22.20": version "7.22.20" resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz#96159db61d34a29dba454c959f5ae4a649ba9167" integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA== -"@babel/helper-function-name@^7.18.9", "@babel/helper-function-name@^7.19.0", "@babel/helper-function-name@^7.21.0": - version "7.21.0" - resolved "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz" - 
integrity sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg== - dependencies: - "@babel/template" "^7.20.7" - "@babel/types" "^7.21.0" - -"@babel/helper-function-name@^7.23.0": +"@babel/helper-function-name@^7.18.9", "@babel/helper-function-name@^7.19.0", "@babel/helper-function-name@^7.21.0", "@babel/helper-function-name@^7.23.0": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz#1f9a3cdbd5b2698a670c30d2735f9af95ed52759" integrity sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw== @@ -158,14 +128,7 @@ "@babel/template" "^7.22.15" "@babel/types" "^7.23.0" -"@babel/helper-hoist-variables@^7.18.6": - version "7.18.6" - resolved "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz" - integrity sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-hoist-variables@^7.22.5": +"@babel/helper-hoist-variables@^7.18.6", "@babel/helper-hoist-variables@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb" integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw== @@ -248,36 +211,19 @@ dependencies: "@babel/types" "^7.20.0" -"@babel/helper-split-export-declaration@^7.18.6": - version "7.18.6" - resolved "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz" - integrity sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA== - dependencies: - "@babel/types" "^7.18.6" - -"@babel/helper-split-export-declaration@^7.22.6": +"@babel/helper-split-export-declaration@^7.18.6", "@babel/helper-split-export-declaration@^7.22.6": 
version "7.22.6" resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz#322c61b7310c0997fe4c323955667f18fcefb91c" integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g== dependencies: "@babel/types" "^7.22.5" -"@babel/helper-string-parser@^7.21.5": - version "7.21.5" - resolved "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz" - integrity sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w== - "@babel/helper-string-parser@^7.22.5": version "7.22.5" resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz#533f36457a25814cf1df6488523ad547d784a99f" integrity sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw== -"@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1": - version "7.19.1" - resolved "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz" - integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w== - -"@babel/helper-validator-identifier@^7.22.20": +"@babel/helper-validator-identifier@^7.19.1", "@babel/helper-validator-identifier@^7.22.20": version "7.22.20" resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0" integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A== @@ -306,15 +252,6 @@ "@babel/traverse" "^7.22.1" "@babel/types" "^7.22.3" -"@babel/highlight@^7.18.6": - version "7.18.6" - resolved "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz" - integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g== - dependencies: - 
"@babel/helper-validator-identifier" "^7.18.6" - chalk "^2.0.0" - js-tokens "^4.0.0" - "@babel/highlight@^7.22.13": version "7.22.20" resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.20.tgz#4ca92b71d80554b01427815e06f2df965b9c1f54" @@ -324,12 +261,7 @@ chalk "^2.4.2" js-tokens "^4.0.0" -"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.21.9", "@babel/parser@^7.22.0": - version "7.22.4" - resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.22.4.tgz" - integrity sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA== - -"@babel/parser@^7.22.15", "@babel/parser@^7.23.0": +"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.22.0", "@babel/parser@^7.22.15", "@babel/parser@^7.23.0": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.0.tgz#da950e622420bf96ca0d0f2909cdddac3acd8719" integrity sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw== @@ -1013,16 +945,7 @@ dependencies: regenerator-runtime "^0.13.11" -"@babel/template@^7.18.10", "@babel/template@^7.20.7", "@babel/template@^7.21.9", "@babel/template@^7.3.3": - version "7.21.9" - resolved "https://registry.npmjs.org/@babel/template/-/template-7.21.9.tgz" - integrity sha512-MK0X5k8NKOuWRamiEfc3KEJiHMTkGZNUjzMipqCGDDc6ijRl/B7RGSKVGncu4Ro/HdyzzY6cmoXuKI2Gffk7vQ== - dependencies: - "@babel/code-frame" "^7.21.4" - "@babel/parser" "^7.21.9" - "@babel/types" "^7.21.5" - -"@babel/template@^7.22.15": +"@babel/template@^7.18.10", "@babel/template@^7.20.7", "@babel/template@^7.21.9", "@babel/template@^7.22.15", "@babel/template@^7.3.3": version "7.22.15" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.22.15.tgz#09576efc3830f0430f4548ef971dde1350ef2f38" integrity sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w== @@ -1047,16 +970,7 @@ 
debug "^4.1.0" globals "^11.1.0" -"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.20.0", "@babel/types@^7.20.5", "@babel/types@^7.20.7", "@babel/types@^7.21.0", "@babel/types@^7.21.4", "@babel/types@^7.21.5", "@babel/types@^7.22.0", "@babel/types@^7.22.3", "@babel/types@^7.22.4", "@babel/types@^7.3.3", "@babel/types@^7.4.4": - version "7.22.4" - resolved "https://registry.npmjs.org/@babel/types/-/types-7.22.4.tgz" - integrity sha512-Tx9x3UBHTTsMSW85WB2kphxYQVvrZ/t1FxD88IpSgIjiUJlCm9z+xWIDwyo1vffTwSqteqyznB8ZE9vYYk16zA== - dependencies: - "@babel/helper-string-parser" "^7.21.5" - "@babel/helper-validator-identifier" "^7.19.1" - to-fast-properties "^2.0.0" - -"@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0": +"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.20.0", "@babel/types@^7.20.5", "@babel/types@^7.20.7", "@babel/types@^7.21.4", "@babel/types@^7.21.5", "@babel/types@^7.22.0", "@babel/types@^7.22.15", "@babel/types@^7.22.3", "@babel/types@^7.22.4", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.3.3", "@babel/types@^7.4.4": version "7.23.0" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.0.tgz#8c1f020c9df0e737e4e247c0619f58c68458aaeb" integrity sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg== @@ -1386,29 +1300,50 @@ "@jridgewell/resolve-uri" "3.1.0" "@jridgewell/sourcemap-codec" "1.4.14" -"@langchain/core@^0.1.27", "@langchain/core@^0.1.32": - version "0.1.32" - resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.1.32.tgz#cd6748cc91b8b208ba7c736c16c6dbeb291dc86c" - integrity sha512-7b8wBQMej2QxaDDS0fCQa3/zrA2raTh1RBe2h1som7QxFpWJkHSxwVwdvGUotX9SopmsY99TK54sK0amfDvBBA== +"@langchain/core@0.2.0", "@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>0.1.56 <0.3.0", "@langchain/core@^0.1.61", "@langchain/core@^0.2.0", "@langchain/core@~0.2.0": + version "0.2.0" + resolved 
"https://registry.yarnpkg.com/@langchain/core/-/core-0.2.0.tgz#19c6374a5ad80daf8e14cb58582bc988109a1403" + integrity sha512-UbCJUp9eh2JXd9AW/vhPbTgtZoMgTqJgSan5Wf/EP27X8JM65lWdCOpJW+gHyBXvabbyrZz3/EGaptTUL5gutw== dependencies: ansi-styles "^5.0.0" camelcase "6" decamelize "1.2.0" - js-tiktoken "^1.0.8" - langsmith "~0.1.1" + js-tiktoken "^1.0.12" + langsmith "~0.1.7" ml-distance "^4.0.0" + mustache "^4.2.0" p-queue "^6.6.2" p-retry "4" uuid "^9.0.0" zod "^3.22.4" zod-to-json-schema "^3.22.3" -"@langchain/langgraph@^0.0.8": - version "0.0.8" - resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.8.tgz#910d6190effee4433fc829c3e76940c4565e53e8" - integrity sha512-NVARwCBPfRqCDS2d/VBMfbGIoZij6kB6Q+HUtTFCsfZ36FnovQ6L3YwKT11SmTf6xIil9S0zvK4gPf3asLzRaw== +"@langchain/langgraph@^0.0.19": + version "0.0.19" + resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.19.tgz#c1cfeee7d0e2b91dd31cba7144f8a7283babc61d" + integrity sha512-V0t40qbwUyzEpL3Q0jHPVTVljdLc3YJCHIF9Q+sw9HRWwfBO1nWJHHbCxgVzeJ2NsX1X/dUyNkq8LbSEsTYpTQ== + dependencies: + "@langchain/core" "^0.1.61" + uuid "^9.0.1" + +"@langchain/openai@~0.0.28": + version "0.0.33" + resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.0.33.tgz#af88d815ff0095018c879d3a1a5a32b2795b5c69" + integrity sha512-hTBo9y9bHtFvMT5ySBW7TrmKhLSA91iNahigeqAFBVrLmBDz+6rzzLFc1mpq6JEAR3fZKdaUXqso3nB23jIpTw== + dependencies: + "@langchain/core" ">0.1.56 <0.3.0" + js-tiktoken "^1.0.12" + openai "^4.41.1" + zod "^3.22.4" + zod-to-json-schema "^3.22.3" + +"@langchain/textsplitters@~0.0.0": + version "0.0.2" + resolved "https://registry.yarnpkg.com/@langchain/textsplitters/-/textsplitters-0.0.2.tgz#500baa8341fb7fc86fca531a4192665a319504a3" + integrity sha512-6bQOuYHTGYlkgPY/8M5WPq4nnXZpEysGzRopQCYjg2WLcEoIPUMMrXsAaNNdvU3BOeMrhin8izvpDPD165hX6Q== dependencies: - "@langchain/core" "^0.1.27" + "@langchain/core" ">0.1.0 <0.3.0" + js-tiktoken "^1.0.12" "@nodelib/fs.scandir@2.1.5": version "2.1.5" 
@@ -1936,6 +1871,11 @@ base64-js@^1.5.1: resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== +binary-extensions@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" + integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== + binary-search@^1.3.5: version "1.3.6" resolved "https://registry.yarnpkg.com/binary-search/-/binary-search-1.3.6.tgz#e32426016a0c5092f0f3598836a1c7da3560565c" @@ -2013,7 +1953,7 @@ caniuse-lite@^1.0.30001489: resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001491.tgz" integrity sha512-17EYIi4TLnPiTzVKMveIxU5ETlxbSO3B6iPvMbprqnKh4qJsQGk5Nh1Lp4jIMAE0XfrujsJuWZAM3oJdMHaKBA== -chalk@^2.0.0, chalk@^2.4.2: +chalk@^2.4.2: version "2.4.2" resolved "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz" integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== @@ -3474,10 +3414,10 @@ jest@^29.5.0: import-local "^3.0.2" jest-cli "^29.5.0" -js-tiktoken@^1.0.8: - version "1.0.10" - resolved "https://registry.yarnpkg.com/js-tiktoken/-/js-tiktoken-1.0.10.tgz#2b343ec169399dcee8f9ef9807dbd4fafd3b30dc" - integrity sha512-ZoSxbGjvGyMT13x6ACo9ebhDha/0FHdKA+OsQcMOWcm1Zs7r90Rhk5lhERLzji+3rA7EKpXCgwXcM5fF3DMpdA== +js-tiktoken@^1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/js-tiktoken/-/js-tiktoken-1.0.12.tgz#af0f5cf58e5e7318240d050c8413234019424211" + integrity sha512-L7wURW1fH9Qaext0VzaUDpFGVQgjkdE3Dgsy9/+yXyGEpBKnylTd0mU0bfbNkKDlXRb6TEsZkwuflu1B8uQbJQ== dependencies: base64-js "^1.5.1" @@ -3538,12 +3478,44 @@ json5@^2.2.2, json5@^2.2.3: resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz" integrity 
sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== +jsonpointer@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-5.0.1.tgz#2110e0af0900fd37467b5907ecd13a7884a1b559" + integrity sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ== + kleur@^3.0.3: version "3.0.3" resolved "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -langsmith@~0.1.1: +langchain@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.2.0.tgz#555d84538962720cd7223f6c3ca4bd060978ebf3" + integrity sha512-8c7Dg9OIPk4lFIQGyfOytXbUGLLSsxs9MV53cLODspkOGzaUpwy5FGBie30SrOxIEFJo+FDaJgpDAFO3Xi4NMw== + dependencies: + "@langchain/core" "~0.2.0" + "@langchain/openai" "~0.0.28" + "@langchain/textsplitters" "~0.0.0" + binary-extensions "^2.2.0" + js-tiktoken "^1.0.12" + js-yaml "^4.1.0" + jsonpointer "^5.0.1" + langchainhub "~0.0.8" + langsmith "~0.1.7" + ml-distance "^4.0.0" + openapi-types "^12.1.3" + p-retry "4" + uuid "^9.0.0" + yaml "^2.2.1" + zod "^3.22.4" + zod-to-json-schema "^3.22.3" + +langchainhub@~0.0.8: + version "0.0.10" + resolved "https://registry.yarnpkg.com/langchainhub/-/langchainhub-0.0.10.tgz#7579440a3255d67571b7046f3910593c5664f064" + integrity sha512-mOVso7TGTMSlvTTUR1b4zUIMtu8zgie/pcwRm1SeooWwuHYMQovoNXjT6gEjvWEZ6cjt4gVH+1lu2tp1/phyIQ== + +langsmith@~0.1.7: version "0.1.25" resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.25.tgz#3d06b6fc62abb1a6fc16540d40ddb48bd795f128" integrity sha512-Hft4Y1yoMgFgCUXVQklRZ7ndmLQ/6FmRZE9P3u5BRdMq5Fa0hpg8R7jd7bLLBXkAjqcFvWo0AGhpb8MMY5FAiA== @@ -3717,16 +3689,21 @@ ml-tree-similarity@^1.0.0: binary-search "^1.3.5" num-sort "^2.0.0" -ms@2.1.2, ms@^2.1.1: +ms@2.1.2: version "2.1.2" resolved "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz" integrity 
sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.0.0: +ms@^2.0.0, ms@^2.1.1: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== +mustache@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/mustache/-/mustache-4.2.0.tgz#e5892324d60a12ec9c2a73359edca52972bf6f64" + integrity sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ== + natural-compare-lite@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz" @@ -3819,10 +3796,10 @@ onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" -openai@^4.38.5: - version "4.38.5" - resolved "https://registry.yarnpkg.com/openai/-/openai-4.38.5.tgz#87de78eed9f7e63331fb6b1307d8c9dd986b39d0" - integrity sha512-Ym5GJL98ZhLJJ7enBx53jjG3vwN/fsB+Ozh46nnRZZS9W1NiYqbwkJ+sXd3dkCIiWIgcyyOPL2Zr8SQAzbpj3g== +openai@^4.38.5, openai@^4.41.1: + version "4.47.1" + resolved "https://registry.yarnpkg.com/openai/-/openai-4.47.1.tgz#1d23c7a8eb3d7bcdc69709cd905f4c9af0181dba" + integrity sha512-WWSxhC/69ZhYWxH/OBsLEirIjUcfpQ5+ihkXKp06hmeYXgBBIUCa9IptMzYx6NdkiOCsSGYCnTIsxaic3AjRCQ== dependencies: "@types/node" "^18.11.18" "@types/node-fetch" "^2.6.4" @@ -3833,6 +3810,11 @@ openai@^4.38.5: node-fetch "^2.6.7" web-streams-polyfill "^3.2.1" +openapi-types@^12.1.3: + version "12.1.3" + resolved "https://registry.yarnpkg.com/openapi-types/-/openapi-types-12.1.3.tgz#471995eb26c4b97b7bd356aacf7b91b73e777dd3" + integrity sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw== + optionator@^0.9.1: version "0.9.1" resolved "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz" @@ -4495,10 +4477,10 @@ uri-js@^4.2.2: dependencies: punycode "^2.1.0" -uuid@^9.0.0: - version "9.0.0" - resolved 
"https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5" - integrity sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg== +uuid@^9.0.0, uuid@^9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" + integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== v8-compile-cache-lib@^3.0.1: version "3.0.1" @@ -4616,6 +4598,11 @@ yallist@^4.0.0: resolved "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz" integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== +yaml@^2.2.1: + version "2.4.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.4.2.tgz#7a2b30f2243a5fc299e1f14ca58d475ed4bc5362" + integrity sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA== + yargs-parser@^21.0.1, yargs-parser@^21.1.1: version "21.1.1" resolved "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz" From 89a912739ac22d1d19bc678a96f8050bf908c390 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 21 May 2024 02:02:48 +0200 Subject: [PATCH 090/373] Remove warning for now --- js/src/langchain.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index eb2a6d7f5..505e05424 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -9,7 +9,6 @@ import { getCurrentRunTree, isTraceableFunction, } from "./traceable.js"; -import { warnOnce } from "./utils/warn.js"; /** * Converts the current run tree active within a traceable-wrapped function @@ -25,11 +24,6 @@ export async function getLangchainCallbacks( const runTree: RunTree | undefined = currentRunTree ?? getCurrentRunTree(); if (!runTree) return undefined; - warnOnce( - "Using `getLangchainCallbacks` with newer versions of LangChain might result in unexpected behavior. 
\n" + - "Consider upgrading LangChain to 0.2.x or higher." - ); - // TODO: CallbackManager.configure() is only async due to LangChainTracer // factory being unnecessarily async. let callbacks = await CallbackManager.configure(); From 0733c8ca32e09bc7ca59dae8ae49b78f33f5a18b Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 21 May 2024 03:19:26 +0200 Subject: [PATCH 091/373] Denote the kwarg as internal --- python/langsmith/run_helpers.py | 4 ++-- python/langsmith/wrappers/_openai.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index f00ebc4cd..c851fd920 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -237,7 +237,7 @@ def traceable( reduce_fn: Optional[Callable] = None, project_name: Optional[str] = None, process_inputs: Optional[Callable[[dict], dict]] = None, - invocation_params_fn: Optional[Callable[[dict], dict]] = None, + _invocation_params_fn: Optional[Callable[[dict], dict]] = None, ) -> Callable[[Callable[..., R]], SupportsLangsmithExtra[R]]: ... 
@@ -405,7 +405,7 @@ def manual_extra_function(x): project_name=kwargs.pop("project_name", None), run_type=run_type, process_inputs=kwargs.pop("process_inputs", None), - invocation_params_fn=kwargs.pop("invocation_params_fn", None), + invocation_params_fn=kwargs.pop("_invocation_params_fn", None), ) if kwargs: warnings.warn( diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index cd9c29385..de6c77ded 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -178,7 +178,7 @@ def create(*args, stream: bool = False, **kwargs): run_type="llm", reduce_fn=reduce_fn if stream else None, process_inputs=_strip_not_given, - invocation_params_fn=invocation_params_fn, + _invocation_params_fn=invocation_params_fn, **textra, ) @@ -192,7 +192,7 @@ async def acreate(*args, stream: bool = False, **kwargs): run_type="llm", reduce_fn=reduce_fn if stream else None, process_inputs=_strip_not_given, - invocation_params_fn=invocation_params_fn, + _invocation_params_fn=invocation_params_fn, **textra, ) if stream: From ed3463438b0e2d6fb1bc65257d035938620bf47d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 21 May 2024 18:04:22 +0200 Subject: [PATCH 092/373] Use for await, make sure readable stream works as well --- js/src/langchain.ts | 7 ++--- js/src/tests/traceable_langchain.test.ts | 35 ++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 9aa0825dc..a3a4de845 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -134,11 +134,8 @@ export class RunnableTraceable extends Runnable< const result = await this.invoke(input, options); if (isAsyncIterable(result)) { - const iterator = result[Symbol.asyncIterator](); - while (true) { - const { done, value } = await iterator.next(); - if (done) break; - yield value as RunOutput; + for await (const item of result) { + yield item as RunOutput; } return; } diff --git 
a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index 779d4d7bf..c5d03e027 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -247,6 +247,41 @@ describe("to traceable", () => { }); }); + test("readable stream", async () => { + const { client, callSpy } = mockClient(); + + const source = RunnableTraceable.from( + traceable(async function (input: { text: string }) { + const readStream = new ReadableStream({ + async pull(controller) { + for (const item of input.text.split(" ")) { + controller.enqueue(item); + } + controller.close(); + }, + }); + + return readStream; + }) + ); + + const tokens: unknown[] = []; + for await (const chunk of await source.stream( + { text: "Hello world" }, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore client might be of different type + { callbacks: [new LangChainTracer({ client })] } + )) { + tokens.push(chunk); + } + + expect(tokens).toEqual(["Hello", "world"]); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [":0"], + edges: [], + }); + }); + test("async generator stream", async () => { const { client, callSpy } = mockClient(); const source = RunnableTraceable.from( From 5dcbf65675089392fb94edd9150eb59b921246bd Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 20 May 2024 16:14:03 +0200 Subject: [PATCH 093/373] chore(js): Bump version to 0.1.27 --- js/package.json | 4 ++-- js/src/index.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index 93b92e6cf..1b825cb0b 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.26", + "version": "0.1.27", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -248,4 +248,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of 
file diff --git a/js/src/index.ts b/js/src/index.ts index 01f8dbfa7..2ed0e6d74 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.26"; +export const __version__ = "0.1.27"; From 7ae5b2093ffebb35d041ab5c255d7339aabc9114 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 22 May 2024 01:15:04 +0200 Subject: [PATCH 094/373] Bump to 0.1.28 --- js/package.json | 4 ++-- js/src/index.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index 1b825cb0b..9cd0018c7 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.27", + "version": "0.1.28", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -248,4 +248,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/src/index.ts b/js/src/index.ts index 2ed0e6d74..4ddce61f7 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.27"; +export const __version__ = "0.1.28"; From 8e8cea17af44ba416c8b23e891ca165c75b76c0a Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 22 May 2024 13:17:54 -0700 Subject: [PATCH 095/373] [Python] Add Null Sentry (#722) Previously, you couldn't write `expect(None).to_*` --- python/langsmith/_expect.py | 67 +++++++++++++++++++++----- python/pyproject.toml | 2 +- python/tests/unit_tests/test_expect.py | 20 ++++++++ 3 files changed, 76 insertions(+), 13 deletions(-) create mode 100644 python/tests/unit_tests/test_expect.py diff --git a/python/langsmith/_expect.py b/python/langsmith/_expect.py index 
db914c31e..75faa3f19 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -48,7 +48,15 @@ def test_output_semantically_close(): import atexit import concurrent.futures import inspect -from typing import TYPE_CHECKING, Any, Callable, Optional, Union, overload +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Union, + overload, +) from langsmith import client as ls_client from langsmith import run_helpers as rh @@ -59,18 +67,34 @@ def test_output_semantically_close(): from langsmith._internal._embedding_distance import EmbeddingConfig +# Sentinel class used until PEP 0661 is accepted +class _NULL_SENTRY: + """A sentinel singleton class used to distinguish omitted keyword arguments + from those passed in with the value None (which may have different behavior). + """ # noqa: D205 + + def __bool__(self) -> Literal[False]: + return False + + def __repr__(self) -> str: + return "NOT_GIVEN" + + +NOT_GIVEN = _NULL_SENTRY() + + class _Matcher: """A class for making assertions on expectation values.""" def __init__( self, - client: ls_client.Client, + client: Optional[ls_client.Client], key: str, value: Any, _executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, run_id: Optional[str] = None, ): - self.client = client + self._client = client self.key = key self.value = value self._executor = _executor or concurrent.futures.ThreadPoolExecutor( @@ -81,8 +105,10 @@ def __init__( def _submit_feedback(self, score: int, message: Optional[str] = None) -> None: if not ls_utils.test_tracking_is_disabled(): + if not self._client: + self._client = ls_client.Client() self._executor.submit( - self.client.create_feedback, + self._client.create_feedback, run_id=self._run_id, key="expectation", score=score, @@ -179,6 +205,18 @@ def to_equal(self, value: float) -> None: "to_equal", ) + def to_be_none(self) -> None: + """Assert that the expectation value is None. 
+ + Raises: + AssertionError: If the expectation value is not None. + """ + self._assert( + self.value is None, + f"Expected {self.key} to be None, but got {self.value}", + "to_be_none", + ) + def to_contain(self, value: Any) -> None: """Assert that the expectation value contains the given value. @@ -216,7 +254,7 @@ class _Expect: """A class for setting expectations on test results.""" def __init__(self, *, client: Optional[ls_client.Client] = None): - self.client = client or ls_client.Client() + self._client = client self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) atexit.register(self.executor.shutdown, wait=True) @@ -271,7 +309,7 @@ def embedding_distance( }, ) return _Matcher( - self.client, "embedding_distance", score, _executor=self.executor + self._client, "embedding_distance", score, _executor=self.executor ) def edit_distance( @@ -321,7 +359,7 @@ def edit_distance( }, ) return _Matcher( - self.client, + self._client, "edit_distance", score, _executor=self.executor, @@ -339,7 +377,7 @@ def value(self, value: Any) -> _Matcher: Examples: >>> expect.value(10).to_be_less_than(20) """ - return _Matcher(self.client, "value", value, _executor=self.executor) + return _Matcher(self._client, "value", value, _executor=self.executor) def score( self, @@ -370,7 +408,7 @@ def score( "comment": comment, }, ) - return _Matcher(self.client, key, score, _executor=self.executor) + return _Matcher(self._client, key, score, _executor=self.executor) ## Private Methods @@ -381,10 +419,13 @@ def __call__(self, value: Any, /) -> _Matcher: ... def __call__(self, /, *, client: ls_client.Client) -> _Expect: ... 
def __call__( - self, value: Optional[Any] = None, /, client: Optional[ls_client.Client] = None + self, + value: Optional[Any] = NOT_GIVEN, + /, + client: Optional[ls_client.Client] = None, ) -> Union[_Expect, _Matcher]: expected = _Expect(client=client) - if value is not None: + if value is not NOT_GIVEN: return expected.value(value) return expected @@ -392,8 +433,10 @@ def _submit_feedback(self, key: str, results: dict): current_run = rh.get_current_run_tree() run_id = current_run.trace_id if current_run else None if not ls_utils.test_tracking_is_disabled(): + if not self._client: + self._client = ls_client.Client() self.executor.submit( - self.client.create_feedback, run_id=run_id, key=key, **results + self._client.create_feedback, run_id=run_id, key=key, **results ) diff --git a/python/pyproject.toml b/python/pyproject.toml index 976f30004..f17acaf89 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.60" +version = "0.1.61" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_expect.py b/python/tests/unit_tests/test_expect.py new file mode 100644 index 000000000..cdf7ea9b2 --- /dev/null +++ b/python/tests/unit_tests/test_expect.py @@ -0,0 +1,20 @@ +from unittest import mock + +from langsmith import expect +from langsmith._expect import ls_client + + +def _is_none(x: object) -> bool: + return x is None + + +@mock.patch.object(ls_client, "Client", autospec=True) +def test_expect_explicit_none(mock_client: mock.Mock) -> None: + expect(None).against(_is_none) + expect(None).to_be_none() + expect.score(1).to_equal(1) + expect.score(1).to_be_less_than(2) + expect.score(1).to_be_greater_than(0) + expect.score(1).to_be_between(0, 2) + expect.score(1).to_be_approximately(1, 2) + expect({1, 2}).to_contain(1) From a8eab4a012310b93ee38ec9ab2ecd3523c555918 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Wed, 22 May 2024 14:42:15 -0700 Subject: [PATCH 096/373] implement repetitions in js and python --- js/src/evaluation/_runner.ts | 21 ++++++++++- js/src/tests/evaluate.int.test.ts | 30 +++++++++++++++ python/langsmith/evaluation/_arunner.py | 50 ++++++++++++++++++++----- python/langsmith/evaluation/_runner.py | 17 +++++++++ 4 files changed, 107 insertions(+), 11 deletions(-) diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 0948d6e7a..4b0dd2015 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -67,6 +67,7 @@ interface _ExperimentManagerArgs { unknown >; examples?: Example[]; + numRepetitions?: number; _runsArray?: Run[]; } @@ -110,6 +111,12 @@ export interface EvaluateOptions { * @default undefined */ client?: Client; + /** + * The number of repetitions to perform. Each example + * will be run this many times. 
+ * @default 1 + */ + numRepetitions?: number; } export function evaluate( @@ -149,6 +156,8 @@ class _ExperimentManager { _examples?: Example[]; + _numRepetitions?: number; + _runsArray?: Run[]; client: Client; @@ -183,7 +192,15 @@ class _ExperimentManager { for await (const example of unresolvedData) { exs.push(example); } - this.setExamples(exs); + if (this._numRepetitions) { + const repeatedExamples = []; + for (let i = 0; i < this._numRepetitions; i++) { + repeatedExamples.push(...exs); + } + this.setExamples(repeatedExamples); + } else { + this.setExamples(exs); + } } return this._examples; } @@ -264,6 +281,7 @@ class _ExperimentManager { this._evaluationResults = args.evaluationResults; this._summaryResults = args.summaryResults; + this._numRepetitions = args.numRepetitions; } _getExperiment(): TracerSession { @@ -803,6 +821,7 @@ async function _evaluate( metadata: fields.metadata, experiment: experiment_ ?? fields.experimentPrefix, runs: newRuns ?? undefined, + numRepetitions: fields.numRepetitions ?? 
1, }).start(); if (_isCallable(target)) { diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 5e1321d06..00f0e0fde 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -77,6 +77,36 @@ test("evaluate can evaluate", async () => { expect(secondRunResults.results).toHaveLength(0); }); +test("evaluate can repeat", async () => { + const targetFunc = (input: Record) => { + console.log("__input__", input); + return { + foo: input.input + 1, + }; + }; + + const evalRes = await evaluate(targetFunc, { + data: TESTING_DATASET_NAME, + description: "Experiment from evaluate can evaluate integration test", + numRepetitions: 3, + }); + // console.log(evalRes.results) + expect(evalRes.results).toHaveLength(6); + + for (let i = 0; i < 6; i++) { + expect(evalRes.results[i].run).toBeDefined(); + expect(evalRes.results[i].example).toBeDefined(); + expect(evalRes.results[i].evaluationResults).toBeDefined(); + const currRun = evalRes.results[i].run; + // The examples are not always in the same order, so it should always be 2 or 3 + expect(currRun.outputs?.foo).toBeGreaterThanOrEqual(2); + expect(currRun.outputs?.foo).toBeLessThanOrEqual(3); + + const firstRunResults = evalRes.results[i].evaluationResults; + expect(firstRunResults.results).toHaveLength(0); + } +}); + test("evaluate can evaluate with RunEvaluator evaluators", async () => { const targetFunc = (input: { input: number }) => { return { foo: input.input + 1 }; diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index eb344fdbe..5629f2db5 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -17,6 +17,7 @@ List, Optional, Sequence, + TypeVar, Union, cast, ) @@ -61,6 +62,7 @@ async def aevaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: Optional[int] = None, client: 
Optional[langsmith.Client] = None, blocking: bool = True, ) -> AsyncExperimentResults: @@ -81,6 +83,8 @@ async def aevaluate( description (Optional[str]): A description of the experiment. max_concurrency (Optional[int]): The maximum number of concurrent evaluations to run. Defaults to None. + num_repetitions (int): The number of times to repeat the evaluation. + Defaults to 1. client (Optional[langsmith.Client]): The LangSmith client to use. Defaults to None. blocking (bool): Whether to block until the evaluation is complete. @@ -225,6 +229,7 @@ async def aevaluate( experiment_prefix=experiment_prefix, description=description, max_concurrency=max_concurrency, + num_repetitions=num_repetitions, client=client, blocking=blocking, ) @@ -343,6 +348,7 @@ async def _aevaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: Optional[int] = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, experiment: Optional[schemas.TracerSession] = None, @@ -363,6 +369,11 @@ async def _aevaluate( metadata=metadata, experiment=experiment_ or experiment_prefix, description=description, + num_repetitions=( + num_repetitions + if num_repetitions is not None and num_repetitions > 0 + else 1 + ), runs=runs, ).astart() cache_dir = ls_utils.get_cache_dir(None) @@ -423,6 +434,7 @@ def __init__( evaluation_results: Optional[AsyncIterable[EvaluationResults]] = None, summary_results: Optional[AsyncIterable[EvaluationResults]] = None, description: Optional[str] = None, + num_repetitions: int = 1, ): super().__init__( experiment=experiment, @@ -437,10 +449,16 @@ def __init__( ) self._evaluation_results = evaluation_results self._summary_results = summary_results + self._num_repetitions = num_repetitions - def aget_examples(self) -> AsyncIterator[schemas.Example]: + async def aget_examples(self) -> AsyncIterator[schemas.Example]: if self._examples is None: self._examples = 
_aresolve_data(self._data, client=self.client) + if self._num_repetitions > 1: + self._examples = async_chain_from_iterable( + aitertools.atee(self._examples, self._num_repetitions) + ) + self._examples, examples_iter = aitertools.atee( aitertools.ensure_async_iterator(self._examples), 2, lock=asyncio.Lock() ) @@ -450,7 +468,7 @@ async def get_dataset_id(self) -> str: if self._experiment is None or not getattr( self._experiment, "reference_dataset_id", None ): - example = await aitertools.py_anext(self.aget_examples()) + example = await aitertools.py_anext(await self.aget_examples()) if example is None: raise ValueError("No examples found in the dataset.") return str(example.dataset_id) @@ -467,7 +485,7 @@ async def aget_runs(self) -> AsyncIterator[schemas.Run]: async def aget_evaluation_results(self) -> AsyncIterator[EvaluationResults]: if self._evaluation_results is None: - async for _ in self.aget_examples(): + async for _ in await self.aget_examples(): yield {"results": []} else: self._evaluation_results, evaluation_results = aitertools.atee( @@ -479,13 +497,13 @@ async def aget_evaluation_results(self) -> AsyncIterator[EvaluationResults]: yield result async def astart(self) -> _AsyncExperimentManager: - first_example = await aitertools.py_anext(self.aget_examples()) + first_example = await aitertools.py_anext(await self.aget_examples()) if not first_example: raise ValueError("No examples found in the dataset.") project = self._get_project(first_example) self._print_experiment_start(project, first_example) return self.__class__( - self.aget_examples(), + await self.aget_examples(), experiment=project, metadata=self._metadata, client=self.client, @@ -535,7 +553,7 @@ async def awith_summary_evaluators( wrapped_evaluators = _wrap_summary_evaluators(summary_evaluators) aggregate_feedback_gen = self._aapply_summary_evaluators(wrapped_evaluators) return _AsyncExperimentManager( - self.aget_examples(), + await self.aget_examples(), experiment=self._experiment, 
metadata=self._metadata, client=self.client, @@ -546,7 +564,7 @@ async def awith_summary_evaluators( async def aget_results(self) -> AsyncIterator[ExperimentResultRow]: async for run, example, evaluation_results in aitertools.async_zip( - self.aget_runs(), self.aget_examples(), self.aget_evaluation_results() + self.aget_runs(), await self.aget_examples(), self.aget_evaluation_results() ): yield ExperimentResultRow( run=run, @@ -573,7 +591,7 @@ async def _apredict( fn = _ensure_async_traceable(target) async def predict_all(): - async for example in self.aget_examples(): + async for example in await self.aget_examples(): # Yield the coroutine to be awaited later yield _aforward( fn, example, self.experiment_name, self._metadata, self.client @@ -645,7 +663,7 @@ async def _aapply_summary_evaluators( self, summary_evaluators: Sequence[SUMMARY_EVALUATOR_T] ) -> AsyncIterator[EvaluationResults]: runs, examples = [], [] - async_examples = aitertools.ensure_async_iterator(self.aget_examples()) + async_examples = aitertools.ensure_async_iterator(await self.aget_examples()) async for run, example in aitertools.async_zip( self.aget_runs(), async_examples ): @@ -694,7 +712,7 @@ async def _aapply_summary_evaluators( async def _get_dataset_version(self) -> Optional[str]: modified_at = [] - async for example in self.aget_examples(): + async for example in await self.aget_examples(): if example.modified_at: # Should always be defined in practice when fetched, # but the typing permits None @@ -823,3 +841,15 @@ def _aresolve_data( if isinstance(data, AsyncIterable): return aitertools.ensure_async_iterator(data) return aitertools.ensure_async_iterator(_resolve_data(data, client=client)) + + +T = TypeVar("T") + + +async def async_chain_from_iterable( + iterable: Iterable[AsyncIterable[T]], +) -> AsyncIterator[T]: + """Chain multiple async iterables.""" + for sub_iterable in iterable: + async for item in sub_iterable: + yield item diff --git a/python/langsmith/evaluation/_runner.py 
b/python/langsmith/evaluation/_runner.py index 27910b90b..cf31dc201 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -82,6 +82,7 @@ def evaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: Optional[int] = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, ) -> ExperimentResults: @@ -106,6 +107,8 @@ def evaluate( Defaults to None. blocking (bool): Whether to block until the evaluation is complete. Defaults to True. + num_repetitions (int): The number of times to repeat the evaluation. + Defaults to 1. Returns: ExperimentResults: The results of the evaluation. @@ -241,6 +244,7 @@ def evaluate( experiment_prefix=experiment_prefix, description=description, max_concurrency=max_concurrency, + num_repetitions=num_repetitions, client=client, blocking=blocking, ) @@ -766,6 +770,7 @@ def _evaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, + num_repetitions: Optional[int] = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, experiment: Optional[schemas.TracerSession] = None, @@ -785,6 +790,11 @@ def _evaluate( metadata=metadata, experiment=experiment_ or experiment_prefix, description=description, + num_repetitions=( + num_repetitions + if num_repetitions is not None and num_repetitions > 0 + else 1 + ), # If provided, we don't need to create a new experiment. runs=runs, # Create or resolve the experiment. @@ -981,6 +991,7 @@ class _ExperimentManager(_ExperimentManagerMixin): Args: data (DATA_T): The data used for the experiment. Can be a dataset name or ID OR a generator of examples. + num_repetitions (int): The number of times to run over the data. runs (Optional[Iterable[schemas.Run]]): The runs associated with the experiment predictions. 
experiment (Optional[schemas.TracerSession]): The tracer session @@ -1006,6 +1017,7 @@ def __init__( evaluation_results: Optional[Iterable[EvaluationResults]] = None, summary_results: Optional[Iterable[EvaluationResults]] = None, description: Optional[str] = None, + num_repetitions: int = 1, ): super().__init__( experiment=experiment, @@ -1018,11 +1030,16 @@ def __init__( self._runs = runs self._evaluation_results = evaluation_results self._summary_results = summary_results + self._num_repetitions = num_repetitions @property def examples(self) -> Iterable[schemas.Example]: if self._examples is None: self._examples = _resolve_data(self._data, client=self.client) + if self._num_repetitions > 1: + self._examples = itertools.chain.from_iterable( + itertools.tee(self._examples, self._num_repetitions) + ) self._examples, examples_iter = itertools.tee(self._examples) return examples_iter From 9d95e1b2d6b20a2758e0b8e1170fe13e57409115 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Wed, 22 May 2024 14:55:22 -0700 Subject: [PATCH 097/373] Add method to create multiple presigned feedback urls in one call --- python/langsmith/client.py | 71 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index f63a47f65..e2843f5c7 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3808,6 +3808,77 @@ def create_presigned_feedback_token( ls_utils.raise_for_status_with_text(response) return ls_schemas.FeedbackIngestToken(**response.json()) + def create_presigned_feedback_tokens( + self, + run_id: ID_TYPE, + feedback_keys: Sequence[str], + *, + expiration: Optional[datetime.datetime | datetime.timedelta] = None, + feedback_configs: Optional[ + Sequence[Optional[ls_schemas.FeedbackConfig]] + ] = None, + ) -> Sequence[ls_schemas.FeedbackIngestToken]: + """Create a pre-signed URL to send feedback data to. 
+ + This is useful for giving browser-based clients a way to upload + feedback data directly to LangSmith without accessing the + API key. + + Args: + run_id: + feedback_key: + expiration: The expiration time of the pre-signed URL. + Either a datetime or a timedelta offset from now. + Default to 3 hours. + feedback_config: FeedbackConfig or None. + If creating a feedback_key for the first time, + this defines how the metric should be interpreted, + such as a continuous score (w/ optional bounds), + or distribution over categorical values. + + Returns: + The pre-signed URL for uploading feedback data. + """ + if feedback_configs is not None and len(feedback_keys) != len(feedback_configs): + raise ValueError( + "The length of feedback_keys and feedback_configs must be the same." + ) + if not feedback_configs: + feedback_configs = [None] * len(feedback_keys) + body: List[Dict[str, Any]] = [ + { + "run_id": run_id, + "feedback_key": feedback_key, + "feedback_config": feedback_config, + } + for feedback_key, feedback_config in zip(feedback_keys, feedback_configs) + ] + for part in body: + if expiration is None: + part["expires_in"] = ls_schemas.TimeDeltaInput( + days=0, + hours=3, + minutes=0, + ) + elif isinstance(expiration, datetime.datetime): + part["expires_at"] = expiration.isoformat() + elif isinstance(expiration, datetime.timedelta): + part["expires_in"] = ls_schemas.TimeDeltaInput( + days=expiration.days, + hours=expiration.seconds // 3600, + minutes=(expiration.seconds // 60) % 60, + ) + else: + raise ValueError(f"Unknown expiration type: {type(expiration)}") + + response = self.request_with_retries( + "POST", + "/feedback/tokens", + data=_dumps_json(body), + ) + ls_utils.raise_for_status_with_text(response) + return [ls_schemas.FeedbackIngestToken(**part) for part in response.json()] + def list_presigned_feedback_tokens( self, run_id: ID_TYPE, From b8d7ee265c200b50fe8f16a7d07e87b129950584 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Wed, 22 May 2024 
16:40:58 -0700 Subject: [PATCH 098/373] Fix --- python/langsmith/client.py | 40 +++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index e2843f5c7..cc9ca49c0 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3839,38 +3839,42 @@ def create_presigned_feedback_tokens( Returns: The pre-signed URL for uploading feedback data. """ + # validate if feedback_configs is not None and len(feedback_keys) != len(feedback_configs): raise ValueError( "The length of feedback_keys and feedback_configs must be the same." ) if not feedback_configs: feedback_configs = [None] * len(feedback_keys) + # build expiry option + expires_in, expires_at = None, None + if expiration is None: + expires_in = ls_schemas.TimeDeltaInput( + days=0, + hours=3, + minutes=0, + ) + elif isinstance(expiration, datetime.datetime): + expires_at = expiration.isoformat() + elif isinstance(expiration, datetime.timedelta): + expires_in = ls_schemas.TimeDeltaInput( + days=expiration.days, + hours=expiration.seconds // 3600, + minutes=(expiration.seconds // 60) % 60, + ) + else: + raise ValueError(f"Unknown expiration type: {type(expiration)}") + # assemble body, one entry per key body: List[Dict[str, Any]] = [ { "run_id": run_id, "feedback_key": feedback_key, "feedback_config": feedback_config, + "expires_in": expires_in, + "expires_at": expires_at, } for feedback_key, feedback_config in zip(feedback_keys, feedback_configs) ] - for part in body: - if expiration is None: - part["expires_in"] = ls_schemas.TimeDeltaInput( - days=0, - hours=3, - minutes=0, - ) - elif isinstance(expiration, datetime.datetime): - part["expires_at"] = expiration.isoformat() - elif isinstance(expiration, datetime.timedelta): - part["expires_in"] = ls_schemas.TimeDeltaInput( - days=expiration.days, - hours=expiration.seconds // 3600, - minutes=(expiration.seconds // 60) % 60, - ) - else: - raise 
ValueError(f"Unknown expiration type: {type(expiration)}") - response = self.request_with_retries( "POST", "/feedback/tokens", From 1157eb75939f6e95def887600b64c64c098fd04d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 18:30:25 -0700 Subject: [PATCH 099/373] chore(deps): bump requests from 2.31.0 to 2.32.2 in /python (#723) --- python/poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/poetry.lock b/python/poetry.lock index da2c1e35d..e19f50404 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1104,13 +1104,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.2" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, + {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, ] [package.dependencies] From 505e779cc1a933cae92ef2a965951926d384eb33 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 22 May 2024 18:47:19 -0700 Subject: [PATCH 100/373] [Python] Release (#726) --- python/poetry.lock | 11 ----------- python/pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/python/poetry.lock b/python/poetry.lock index e19f50404..14cb338fb 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1054,7 +1054,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1062,16 +1061,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1088,7 +1079,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = 
"PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1096,7 +1086,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, diff --git a/python/pyproject.toml b/python/pyproject.toml index f17acaf89..59113f2d8 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.61" +version = "0.1.62" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From 61e1b4672b7751e5b493b5420f5f750208fab017 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 23 May 2024 09:53:22 -0400 Subject: [PATCH 101/373] add number of repetitions to metadata --- js/src/evaluation/_runner.ts | 1 + js/src/tests/evaluate.int.test.ts | 1 - python/langsmith/evaluation/_arunner.py | 18 ++++++++++++++++++ python/langsmith/evaluation/_runner.py | 1 + 4 files changed, 20 insertions(+), 1 deletion(-) diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 4b0dd2015..0ac4924fe 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -357,6 +357,7 @@ class _ExperimentManager { const firstExample = examples[0]; const project = await this._getProject(firstExample); await this._printExperimentStart(); + this._metadata["num_repetitions"] = this._numRepetitions; return new _ExperimentManager({ examples, experiment: project, diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 00f0e0fde..0bd5d94a7 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -90,7 +90,6 @@ test("evaluate can repeat", async () => { description: "Experiment from evaluate can evaluate integration test", numRepetitions: 3, }); - // console.log(evalRes.results) expect(evalRes.results).toHaveLength(6); for (let i = 0; i < 6; i++) { diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index 5629f2db5..df3465184 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -502,6 +502,7 @@ async def astart(self) -> _AsyncExperimentManager: raise ValueError("No examples found in the dataset.") project = self._get_project(first_example) self._print_experiment_start(project, first_example) + self._metadata["num_repetitions"] = self._num_repetitions return self.__class__( await self.aget_examples(), 
experiment=project, @@ -721,6 +722,22 @@ async def _get_dataset_version(self) -> Optional[str]: max_modified_at = max(modified_at) if modified_at else None return max_modified_at.isoformat() if max_modified_at else None + async def _get_dataset_splits(self) -> Optional[list[str]]: + splits = set() + async for example in await self.aget_examples(): + if ( + example.metadata + and example.metadata.get("dataset_split") + and isinstance(example.metadata["dataset_split"], list) + ): + for split in example.metadata["dataset_split"]: + if isinstance(split, str): + splits.add(split) + else: + splits.add("base") + + return list(splits) + async def _aend(self) -> None: experiment = self._experiment if experiment is None: @@ -728,6 +745,7 @@ async def _aend(self) -> None: project_metadata = self._get_experiment_metadata() project_metadata["dataset_version"] = await self._get_dataset_version() + project_metadata["dataset_splits"] = await self._get_dataset_splits() self.client.update_project( experiment.id, end_time=datetime.datetime.now(datetime.timezone.utc), diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index cf31dc201..74446aa7f 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -1073,6 +1073,7 @@ def start(self) -> _ExperimentManager: first_example = next(itertools.islice(self.examples, 1)) project = self._get_project(first_example) self._print_experiment_start(project, first_example) + self._metadata["num_repetitions"] = self._num_repetitions return self.__class__( self.examples, experiment=project, From 88d70c82da17d7c968df501c170968eca4ad9c1c Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 23 May 2024 11:06:03 -0400 Subject: [PATCH 102/373] add tests for repetitions --- python/tests/evaluation/test_evaluation.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git 
a/python/tests/evaluation/test_evaluation.py b/python/tests/evaluation/test_evaluation.py index 234aeb6f0..9f93712cf 100644 --- a/python/tests/evaluation/test_evaluation.py +++ b/python/tests/evaluation/test_evaluation.py @@ -29,7 +29,7 @@ def precision(runs: Sequence[Run], examples: Sequence[Example]): def predict(inputs: dict) -> dict: return {"output": "Yes"} - evaluate( + results = evaluate( predict, data=dataset_name, evaluators=[accuracy], @@ -39,7 +39,13 @@ def predict(inputs: dict) -> dict: "my-prompt-version": "abcd-1234", "function": "evaluate", }, + num_repetitions=3, ) + results.wait() + assert len(results) == 30 + examples = client.list_examples(dataset_name=dataset_name) + for example in examples: + assert len([r for r in results if r["example"].id == example.id]) == 3 async def test_aevaluate(): @@ -65,7 +71,7 @@ async def apredict(inputs: dict) -> dict: await asyncio.sleep(0.1) return {"output": "Yes"} - await aevaluate( + results = await aevaluate( apredict, data=dataset_name, evaluators=[accuracy], @@ -76,7 +82,17 @@ async def apredict(inputs: dict) -> dict: "my-prompt-version": "abcd-1234", "function": "aevaluate", }, + num_repetitions=2, ) + assert len(results) == 20 + examples = client.list_examples(dataset_name=dataset_name) + all_results = [r async for r in results] + for example in examples: + count = 0 + for r in all_results: + if r["run"].reference_example_id == example.id: + count += 1 + assert count == 2 @unit From 2bdf6ebae390e504b2b8e28c2936e04b9337773a Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 23 May 2024 12:46:59 -0400 Subject: [PATCH 103/373] make num_repetitions non-optional --- python/langsmith/evaluation/_arunner.py | 10 +++------- python/langsmith/evaluation/_runner.py | 10 +++------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index df3465184..4c096658e 100644 --- 
a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -62,7 +62,7 @@ async def aevaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, - num_repetitions: Optional[int] = None, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, ) -> AsyncExperimentResults: @@ -348,7 +348,7 @@ async def _aevaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, - num_repetitions: Optional[int] = 1, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, experiment: Optional[schemas.TracerSession] = None, @@ -369,11 +369,7 @@ async def _aevaluate( metadata=metadata, experiment=experiment_ or experiment_prefix, description=description, - num_repetitions=( - num_repetitions - if num_repetitions is not None and num_repetitions > 0 - else 1 - ), + num_repetitions=num_repetitions, runs=runs, ).astart() cache_dir = ls_utils.get_cache_dir(None) diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index 74446aa7f..fec9d743e 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -82,7 +82,7 @@ def evaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, - num_repetitions: Optional[int] = 1, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, ) -> ExperimentResults: @@ -770,7 +770,7 @@ def _evaluate( experiment_prefix: Optional[str] = None, description: Optional[str] = None, max_concurrency: Optional[int] = None, - num_repetitions: Optional[int] = 1, + num_repetitions: int = 1, client: Optional[langsmith.Client] = None, blocking: bool = True, experiment: Optional[schemas.TracerSession] = None, @@ -790,11 +790,7 @@ def _evaluate( metadata=metadata, 
experiment=experiment_ or experiment_prefix, description=description, - num_repetitions=( - num_repetitions - if num_repetitions is not None and num_repetitions > 0 - else 1 - ), + num_repetitions=num_repetitions, # If provided, we don't need to create a new experiment. runs=runs, # Create or resolve the experiment. From 0bee48145eb0c82bfa9f641bd7b30260f9287a23 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 23 May 2024 15:47:17 -0400 Subject: [PATCH 104/373] clarify docstring --- python/langsmith/evaluation/_arunner.py | 3 ++- python/langsmith/evaluation/_runner.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index 4c096658e..ab1eaa3be 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -83,7 +83,8 @@ async def aevaluate( description (Optional[str]): A description of the experiment. max_concurrency (Optional[int]): The maximum number of concurrent evaluations to run. Defaults to None. - num_repetitions (int): The number of times to repeat the evaluation. + num_repetitions (int): The number of times to run the evaluation. + Each item in the dataset will be run and evaluated this many times. Defaults to 1. client (Optional[langsmith.Client]): The LangSmith client to use. Defaults to None. diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index fec9d743e..7854372fd 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -107,7 +107,8 @@ def evaluate( Defaults to None. blocking (bool): Whether to block until the evaluation is complete. Defaults to True. - num_repetitions (int): The number of times to repeat the evaluation. + num_repetitions (int): The number of times to run the evaluation. + Each item in the dataset will be run and evaluated this many times. Defaults to 1. 
Returns: From a8c8778a08dc9b6c9184b589f19c7099831f129c Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 23 May 2024 17:24:54 -0400 Subject: [PATCH 105/373] address comment, version bumps --- js/package.json | 4 ++-- js/src/evaluation/_runner.ts | 2 +- js/src/index.ts | 2 +- python/pyproject.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/js/package.json b/js/package.json index 93b92e6cf..1b825cb0b 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.26", + "version": "0.1.27", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -248,4 +248,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 0ac4924fe..e4ad361a2 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -192,7 +192,7 @@ class _ExperimentManager { for await (const example of unresolvedData) { exs.push(example); } - if (this._numRepetitions) { + if (this._numRepetitions && this._numRepetitions > 0) { const repeatedExamples = []; for (let i = 0; i < this._numRepetitions; i++) { repeatedExamples.push(...exs); diff --git a/js/src/index.ts b/js/src/index.ts index 01f8dbfa7..2ed0e6d74 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.26"; +export const __version__ = "0.1.27"; diff --git a/python/pyproject.toml b/python/pyproject.toml index 976f30004..f17acaf89 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.60" +version = "0.1.61" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From 6d0f5bf69ebee8729ccb47e8ae994f45ccef3e09 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 23 May 2024 17:26:32 -0400 Subject: [PATCH 106/373] bump versions again --- js/package.json | 2 +- js/src/index.ts | 2 +- python/pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index eea5c0fec..ea269768f 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.28", + "version": "0.1.29", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index 4ddce61f7..e517be345 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.28"; +export const __version__ = "0.1.29"; diff --git a/python/pyproject.toml b/python/pyproject.toml index 59113f2d8..7f87cd28a 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.62" +version = "0.1.63" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From f7d7a3581745a83716f573cd9940a5aecb50e2b2 Mon Sep 17 00:00:00 2001 From: HoangNguyen689 Date: Tue, 28 May 2024 16:52:06 +0700 Subject: [PATCH 107/373] Fix typo in feedback client (#727) `feedack_block` -> `feedback_block` --- python/langsmith/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index cc9ca49c0..afafeba0e 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3566,12 +3566,12 @@ def create_feedback( comparative_experiment_id=comparative_experiment_id, feedback_group_id=feedback_group_id, ) - feedack_block = _dumps_json(feedback.dict(exclude_none=True)) + feedback_block = _dumps_json(feedback.dict(exclude_none=True)) self.request_with_retries( "POST", "/feedback", request_kwargs={ - "data": feedack_block, + "data": feedback_block, }, stop_after_attempt=stop_after_attempt, retry_on=(ls_utils.LangSmithNotFoundError,), From 060815e68ffabffe206b3cd8a8cff8505ce00974 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 28 May 2024 14:32:58 +0200 Subject: [PATCH 108/373] feat(js): add distributed tracing --- js/src/run_trees.ts | 214 +++++++++++++++++++++++---------- js/src/tests/run_trees.test.ts | 23 ++++ 2 files changed, 172 insertions(+), 65 deletions(-) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 1c877bc5a..8af4b3f72 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -51,6 +51,9 @@ export interface RunTreeConfig { on_end?: (runTree: RunTree) => void; execution_order?: number; child_execution_order?: number; + + trace_id?: string; + dotted_order?: string; } export interface RunnableConfigLike { @@ -89,6 +92,51 @@ interface LangChainTracerLike extends TracerLike { client: Client; } +/** + * Baggage header information + */ +class Baggage { + metadata: KVMap | undefined; + tags: string[] | undefined; + + constructor(metadata: KVMap | undefined, tags: string[] | undefined) { + 
this.metadata = metadata; + this.tags = tags; + } + + static fromHeader(value: string) { + const items = value.split(","); + let metadata: KVMap = {}; + let tags: string[] = []; + for (const item of items) { + const [key, uriValue] = item.split("="); + const value = decodeURIComponent(uriValue); + if (key === "langsmith-metadata") { + metadata = JSON.parse(value); + } else if (key === "langsmith-tags") { + tags = value.split(","); + } + } + + return new Baggage(metadata, tags); + } + + toHeader(): string { + const items = []; + if (this.metadata && Object.keys(this.metadata).length > 0) { + items.push( + `langsmith-metadata=${encodeURIComponent( + JSON.stringify(this.metadata) + )}` + ); + } + if (this.tags && this.tags.length > 0) { + items.push(`langsmith-tags=${encodeURIComponent(this.tags.join(","))}`); + } + return items.join(","); + } +} + export class RunTree implements BaseRun { id: string; name: RunTreeConfig["name"]; @@ -150,71 +198,6 @@ export class RunTree implements BaseRun { } } - static fromRunnableConfig( - parentConfig: RunnableConfigLike, - props: { - name: string; - tags?: string[]; - metadata?: KVMap; - } - ): RunTree { - // We only handle the callback manager case for now - const callbackManager = parentConfig?.callbacks as - | CallbackManagerLike - | undefined; - let parentRun: RunTree | undefined; - let projectName: string | undefined; - let client: Client | undefined; - - let tracingEnabled = isTracingEnabled(); - - if (callbackManager) { - const parentRunId = callbackManager?.getParentRunId?.() ?? 
""; - const langChainTracer = callbackManager?.handlers?.find( - (handler: TracerLike) => handler?.name == "langchain_tracer" - ) as LangChainTracerLike | undefined; - - parentRun = langChainTracer?.getRun?.(parentRunId); - projectName = langChainTracer?.projectName; - client = langChainTracer?.client; - tracingEnabled = tracingEnabled || !!langChainTracer; - } - - if (!parentRun) { - return new RunTree({ - client, - tracingEnabled, - project_name: projectName, - name: props.name, - tags: props.tags, - metadata: props.metadata, - }); - } - - const parentRunTree = new RunTree({ - name: parentRun.name, - id: parentRun.id, - client, - tracingEnabled, - project_name: projectName, - tags: [ - ...new Set((parentRun?.tags ?? []).concat(parentConfig?.tags ?? [])), - ], - extra: { - metadata: { - ...parentRun?.extra?.metadata, - ...parentConfig?.metadata, - }, - }, - }); - - return parentRunTree.createChild({ - name: props.name, - tags: props.tags, - metadata: props.metadata, - }); - } - private static getDefaultConfig(): object { return { id: uuid.v4(), @@ -361,6 +344,107 @@ export class RunTree implements BaseRun { toJSON() { return this._convertToCreate(this, undefined, false); } + + static fromRunnableConfig( + parentConfig: RunnableConfigLike, + props: RunTreeConfig + ): RunTree { + // We only handle the callback manager case for now + const callbackManager = parentConfig?.callbacks as + | CallbackManagerLike + | undefined; + let parentRun: RunTree | undefined; + let projectName: string | undefined; + let client: Client | undefined; + + let tracingEnabled = isTracingEnabled(); + + if (callbackManager) { + const parentRunId = callbackManager?.getParentRunId?.() ?? 
""; + const langChainTracer = callbackManager?.handlers?.find( + (handler: TracerLike) => handler?.name == "langchain_tracer" + ) as LangChainTracerLike | undefined; + + parentRun = langChainTracer?.getRun?.(parentRunId); + projectName = langChainTracer?.projectName; + client = langChainTracer?.client; + tracingEnabled = tracingEnabled || !!langChainTracer; + } + + if (!parentRun) { + return new RunTree({ + ...props, + client, + tracingEnabled, + project_name: projectName, + }); + } + + const parentRunTree = new RunTree({ + name: parentRun.name, + id: parentRun.id, + client, + tracingEnabled, + project_name: projectName, + tags: [ + ...new Set((parentRun?.tags ?? []).concat(parentConfig?.tags ?? [])), + ], + extra: { + metadata: { + ...parentRun?.extra?.metadata, + ...parentConfig?.metadata, + }, + }, + }); + + return parentRunTree.createChild(props); + } + + static fromDottedOrder(dottedOrder: string): RunTree | undefined { + return this.fromHeaders({ "langsmith-trace": dottedOrder }); + } + + static fromHeaders( + headers: Record, + inheritArgs?: RunTreeConfig + ): RunTree | undefined { + const headerTrace = headers["langsmith-trace"]; + if (!headerTrace || typeof headerTrace !== "string") return undefined; + + const parentDottedOrder = headerTrace.trim(); + const parsedDottedOrder = parentDottedOrder.split(".").map((part) => { + const strTime = part.slice(0, -36); + const uuid = part.slice(-36); + return { strTime, time: Date.parse(strTime), uuid }; + }); + + const traceId = parsedDottedOrder[0].uuid; + + const config: RunTreeConfig = { + ...inheritArgs, + name: inheritArgs?.["name"] ?? "parent", + run_type: inheritArgs?.["run_type"] ?? "chain", + start_time: inheritArgs?.["start_time"] ?? 
Date.now(), + id: parsedDottedOrder.at(-1)?.uuid, + trace_id: traceId, + dotted_order: parentDottedOrder, + }; + + if (headers["baggage"]) { + const baggage = Baggage.fromHeader(headers["baggage"]); + config.metadata = baggage.metadata; + config.tags = baggage.tags; + } + + return new RunTree(config); + } + + toHeaders() { + return { + "langsmith-trace": this.dotted_order, + baggage: new Baggage(this.extra?.metadata, this.tags).toHeader(), + }; + } } export function isRunTree(x?: unknown): x is RunTree { diff --git a/js/src/tests/run_trees.test.ts b/js/src/tests/run_trees.test.ts index 9567ed832..c9d7ea49e 100644 --- a/js/src/tests/run_trees.test.ts +++ b/js/src/tests/run_trees.test.ts @@ -89,3 +89,26 @@ test("serializing run tree", () => { ], }); }); + +test("distributed", () => { + const parent = new RunTree({ + name: "parent_1", + id: "00000000-0000-0000-0000-00000000000", + start_time: Date.parse("2021-05-03T00:00:00.000Z"), + }); + + const serialized = parent.toHeaders(); + + const child2 = RunTree.fromHeaders(serialized)?.createChild({ + name: "child_2", + id: "00000000-0000-0000-0000-00000000001", + start_time: Date.parse("2021-05-03T00:00:01.000Z"), + }); + + expect(JSON.parse(JSON.stringify(child2))).toMatchObject({ + name: "child_2", + run_type: "chain", + dotted_order: + "20210503T000000000001Z00000000-0000-0000-0000-00000000000.20210503T000001000002Z00000000-0000-0000-0000-00000000001", + }); +}); From f58c6435a4b7da9b2397dbd84e09dfd81fbba1cc Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 29 May 2024 00:34:11 +0200 Subject: [PATCH 109/373] Split by Z --- js/src/run_trees.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 8af4b3f72..462ed7171 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -413,9 +413,8 @@ export class RunTree implements BaseRun { const parentDottedOrder = headerTrace.trim(); const parsedDottedOrder = parentDottedOrder.split(".").map((part) => { 
- const strTime = part.slice(0, -36); - const uuid = part.slice(-36); - return { strTime, time: Date.parse(strTime), uuid }; + const [strTime, uuid] = part.split("Z"); + return { strTime, time: Date.parse(strTime + "Z"), uuid }; }); const traceId = parsedDottedOrder[0].uuid; From 9d5034cba33920191aa94ec298e98d0fa46d28c5 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 29 May 2024 00:35:42 +0200 Subject: [PATCH 110/373] Bump to JS 0.1.30 --- js/package.json | 2 +- js/src/index.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/js/package.json b/js/package.json index ea269768f..d13c53f95 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.29", + "version": "0.1.30", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index e517be345..0f26075f0 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.29"; +export const __version__ = "0.1.30"; From 7fa50fecf134cc95de048136f484bab02e0132e7 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Wed, 29 May 2024 14:37:05 -0700 Subject: [PATCH 111/373] Fix tracing for as_runnable - before it was creating the run with a separate root run --- python/langsmith/run_helpers.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index c851fd920..0e92257b2 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -833,8 +833,13 @@ def _configure_run_tree(callback_manager: Any) -> Optional[run_trees.RunTree]: ] if lc_tracers: lc_tracer = lc_tracers[0] + trace_id, dotted_order = lc_tracer.order_map[ + callback_manager.parent_run_id + ] run_tree = run_trees.RunTree( 
id=callback_manager.parent_run_id, + dotted_order=dotted_order, + trace_id=trace_id, session_name=lc_tracer.project_name, name="Wrapping", run_type="chain", From ff54e671ccada90f12cac113662f70535b9bc7a0 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Wed, 29 May 2024 14:41:02 -0700 Subject: [PATCH 112/373] Fix --- python/langsmith/run_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 0e92257b2..ac5c5604c 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -831,7 +831,7 @@ def _configure_run_tree(callback_manager: Any) -> Optional[run_trees.RunTree]: for handler in callback_manager.handlers if isinstance(handler, LangChainTracer) ] - if lc_tracers: + if lc_tracers and callback_manager.parent_run_id: lc_tracer = lc_tracers[0] trace_id, dotted_order = lc_tracer.order_map[ callback_manager.parent_run_id From 621fae6acf507a83401596239401563d7a163ff4 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Thu, 30 May 2024 02:44:26 -0700 Subject: [PATCH 113/373] [Python] 0.1.64 |Accept RunnableConfig|Customize OAI wrapper name|@traceable typing|Cache default RunTree Client MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Read parent run info from RunnableConfig if passed to function decorated with traceable - pass config into wrapped func only if signature declares it - modify signature of wrapper func to declare config as a kw arg, otherwise runnables don't pass it in - extract client and project_name from tracer, in addition to parent run info - update as_runnable to delegate run tree creation to new method ## Custom Run Name support in OpenAI Client ## Improve @traceable typing # Improves for python >= 3.10. Should in theory not change anything for users of 3.8 and 3.9 (at least, things pass in our 3.8 linting here... 
Apologies in advance to any mypy acolytes on 3.{8,9} who gain undesired linting errors after this change) Uses: 1. ParamSpec (available in 3.10 and onwards) 2. Protocols (already used) Even though python doesn't naturally support keyword-only concatenation, we can work around this with protocols and duck typing to communicate the actual returned type of "the same function signature + a keyword only langsmith_extra arg" Python's typing situation makes it hard to make everyone happy, but hopefully this strikes a better compromise than before (typing kwargs as Any in the wrapped function; too lenient) ## Cache default run tree client --------- Co-authored-by: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> --- python/langsmith/env/_git.py | 2 +- python/langsmith/env/_runtime_env.py | 18 ++- python/langsmith/evaluation/_arunner.py | 6 +- python/langsmith/evaluation/_runner.py | 6 +- python/langsmith/evaluation/evaluator.py | 16 +- python/langsmith/run_helpers.py | 148 ++++++++---------- python/langsmith/run_trees.py | 69 +++++++- python/langsmith/wrappers/_openai.py | 16 +- python/pyproject.toml | 2 +- .../integration_tests/wrappers/test_openai.py | 24 ++- python/tests/unit_tests/test_run_helpers.py | 81 +++++++++- 11 files changed, 277 insertions(+), 111 deletions(-) diff --git a/python/langsmith/env/_git.py b/python/langsmith/env/_git.py index 705f53c1e..ce598285f 100644 --- a/python/langsmith/env/_git.py +++ b/python/langsmith/env/_git.py @@ -47,7 +47,7 @@ def get_git_info(remote: str = "origin") -> GitInfo: dirty=None, tags=None, repo_name=None, - ) + ) return { "remote_url": exec_git(["remote", "get-url", remote]), diff --git a/python/langsmith/env/_runtime_env.py b/python/langsmith/env/_runtime_env.py index 5646263c2..5fe46a9c7 100644 --- a/python/langsmith/env/_runtime_env.py +++ b/python/langsmith/env/_runtime_env.py @@ -1,4 +1,5 @@ """Environment information.""" + import functools import logging import os @@ -77,6 +78,7 @@ def 
get_runtime_environment() -> dict: "py_implementation": platform.python_implementation(), "runtime_version": platform.python_version(), "langchain_version": get_langchain_environment(), + "langchain_core_version": get_langchain_core_version(), **shas, } @@ -91,6 +93,16 @@ def get_langchain_environment() -> Optional[str]: return None +@functools.lru_cache(maxsize=1) +def get_langchain_core_version() -> Optional[str]: + try: + import langchain_core # type: ignore + + return langchain_core.__version__ + except ImportError: + return None + + @functools.lru_cache(maxsize=1) def get_docker_version() -> Optional[str]: import subprocess @@ -138,9 +150,9 @@ def get_docker_environment() -> dict: compose_command = _get_compose_command() return { "docker_version": get_docker_version(), - "docker_compose_command": " ".join(compose_command) - if compose_command is not None - else None, + "docker_compose_command": ( + " ".join(compose_command) if compose_command is not None else None + ), "docker_compose_version": get_docker_compose_version(), } diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index ab1eaa3be..b5e1cc2ed 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -801,7 +801,7 @@ async def wait(self) -> None: async def _aforward( - fn: rh.SupportsLangsmithExtra[Awaitable], + fn: rh.SupportsLangsmithExtra[[dict], Awaitable], example: schemas.Example, experiment_name: str, metadata: dict, @@ -839,7 +839,9 @@ def _get_run(r: run_trees.RunTree) -> None: ) -def _ensure_async_traceable(target: ATARGET_T) -> rh.SupportsLangsmithExtra[Awaitable]: +def _ensure_async_traceable( + target: ATARGET_T, +) -> rh.SupportsLangsmithExtra[[dict], Awaitable]: if not asyncio.iscoroutinefunction(target): raise ValueError( "Target must be an async function. For sync functions, use evaluate." 
diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index 7854372fd..ae9d983c8 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -1466,12 +1466,14 @@ def _resolve_data( return data -def _ensure_traceable(target: TARGET_T) -> rh.SupportsLangsmithExtra: +def _ensure_traceable( + target: TARGET_T | rh.SupportsLangsmithExtra[[dict], dict], +) -> rh.SupportsLangsmithExtra[[dict], dict]: """Ensure the target function is traceable.""" if not callable(target): raise ValueError("Target must be a callable function.") if rh.is_traceable_function(target): - fn = cast(rh.SupportsLangsmithExtra, target) + fn = target else: fn = rh.traceable(name="Target")(target) return fn diff --git a/python/langsmith/evaluation/evaluator.py b/python/langsmith/evaluation/evaluator.py index 79c700feb..ee732a351 100644 --- a/python/langsmith/evaluation/evaluator.py +++ b/python/langsmith/evaluation/evaluator.py @@ -181,9 +181,8 @@ def __init__( self.afunc = run_helpers.ensure_traceable(func) self._name = getattr(func, "__name__", "DynamicRunEvaluator") else: - self.func = cast( - run_helpers.SupportsLangsmithExtra[_RUNNABLE_OUTPUT], - run_helpers.ensure_traceable(func), + self.func = run_helpers.ensure_traceable( + cast(Callable[[Run, Optional[Example]], _RUNNABLE_OUTPUT], func) ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") @@ -383,9 +382,14 @@ def __init__( self.afunc = run_helpers.ensure_traceable(func) self._name = getattr(func, "__name__", "DynamicRunEvaluator") else: - self.func = cast( - run_helpers.SupportsLangsmithExtra[_COMPARISON_OUTPUT], - run_helpers.ensure_traceable(func), + self.func = run_helpers.ensure_traceable( + cast( + Callable[ + [Sequence[Run], Optional[Example]], + _COMPARISON_OUTPUT, + ], + func, + ) ) self._name = getattr(func, "__name__", "DynamicRunEvaluator") diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 
ac5c5604c..7a0cc278a 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -35,12 +35,15 @@ runtime_checkable, ) +from typing_extensions import ParamSpec, TypeGuard + from langsmith import client as ls_client from langsmith import run_trees, utils from langsmith._internal import _aiter as aitertools +from langsmith.env import _runtime_env if TYPE_CHECKING: - from langchain.schema.runnable import Runnable + from langchain_core.runnables import Runnable LOGGER = logging.getLogger(__name__) _PARENT_RUN_TREE = contextvars.ContextVar[Optional[run_trees.RunTree]]( @@ -143,7 +146,9 @@ def tracing_context( get_run_tree_context = get_current_run_tree -def is_traceable_function(func: Callable) -> bool: +def is_traceable_function( + func: Callable[P, R], +) -> TypeGuard[SupportsLangsmithExtra[P, R]]: """Check if a function is @traceable decorated.""" return ( _is_traceable_function(func) @@ -152,12 +157,11 @@ def is_traceable_function(func: Callable) -> bool: ) -def ensure_traceable(func: Callable[..., R]) -> Callable[..., R]: +def ensure_traceable(func: Callable[P, R]) -> SupportsLangsmithExtra[P, R]: """Ensure that a function is traceable.""" - return cast( - SupportsLangsmithExtra, - (func if is_traceable_function(func) else traceable()(func)), - ) + if is_traceable_function(func): + return func + return traceable()(func) def is_async(func: Callable) -> bool: @@ -183,10 +187,11 @@ class LangSmithExtra(TypedDict, total=False): R = TypeVar("R", covariant=True) +P = ParamSpec("P") @runtime_checkable -class SupportsLangsmithExtra(Protocol, Generic[R]): +class SupportsLangsmithExtra(Protocol, Generic[P, R]): """Implementations of this Protoc accept an optional langsmith_extra parameter. 
Args: @@ -201,9 +206,9 @@ class SupportsLangsmithExtra(Protocol, Generic[R]): def __call__( self, - *args: Any, + *args: P.args, langsmith_extra: Optional[LangSmithExtra] = None, - **kwargs: Any, + **kwargs: P.kwargs, ) -> R: """Call the instance when it is called as a function. @@ -222,8 +227,8 @@ def __call__( @overload def traceable( - func: Callable[..., R], -) -> Callable[..., R]: ... + func: Callable[P, R], +) -> SupportsLangsmithExtra[P, R]: ... @overload @@ -238,7 +243,7 @@ def traceable( project_name: Optional[str] = None, process_inputs: Optional[Callable[[dict], dict]] = None, _invocation_params_fn: Optional[Callable[[dict], dict]] = None, -) -> Callable[[Callable[..., R]], SupportsLangsmithExtra[R]]: ... +) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: ... def traceable( @@ -415,6 +420,10 @@ def manual_extra_function(x): ) def decorator(func: Callable): + func_sig = inspect.signature(func) + func_accepts_parent_run = func_sig.parameters.get("run_tree", None) is not None + func_accepts_config = func_sig.parameters.get("config", None) is not None + @functools.wraps(func) async def async_wrapper( *args: Any, @@ -429,15 +438,14 @@ async def async_wrapper( args=args, kwargs=kwargs, ) - func_accepts_parent_run = ( - inspect.signature(func).parameters.get("run_tree", None) is not None - ) + try: accepts_context = aitertools.accepts_context(asyncio.create_task) if func_accepts_parent_run: - fr_coro = func(*args, run_tree=run_container["new_run"], **kwargs) - else: - fr_coro = func(*args, **kwargs) + kwargs["run_tree"] = run_container["new_run"] + if not func_accepts_config: + kwargs.pop("config", None) + fr_coro = func(*args, **kwargs) if accepts_context: function_result = await asyncio.create_task( # type: ignore[call-arg] fr_coro, context=run_container["context"] @@ -465,20 +473,16 @@ async def async_generator_wrapper( args=args, kwargs=kwargs, ) - func_accepts_parent_run = ( - inspect.signature(func).parameters.get("run_tree", None) is not 
None - ) results: List[Any] = [] try: if func_accepts_parent_run: - async_gen_result = func( - *args, run_tree=run_container["new_run"], **kwargs - ) - else: + kwargs["run_tree"] = run_container["new_run"] # TODO: Nesting is ambiguous if a nested traceable function is only # called mid-generation. Need to explicitly accept run_tree to get # around this. - async_gen_result = func(*args, **kwargs) + if not func_accepts_config: + kwargs.pop("config", None) + async_gen_result = func(*args, **kwargs) # Can't iterate through if it's a coroutine accepts_context = aitertools.accepts_context(asyncio.create_task) if inspect.iscoroutine(async_gen_result): @@ -555,13 +559,10 @@ def wrapper( ) try: if func_accepts_parent_run: - function_result = run_container["context"].run( - func, *args, run_tree=run_container["new_run"], **kwargs - ) - else: - function_result = run_container["context"].run( - func, *args, **kwargs - ) + kwargs["run_tree"] = run_container["new_run"] + if not func_accepts_config: + kwargs.pop("config", None) + function_result = run_container["context"].run(func, *args, **kwargs) except BaseException as e: _container_end(run_container, error=e) raise e @@ -585,16 +586,13 @@ def generator_wrapper( results: List[Any] = [] try: if func_accepts_parent_run: - generator_result = run_container["context"].run( - func, *args, run_tree=run_container["new_run"], **kwargs - ) - else: + kwargs["run_tree"] = run_container["new_run"] # TODO: Nesting is ambiguous if a nested traceable function is only # called mid-generation. Need to explicitly accept run_tree to get # around this. 
- generator_result = run_container["context"].run( - func, *args, **kwargs - ) + if not func_accepts_config: + kwargs.pop("config", None) + generator_result = run_container["context"].run(func, *args, **kwargs) try: while True: item = run_container["context"].run(next, generator_result) @@ -645,6 +643,17 @@ def generator_wrapper( else: selected_wrapper = wrapper setattr(selected_wrapper, "__langsmith_traceable__", True) + sig = inspect.signature(selected_wrapper) + if not sig.parameters.get("config"): + sig = sig.replace( + parameters=[ + *sig.parameters.values(), + inspect.Parameter( + "config", inspect.Parameter.KEYWORD_ONLY, default=None + ), + ] + ) + selected_wrapper.__signature__ = sig # type: ignore[attr-defined] return selected_wrapper # If the decorator is called with no arguments, then it's being used as a @@ -762,17 +771,12 @@ def as_runnable(traceable_fn: Callable) -> Runnable: >>> runnable = as_runnable(my_function) """ try: - from langchain.callbacks.manager import ( - AsyncCallbackManager, - CallbackManager, - ) - from langchain.callbacks.tracers.langchain import LangChainTracer - from langchain.schema.runnable import RunnableConfig, RunnableLambda - from langchain.schema.runnable.utils import Input, Output + from langchain_core.runnables import RunnableConfig, RunnableLambda + from langchain_core.runnables.utils import Input, Output except ImportError as e: raise ImportError( - "as_runnable requires langchain to be installed. " - "You can install it with `pip install langchain`." + "as_runnable requires langchain-core to be installed. " + "You can install it with `pip install langchain-core`." 
) from e if not is_traceable_function(traceable_fn): try: @@ -822,33 +826,6 @@ def __init__( ), ) - @staticmethod - def _configure_run_tree(callback_manager: Any) -> Optional[run_trees.RunTree]: - run_tree: Optional[run_trees.RunTree] = None - if isinstance(callback_manager, (CallbackManager, AsyncCallbackManager)): - lc_tracers = [ - handler - for handler in callback_manager.handlers - if isinstance(handler, LangChainTracer) - ] - if lc_tracers and callback_manager.parent_run_id: - lc_tracer = lc_tracers[0] - trace_id, dotted_order = lc_tracer.order_map[ - callback_manager.parent_run_id - ] - run_tree = run_trees.RunTree( - id=callback_manager.parent_run_id, - dotted_order=dotted_order, - trace_id=trace_id, - session_name=lc_tracer.project_name, - name="Wrapping", - run_type="chain", - inputs={}, - tags=callback_manager.tags, - extra={"metadata": callback_manager.metadata}, - ) - return run_tree - @staticmethod def _wrap_sync( func: Callable[..., Output], @@ -856,9 +833,7 @@ def _wrap_sync( """Wrap a synchronous function to make it asynchronous.""" def wrap_traceable(inputs: dict, config: RunnableConfig) -> Any: - run_tree = RunnableTraceable._configure_run_tree( - config.get("callbacks") - ) + run_tree = run_trees.RunTree.from_runnable_config(cast(dict, config)) return func(**inputs, langsmith_extra={"run_tree": run_tree}) return cast(Callable[[Input, RunnableConfig], Output], wrap_traceable) @@ -879,9 +854,7 @@ def _wrap_async( afunc_ = cast(Callable[..., Awaitable[Output]], afunc) async def awrap_traceable(inputs: dict, config: RunnableConfig) -> Any: - run_tree = RunnableTraceable._configure_run_tree( - config.get("callbacks") - ) + run_tree = run_trees.RunTree.from_runnable_config(cast(dict, config)) return await afunc_(**inputs, langsmith_extra={"run_tree": run_tree}) return cast( @@ -970,7 +943,9 @@ def _collect_extra(extra_outer: dict, langsmith_extra: LangSmithExtra) -> dict: return extra_inner -def _get_parent_run(langsmith_extra: LangSmithExtra) -> 
Optional[run_trees.RunTree]: +def _get_parent_run( + langsmith_extra: LangSmithExtra, config: Optional[dict] = None +) -> Optional[run_trees.RunTree]: parent = langsmith_extra.get("parent") if isinstance(parent, run_trees.RunTree): return parent @@ -981,6 +956,9 @@ def _get_parent_run(langsmith_extra: LangSmithExtra) -> Optional[run_trees.RunTr run_tree = langsmith_extra.get("run_tree") if run_tree: return run_tree + if _runtime_env.get_langchain_core_version() is not None: + if rt := run_trees.RunTree.from_runnable_config(config): + return rt return get_current_run_tree() @@ -1000,7 +978,7 @@ def _setup_run( run_type = container_input.get("run_type") or "chain" outer_project = _PROJECT_NAME.get() langsmith_extra = langsmith_extra or LangSmithExtra() - parent_run_ = _get_parent_run(langsmith_extra) + parent_run_ = _get_parent_run(langsmith_extra, kwargs.get("config")) project_cv = _PROJECT_NAME.get() selected_project = ( project_cv # From parent trace diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 03b59ba7c..675aee4ec 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -13,6 +13,7 @@ except ImportError: from pydantic import Field, root_validator, validator +import threading import urllib.parse from langsmith import schemas as ls_schemas @@ -23,6 +24,17 @@ LANGSMITH_PREFIX = "langsmith-" LANGSMITH_DOTTED_ORDER = f"{LANGSMITH_PREFIX}trace" +_CLIENT: Optional[Client] = None +_LOCK = threading.Lock() + + +def _get_client() -> Client: + global _CLIENT + if _CLIENT is None: + with _LOCK: + if _CLIENT is None: + _CLIENT = Client() + return _CLIENT class RunTree(ls_schemas.RunBase): @@ -43,7 +55,7 @@ class RunTree(ls_schemas.RunBase): ) session_id: Optional[UUID] = Field(default=None, alias="project_id") extra: Dict = Field(default_factory=dict) - client: Client = Field(default_factory=Client, exclude=True) + client: Client = Field(default_factory=_get_client, exclude=True) dotted_order: str = Field( 
default="", description="The order of the run in the tree." ) @@ -60,7 +72,7 @@ class Config: def validate_client(cls, v: Optional[Client]) -> Client: """Ensure the client is specified.""" if v is None: - return Client() + return _get_client() return v @root_validator(pre=True) @@ -286,6 +298,59 @@ def from_dotted_order( } return cast(RunTree, cls.from_headers(headers, **kwargs)) + @classmethod + def from_runnable_config( + cls, + config: Optional[dict], + **kwargs: Any, + ) -> Optional[RunTree]: + """Create a new 'child' span from the provided runnable config. + + Requires langchain to be installed. + + Returns: + Optional[RunTree]: The new span or None if + no parent span information is found. + """ + try: + from langchain_core.callbacks.manager import ( + AsyncCallbackManager, + CallbackManager, + ) + from langchain_core.runnables import RunnableConfig, ensure_config + from langchain_core.tracers.langchain import LangChainTracer + except ImportError as e: + raise ImportError( + "RunTree.from_runnable_config requires langchain-core to be installed. " + "You can install it with `pip install langchain-core`." 
+ ) from e + config_ = ensure_config( + cast(RunnableConfig, config) if isinstance(config, dict) else None + ) + if ( + (cb := config_.get("callbacks")) + and isinstance(cb, (CallbackManager, AsyncCallbackManager)) + and cb.parent_run_id + and ( + tracer := next( + (t for t in cb.handlers if isinstance(t, LangChainTracer)), + None, + ) + ) + ): + if hasattr(tracer, "order_map"): + dotted_order = tracer.order_map[cb.parent_run_id][1] + elif ( + run := tracer.run_map.get(str(cb.parent_run_id)) + ) and run.dotted_order: + dotted_order = run.dotted_order + else: + return None + kwargs["client"] = tracer.client + kwargs["project_name"] = tracer.project_name + return RunTree.from_dotted_order(dotted_order, **kwargs) + return None + @classmethod def from_headers(cls, headers: Dict[str, str], **kwargs: Any) -> Optional[RunTree]: """Create a new 'parent' span from the provided headers. diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index de6c77ded..5b6798e8d 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -211,13 +211,23 @@ class TracingExtra(TypedDict, total=False): client: Optional[ls_client.Client] -def wrap_openai(client: C, *, tracing_extra: Optional[TracingExtra] = None) -> C: +def wrap_openai( + client: C, + *, + tracing_extra: Optional[TracingExtra] = None, + chat_name: str = "ChatOpenAI", + completions_name: str = "OpenAI", +) -> C: """Patch the OpenAI client to make it traceable. Args: client (Union[OpenAI, AsyncOpenAI]): The client to patch. tracing_extra (Optional[TracingExtra], optional): Extra tracing information. Defaults to None. + chat_name (str, optional): The run name for the chat completions endpoint. + Defaults to "ChatOpenAI". + completions_name (str, optional): The run name for the completions endpoint. + Defaults to "OpenAI". Returns: Union[OpenAI, AsyncOpenAI]: The patched client. 
@@ -225,14 +235,14 @@ def wrap_openai(client: C, *, tracing_extra: Optional[TracingExtra] = None) -> C """ client.chat.completions.create = _get_wrapper( # type: ignore[method-assign] client.chat.completions.create, - "ChatOpenAI", + chat_name, _reduce_chat, tracing_extra=tracing_extra, invocation_params_fn=functools.partial(_infer_invocation_params, "chat"), ) client.completions.create = _get_wrapper( # type: ignore[method-assign] client.completions.create, - "OpenAI", + completions_name, _reduce_completions, tracing_extra=tracing_extra, invocation_params_fn=functools.partial(_infer_invocation_params, "text"), diff --git a/python/pyproject.toml b/python/pyproject.toml index 7f87cd28a..5fab25448 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.63" +version = "0.1.64" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" diff --git a/python/tests/integration_tests/wrappers/test_openai.py b/python/tests/integration_tests/wrappers/test_openai.py index 40804b96d..11bc7bf3f 100644 --- a/python/tests/integration_tests/wrappers/test_openai.py +++ b/python/tests/integration_tests/wrappers/test_openai.py @@ -4,6 +4,7 @@ import pytest +import langsmith from langsmith.wrappers import wrap_openai @@ -12,8 +13,9 @@ def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool): import openai # noqa + client = langsmith.Client(session=mock_session()) original_client = openai.Client() - patched_client = wrap_openai(openai.Client()) + patched_client = wrap_openai(openai.Client(), tracing_extra={"client": client}) messages = [{"role": "user", "content": "Say 'foo'"}] original = original_client.chat.completions.create( messages=messages, # noqa: [arg-type] @@ -50,8 +52,9 @@ def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool): async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool): 
import openai # noqa + client = langsmith.Client(session=mock_session()) original_client = openai.AsyncClient() - patched_client = wrap_openai(openai.AsyncClient()) + patched_client = wrap_openai(openai.AsyncClient(), tracing_extra={"client": client}) messages = [{"role": "user", "content": "Say 'foo'"}] original = await original_client.chat.completions.create( messages=messages, stream=stream, temperature=0, seed=42, model="gpt-3.5-turbo" @@ -84,8 +87,9 @@ async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool): def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool): import openai + client = langsmith.Client(session=mock_session()) original_client = openai.Client() - patched_client = wrap_openai(openai.Client()) + patched_client = wrap_openai(openai.Client(), tracing_extra={"client": client}) prompt = ("Say 'Foo' then stop.",) original = original_client.completions.create( model="gpt-3.5-turbo-instruct", @@ -124,8 +128,15 @@ def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool): async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool): import openai + client = langsmith.Client(session=mock_session()) + original_client = openai.AsyncClient() - patched_client = wrap_openai(openai.AsyncClient()) + patched_client = wrap_openai( + openai.AsyncClient(), + tracing_extra={"client": client}, + chat_name="chattychat", + completions_name="incompletions", + ) prompt = ("Say 'Hi i'm ChatGPT' then stop.",) original = await original_client.completions.create( model="gpt-3.5-turbo-instruct", @@ -158,7 +169,10 @@ async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool) assert type(original) == type(patched) assert original.choices == patched.choices # Give the thread a chance. 
- time.sleep(0.1) + for _ in range(10): + time.sleep(0.1) + if mock_session.return_value.request.call_count >= 1: + break assert mock_session.return_value.request.call_count >= 1 for call in mock_session.return_value.request.call_args_list: assert call[0][0].upper() == "POST" diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 61a1d2004..4170b4f2e 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -322,6 +322,85 @@ async def my_function(a, b, d): assert result == [6, 7] +def test_traceable_parent_from_runnable_config() -> None: + try: + from langchain.callbacks.tracers import LangChainTracer + from langchain.schema.runnable import RunnableLambda + except ImportError: + pytest.skip("Skipping test that requires langchain") + with tracing_context(enabled=True): + mock_client_ = _get_mock_client() + + @traceable() + def my_function(a: int) -> int: + return a * 2 + + my_function_runnable = RunnableLambda(my_function) + + assert ( + my_function_runnable.invoke( + 1, {"callbacks": [LangChainTracer(client=mock_client_)]} + ) + == 2 + ) + time.sleep(1) + # Inspect the mock_calls and assert that 2 runs were created, + # one for the parent and one for the child + mock_calls = mock_client_.session.request.mock_calls # type: ignore + posts = [] + for call in mock_calls: + if call.args: + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + posts.extend(body["post"]) + assert len(posts) == 2 + parent = next(p for p in posts if p["parent_run_id"] is None) + child = next(p for p in posts if p["parent_run_id"] is not None) + assert child["parent_run_id"] == parent["id"] + + +def test_traceable_parent_from_runnable_config_accepts_config() -> None: + try: + from langchain.callbacks.tracers import LangChainTracer + from langchain.schema.runnable import 
RunnableLambda + except ImportError: + pytest.skip("Skipping test that requires langchain") + with tracing_context(enabled=True): + mock_client_ = _get_mock_client() + + @traceable() + def my_function(a: int, config: dict) -> int: + assert isinstance(config, dict) + return a * 2 + + my_function_runnable = RunnableLambda(my_function) + + assert ( + my_function_runnable.invoke( + 1, {"callbacks": [LangChainTracer(client=mock_client_)]} + ) + == 2 + ) + time.sleep(1) + # Inspect the mock_calls and assert that 2 runs were created, + # one for the parent and one for the child + mock_calls = mock_client_.session.request.mock_calls # type: ignore + posts = [] + for call in mock_calls: + if call.args: + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + posts.extend(body["post"]) + assert len(posts) == 2 + parent = next(p for p in posts if p["parent_run_id"] is None) + child = next(p for p in posts if p["parent_run_id"] is not None) + assert child["parent_run_id"] == parent["id"] + + def test_traceable_project_name() -> None: with tracing_context(enabled=True): mock_client_ = _get_mock_client() @@ -350,7 +429,7 @@ def my_function(a: int, b: int, d: int) -> int: def my_other_function(run_tree) -> int: return my_function(1, 2, 3) - my_other_function() + my_other_function() # type: ignore time.sleep(0.25) # Inspect the mock_calls and assert that "my bar project" is in # both all POST runs in the single request. 
We want to ensure From a7719a1d36caffb24a856180c39c2fe2d73f70a0 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 30 May 2024 15:37:01 +0200 Subject: [PATCH 114/373] fix(js): add missing callbacks when evaluating a runnable --- js/package.json | 2 +- js/src/evaluation/_runner.ts | 41 +++++++++---------- js/src/index.ts | 2 +- js/src/tests/evaluate.int.test.ts | 66 ++++++++++++++++++++----------- 4 files changed, 63 insertions(+), 48 deletions(-) diff --git a/js/package.json b/js/package.json index d13c53f95..c4ae119d2 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.30", + "version": "0.1.31", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index e4ad361a2..21ca5d2f5 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -1,4 +1,5 @@ import { Client, RunTree, RunTreeConfig } from "../index.js"; +import { getLangchainCallbacks } from "../langchain.js"; import { BaseRun, Example, KVMap, Run, TracerSession } from "../schemas.js"; import { traceable } from "../traceable.js"; import { getDefaultRevisionId, getGitInfo } from "../utils/_git.js"; @@ -369,7 +370,7 @@ class _ExperimentManager { } async withPredictions( - target: TargetNoInvoke, + target: TargetT, options?: { maxConcurrency?: number; } @@ -482,13 +483,13 @@ class _ExperimentManager { // Private methods /** - * Run the target function on the examples. - * @param {TargetNoInvoke} target The target function to evaluate. + * Run the target function or runnable on the examples. + * @param {TargetT} target The target function or runnable to evaluate. * @param options * @returns {AsyncGenerator<_ForwardResults>} An async generator of the results. 
*/ async *_predict( - target: TargetNoInvoke, + target: TargetT, options?: { maxConcurrency?: number; } @@ -796,13 +797,6 @@ class ExperimentResults implements AsyncIterableIterator { } } -function convertInvokeToTopLevel(fn: TargetT): TargetNoInvoke { - if ("invoke" in fn) { - return fn.invoke.bind(fn); - } - return fn; -} - async function _evaluate( target: TargetT | AsyncGenerator, fields: EvaluateOptions & { experiment?: TracerSession } @@ -826,10 +820,9 @@ async function _evaluate( }).start(); if (_isCallable(target)) { - manager = await manager.withPredictions( - convertInvokeToTopLevel(target as TargetT), - { maxConcurrency: fields.maxConcurrency } - ); + manager = await manager.withPredictions(target, { + maxConcurrency: fields.maxConcurrency, + }); } if (fields.evaluators) { @@ -846,10 +839,8 @@ async function _evaluate( return results; } -type ForwardFn = ((...args: any[]) => Promise) | ((...args: any[]) => any); - async function _forward( - fn: ForwardFn, + fn: TargetT, example: Example, experimentName: string, metadata: KVMap, @@ -872,12 +863,16 @@ async function _forward( : new Date(example.created_at).toISOString(), }, client, + tracingEnabled: true, }; - const wrappedFn = traceable(fn, { - ...options, - tracingEnabled: true, - }) as ReturnType; + const wrappedFn = + "invoke" in fn + ? 
traceable(async (inputs) => { + const callbacks = await getLangchainCallbacks(); + return fn.invoke(inputs, { callbacks }); + }, options) + : traceable(fn, options); try { await wrappedFn(example.inputs); @@ -1022,7 +1017,7 @@ async function _resolveExperiment( return [undefined, undefined]; } -function _isCallable(target: TargetT | AsyncGenerator): boolean { +function _isCallable(target: TargetT | AsyncGenerator): target is TargetT { return Boolean( typeof target === "function" || ("invoke" in target && typeof target.invoke === "function") diff --git a/js/src/index.ts b/js/src/index.ts index 0f26075f0..6223c5a87 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.30"; +export const __version__ = "0.1.31"; diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 0bd5d94a7..f99de57a7 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -3,25 +3,27 @@ import { evaluate } from "../evaluation/_runner.js"; import { Example, Run, TracerSession } from "../schemas.js"; import { Client } from "../index.js"; import { afterAll, beforeAll } from "@jest/globals"; -import { RunnableLambda } from "@langchain/core/runnables"; +import { RunnableLambda, RunnableSequence } from "@langchain/core/runnables"; const TESTING_DATASET_NAME = "test_dataset_js_evaluate_123"; beforeAll(async () => { const client = new Client(); - // create a new dataset - await client.createDataset(TESTING_DATASET_NAME, { - description: - "For testing purposed. 
Is created & deleted for each test run.", - }); - // create examples - const res = await client.createExamples({ - inputs: [{ input: 1 }, { input: 2 }], - outputs: [{ output: 2 }, { output: 3 }], - datasetName: TESTING_DATASET_NAME, - }); - if (res.length !== 2) { - throw new Error("Failed to create examples"); + if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) { + // create a new dataset + await client.createDataset(TESTING_DATASET_NAME, { + description: + "For testing purposed. Is created & deleted for each test run.", + }); + // create examples + const res = await client.createExamples({ + inputs: [{ input: 1 }, { input: 2 }], + outputs: [{ output: 2 }, { output: 3 }], + datasetName: TESTING_DATASET_NAME, + }); + if (res.length !== 2) { + throw new Error("Failed to create examples"); + } } }); @@ -34,7 +36,7 @@ afterAll(async () => { await client.deleteDataset({ datasetName: "my_splits_ds2", }); - } catch (_) { + } catch { //pass } }); @@ -629,14 +631,14 @@ test("max concurrency works with summary evaluators", async () => { }); test("Target func can be a runnable", async () => { - const targetFunc = new RunnableLambda({ - func: (input: Record) => { - console.log("__input__", input); - return { - foo: input.input + 1, - }; - }, - }); + const targetFunc = RunnableSequence.from([ + RunnableLambda.from((input: Record) => ({ + foo: input.input + 1, + })).withConfig({ runName: "First Step" }), + RunnableLambda.from((input: { foo: number }) => ({ + foo: input.foo + 1, + })).withConfig({ runName: "Second Step" }), + ]); const customEvaluator = async (run: Run, example?: Example) => { return Promise.resolve({ @@ -672,6 +674,24 @@ test("Target func can be a runnable", async () => { expect(firstEvalResults.results).toHaveLength(1); expect(firstEvalResults.results[0].key).toEqual("key"); expect(firstEvalResults.results[0].score).toEqual(1); + + // check if the evaluated function has valid children + const gatheredChildRunNames = []; + const queue = 
[firstRun]; + const visited = new Set(); + while (queue.length > 0) { + const current = queue.shift(); + if (!current || visited.has(current.id)) continue; + visited.add(current.id); + if (current.child_runs) { + gatheredChildRunNames.push(...current.child_runs.map((run) => run.name)); + queue.push(...current.child_runs); + } + } + + expect(gatheredChildRunNames).toEqual( + expect.arrayContaining(["RunnableSequence", "First Step", "Second Step"]) + ); }); test("evaluate can accept array of examples", async () => { From 3c9e91f2ee28249b0f94864a07d96a55af1af246 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 30 May 2024 15:39:34 +0200 Subject: [PATCH 115/373] Fix lint --- js/src/evaluation/_runner.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 21ca5d2f5..99fccad57 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -24,11 +24,6 @@ type TargetT = | { invoke: (input: TInput, config?: KVMap) => TOutput } | { invoke: (input: TInput, config?: KVMap) => Promise }; -// eslint-disable-next-line @typescript-eslint/no-explicit-any -type TargetNoInvoke = - | ((input: TInput, config?: KVMap) => Promise) - | ((input: TInput, config?: KVMap) => TOutput); - // Data format: dataset-name, dataset_id, or examples type DataT = string | AsyncIterable | Example[]; From edccdb17deaa8a79dd3dd2a491a7e4f713235938 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Thu, 30 May 2024 07:07:06 -0700 Subject: [PATCH 116/373] [Python] 0.1.66 ## Update RunnableConfig <> RunTree Precedence (#747) ## Attempt deeper copy to avoid mutations (735) For some code paths currently, if you mutate inputs/outputs within Xs of invocation, langsmith will log the mutated value, which is confusing and the wrong behavior. We can't naively use deepcopy, as many valid python objects cannot be copied. 
This PR creates a compromise solution that attempts to deepcopy and then falls back to copying up to depth 4 while handling errors. Will hopefully resolve #706 . Placed before the hide_* as well so that if people want to mutate the dict when filtering it doesn't impact the execution flow. --- python/langsmith/client.py | 11 +++- python/langsmith/run_helpers.py | 21 ++++++- python/langsmith/run_trees.py | 25 ++++----- python/langsmith/utils.py | 56 +++++++++++++++++++ python/pyproject.toml | 2 +- python/tests/unit_tests/test_client.py | 77 ++++++++++++++++++++++++-- 6 files changed, 169 insertions(+), 23 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index afafeba0e..d3966f597 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -1070,11 +1070,14 @@ def _run_transform( self, run: Union[ls_schemas.Run, dict, ls_schemas.RunLikeDict], update: bool = False, + copy: bool = False, ) -> dict: """Transform the given run object into a dictionary representation. Args: run (Union[ls_schemas.Run, dict]): The run object to transform. + update (bool, optional): Whether to update the run. Defaults to False. + copy (bool, optional): Whether to copy the run. Defaults to False. Returns: dict: The transformed run object as a dictionary. 
@@ -1088,8 +1091,12 @@ def _run_transform( elif isinstance(run_create["id"], str): run_create["id"] = uuid.UUID(run_create["id"]) if "inputs" in run_create and run_create["inputs"] is not None: + if copy: + run_create["inputs"] = ls_utils.deepish_copy(run_create["inputs"]) run_create["inputs"] = self._hide_run_inputs(run_create["inputs"]) if "outputs" in run_create and run_create["outputs"] is not None: + if copy: + run_create["outputs"] = ls_utils.deepish_copy(run_create["outputs"]) run_create["outputs"] = self._hide_run_outputs(run_create["outputs"]) if not update and not run_create.get("start_time"): run_create["start_time"] = datetime.datetime.now(datetime.timezone.utc) @@ -1177,9 +1184,8 @@ def create_run( } if not self._filter_for_sampling([run_create]): return - run_create = self._run_transform(run_create) + run_create = self._run_transform(run_create, copy=True) self._insert_runtime_env([run_create]) - if revision_id is not None: run_create["extra"]["metadata"]["revision_id"] = revision_id if ( @@ -1413,6 +1419,7 @@ def update_run( if inputs is not None: data["inputs"] = self._hide_run_inputs(inputs) if outputs is not None: + outputs = ls_utils.deepish_copy(outputs) data["outputs"] = self._hide_run_outputs(outputs) if events is not None: data["events"] = events diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 7a0cc278a..7167cd177 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -956,10 +956,27 @@ def _get_parent_run( run_tree = langsmith_extra.get("run_tree") if run_tree: return run_tree + crt = get_current_run_tree() if _runtime_env.get_langchain_core_version() is not None: if rt := run_trees.RunTree.from_runnable_config(config): - return rt - return get_current_run_tree() + # Still need to break ties when alternating between traceable and + # LanChain code. 
+ # Nesting: LC -> LS -> LS, we want to still use LS as the parent + # Otherwise would look like LC -> {LS, LS} (siblings) + if ( + not crt # Simple LC -> LS + # Let user override if manually passed in or invoked in a + # RunnableSequence. This is a naive check. + or (config is not None and config.get("callbacks")) + # If the LangChain dotted order is more nested than the LangSmith + # dotted order, use the LangChain run as the parent. + # Note that this condition shouldn't be triggered in later + # versions of core, since we also update the run_tree context + # vars when updating the RunnableConfig context var. + or rt.dotted_order > crt.dotted_order + ): + return rt + return crt def _setup_run( diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 675aee4ec..cb767108e 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -233,20 +233,17 @@ def create_child( return run def _get_dicts_safe(self): - try: - return self.dict(exclude={"child_runs"}, exclude_none=True) - except TypeError: - # Things like generators cannot be copied - self_dict = self.dict( - exclude={"child_runs", "inputs", "outputs"}, exclude_none=True - ) - if self.inputs: - # shallow copy - self_dict["inputs"] = self.inputs.copy() - if self.outputs: - # shallow copy - self_dict["outputs"] = self.outputs.copy() - return self_dict + # Things like generators cannot be copied + self_dict = self.dict( + exclude={"child_runs", "inputs", "outputs"}, exclude_none=True + ) + if self.inputs is not None: + # shallow copy. 
deep copying will occur in the client + self_dict["inputs"] = self.inputs.copy() + if self.outputs is not None: + # shallow copy; deep copying will occur in the client + self_dict["outputs"] = self.outputs.copy() + return self_dict def post(self, exclude_child_runs: bool = True) -> None: """Post the run tree to the API asynchronously.""" diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 0217ab4e4..b64904996 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -1,6 +1,7 @@ """Generic utility functions.""" import contextlib +import copy import enum import functools import logging @@ -20,6 +21,7 @@ Optional, Sequence, Tuple, + TypeVar, Union, ) @@ -497,3 +499,57 @@ def _format_exc() -> str: tb_lines = traceback.format_exception(*sys.exc_info()) filtered_lines = [line for line in tb_lines if "langsmith/" not in line] return "".join(filtered_lines) + + +T = TypeVar("T") + + +def _middle_copy( + val: T, memo: Dict[int, Any], max_depth: int = 4, _depth: int = 0 +) -> T: + cls = type(val) + + copier = getattr(cls, "__deepcopy__", None) + if copier is not None: + try: + return copier(memo) + except (TypeError, ValueError, RecursionError): + pass + if _depth >= max_depth: + return val + if isinstance(val, dict): + return { # type: ignore[return-value] + _middle_copy(k, memo, max_depth, _depth + 1): _middle_copy( + v, memo, max_depth, _depth + 1 + ) + for k, v in val.items() + } + if isinstance(val, list): + return [_middle_copy(item, memo, max_depth, _depth + 1) for item in val] # type: ignore[return-value] + if isinstance(val, tuple): + return tuple(_middle_copy(item, memo, max_depth, _depth + 1) for item in val) # type: ignore[return-value] + if isinstance(val, set): + return {_middle_copy(item, memo, max_depth, _depth + 1) for item in val} # type: ignore[return-value] + + return val + + +def deepish_copy(val: T) -> T: + """Deep copy a value with a compromise for uncopyable objects. + + Args: + val: The value to be deep copied. 
+ + Returns: + The deep copied value. + """ + memo: Dict[int, Any] = {} + try: + return copy.deepcopy(val, memo) + except (TypeError, ValueError, RecursionError) as e: + # Generators, locks, etc. cannot be copied + # and raise a TypeError (mentioning pickling, since the dunder methods) + # are re-used for copying. We'll try to do a compromise and copy + # what we can + _LOGGER.debug("Failed to deepcopy input: %s", repr(e)) + return _middle_copy(val, memo) diff --git a/python/pyproject.toml b/python/pyproject.toml index 5fab25448..b92ba21df 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.64" +version = "0.1.65" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index 62dd302fd..5e07ac147 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -11,7 +11,7 @@ import time import uuid import weakref -from datetime import datetime +from datetime import datetime, timezone from enum import Enum from io import BytesIO from typing import Any, NamedTuple, Optional @@ -28,6 +28,8 @@ import langsmith.env as ls_env import langsmith.utils as ls_utils +from langsmith import run_trees +from langsmith import schemas as ls_schemas from langsmith.client import ( Client, _dumps_json, @@ -37,7 +39,6 @@ _is_localhost, _serialize_json, ) -from langsmith.schemas import Example _CREATED_AT = datetime(2015, 1, 1, 0, 0, 0) @@ -173,14 +174,14 @@ def test_headers(monkeypatch: pytest.MonkeyPatch) -> None: @mock.patch("langsmith.client.requests.Session") def test_upload_csv(mock_session_cls: mock.Mock) -> None: dataset_id = str(uuid.uuid4()) - example_1 = Example( + example_1 = ls_schemas.Example( id=str(uuid.uuid4()), created_at=_CREATED_AT, inputs={"input": "1"}, outputs={"output": "2"}, 
dataset_id=dataset_id, ) - example_2 = Example( + example_2 = ls_schemas.Example( id=str(uuid.uuid4()), created_at=_CREATED_AT, inputs={"input": "3"}, @@ -303,6 +304,74 @@ def test_create_run_unicode() -> None: client.update_run(id_, status="completed") +def test_create_run_mutate() -> None: + inputs = {"messages": ["hi"], "mygen": (i for i in range(10))} + session = mock.Mock() + session.request = mock.Mock() + client = Client( + api_url="http://localhost:1984", + api_key="123", + session=session, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + id_ = uuid.uuid4() + run_dict = dict( + id=id_, + name="my_run", + inputs=inputs, + run_type="llm", + trace_id=id_, + dotted_order=run_trees._create_current_dotted_order( + datetime.now(timezone.utc), id_ + ), + ) + client.create_run(**run_dict) # type: ignore + inputs["messages"].append("there") # type: ignore + outputs = {"messages": ["hi", "there"]} + client.update_run( + id_, + outputs=outputs, + end_time=datetime.now(timezone.utc), + trace_id=id_, + dotted_order=run_dict["dotted_order"], + ) + for _ in range(7): + time.sleep(0.1) # Give the background thread time to stop + payloads = [ + json.loads(call[2]["data"]) + for call in session.request.mock_calls + if call.args and call.args[1].endswith("runs/batch") + ] + if payloads: + break + posts = [pr for payload in payloads for pr in payload.get("post", [])] + patches = [pr for payload in payloads for pr in payload.get("patch", [])] + inputs = next( + (pr["inputs"] for pr in itertools.chain(posts, patches) if pr.get("inputs")), + {}, + ) + outputs = next( + (pr["outputs"] for pr in itertools.chain(posts, patches) if pr.get("outputs")), + {}, + ) + # Check that the mutated value wasn't posted + assert "messages" in inputs + assert inputs["messages"] == 
["hi"] + assert "mygen" in inputs + assert inputs["mygen"].startswith( # type: ignore + "." + ) + assert outputs == {"messages": ["hi", "there"]} + + class CallTracker: def __init__(self) -> None: self.counter = 0 From 26a626e1dcf4a422544256c210433d1d560b22cd Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 30 May 2024 17:07:24 +0200 Subject: [PATCH 117/373] Allow overriding the metadata --- js/src/traceable.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 173f1cba3..492899d30 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -70,8 +70,8 @@ const getTracingRunTree = ( if (invocationParams != null) { runTree.extra ??= {}; runTree.extra.metadata = { - ...runTree.extra.metadata, ...invocationParams, + ...runTree.extra.metadata, }; } From 4863cf88b2ae7376b026a8bfdf57dfb00ff81082 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Fri, 31 May 2024 09:06:18 -0700 Subject: [PATCH 118/373] [Python] Handoff Bugs (#753) 1. from_runnable_config doesn't lbyl to see if the parent is in the order_map. Should fix 2. events are null by default here but are populated in langchain, meaning run trees cant be treated as interchangeable in the base tracer. 
--- python/langsmith/run_trees.py | 4 +++- python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_trees.py | 12 ++++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index cb767108e..7672c1f10 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -90,6 +90,8 @@ def infer_defaults(cls, values: dict) -> dict: else: values["trace_id"] = values["id"] cast(dict, values.setdefault("extra", {})) + if values.get("events") is None: + values["events"] = [] return values @root_validator(pre=False) @@ -335,7 +337,7 @@ def from_runnable_config( ) ) ): - if hasattr(tracer, "order_map"): + if hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map: dotted_order = tracer.order_map[cb.parent_run_id][1] elif ( run := tracer.run_map.get(str(cb.parent_run_id)) diff --git a/python/pyproject.toml b/python/pyproject.toml index b92ba21df..ee9798d84 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.65" +version = "0.1.66" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_trees.py b/python/tests/unit_tests/test_run_trees.py index 92c398b2d..73a963eeb 100644 --- a/python/tests/unit_tests/test_run_trees.py +++ b/python/tests/unit_tests/test_run_trees.py @@ -59,3 +59,15 @@ def test_run_tree_accepts_tpe() -> None: ) def test_parse_dotted_order(inputs, expected): assert run_trees._parse_dotted_order(inputs) == expected + + +def test_run_tree_events_not_null(): + mock_client = MagicMock(spec=Client) + run_tree = run_trees.RunTree( + name="My Chat Bot", + inputs={"text": "Summarize this morning's meetings."}, + client=mock_client, + executor=ThreadPoolExecutor(), + events=None, + ) + assert run_tree.events == [] From ad3cb2dc0de74d071c6e29ea30db98b9ab61542a Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Fri, 31 May 2024 10:03:27 -0700 Subject: [PATCH 119/373] [Python] Infer parent_run_id in from_dotted_order (#754) Also needed for the handoff. I had neglected to see that some batch ingestion operations depend on it. 
Closes https://github.com/langchain-ai/langsmith-sdk/issues/751 Closes https://github.com/langchain-ai/langchain/issues/22353 Closes https://github.com/langchain-ai/langsmith-sdk/issues/752 --- python/langsmith/run_trees.py | 5 +++ python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_trees.py | 45 ++++++++++++++++++++++- 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 7672c1f10..9aa202bbc 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -92,6 +92,8 @@ def infer_defaults(cls, values: dict) -> dict: cast(dict, values.setdefault("extra", {})) if values.get("events") is None: values["events"] = [] + if values.get("tags") is None: + values["tags"] = [] return values @root_validator(pre=False) @@ -374,6 +376,9 @@ def from_headers(cls, headers: Dict[str, str], **kwargs: Any) -> Optional[RunTre init_args["trace_id"] = trace_id init_args["id"] = parsed_dotted_order[-1][1] init_args["dotted_order"] = parent_dotted_order + if len(parsed_dotted_order) >= 2: + # Has a parent + init_args["parent_run_id"] = parsed_dotted_order[-2][1] # All placeholders. We assume the source process # handles the life-cycle of the run. init_args["start_time"] = init_args.get("start_time") or datetime.now( diff --git a/python/pyproject.toml b/python/pyproject.toml index ee9798d84..2d063e2da 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.66" +version = "0.1.67" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_trees.py b/python/tests/unit_tests/test_run_trees.py index 73a963eeb..ba25860dc 100644 --- a/python/tests/unit_tests/test_run_trees.py +++ b/python/tests/unit_tests/test_run_trees.py @@ -67,7 +67,50 @@ def test_run_tree_events_not_null(): name="My Chat Bot", inputs={"text": "Summarize this morning's meetings."}, client=mock_client, - executor=ThreadPoolExecutor(), events=None, ) assert run_tree.events == [] + + +def test_nested_run_trees_from_dotted_order(): + grandparent = run_trees.RunTree( + name="Grandparent", + inputs={"text": "Summarize this morning's meetings."}, + client=MagicMock(spec=Client), + ) + parent = grandparent.create_child( + name="Parent", + ) + child = parent.create_child( + name="Child", + ) + # Check child + clone = run_trees.RunTree.from_dotted_order( + dotted_order=child.dotted_order, + name="Clone", + client=MagicMock(spec=Client), + ) + + assert clone.id == child.id + assert clone.parent_run_id == child.parent_run_id + assert clone.dotted_order == child.dotted_order + + # Check parent + parent_clone = run_trees.RunTree.from_dotted_order( + dotted_order=parent.dotted_order, + name="Parent Clone", + client=MagicMock(spec=Client), + ) + assert parent_clone.id == parent.id + assert parent_clone.parent_run_id == parent.parent_run_id + assert parent_clone.dotted_order == parent.dotted_order + + # Check grandparent + grandparent_clone = run_trees.RunTree.from_dotted_order( + dotted_order=grandparent.dotted_order, + name="Grandparent Clone", + client=MagicMock(spec=Client), + ) + assert grandparent_clone.id == grandparent.id + assert grandparent_clone.parent_run_id is None + assert grandparent_clone.dotted_order == grandparent.dotted_order From cdb8d918367dec39cdd64a911c4ed45d7f3e2048 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Mon, 3 Jun 2024 15:40:16 -0700 Subject: [PATCH 120/373] safer exception --- python/langsmith/utils.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index b64904996..f116b044c 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -546,7 +546,7 @@ def deepish_copy(val: T) -> T: memo: Dict[int, Any] = {} try: return copy.deepcopy(val, memo) - except (TypeError, ValueError, RecursionError) as e: + except Exception as e: # Generators, locks, etc. cannot be copied # and raise a TypeError (mentioning pickling, since the dunder methods) # are re-used for copying. We'll try to do a compromise and copy From a39f66225b87b9ecdf70f4ec745f6cabc985273b Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Mon, 3 Jun 2024 15:46:11 -0700 Subject: [PATCH 121/373] cr --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index 2d063e2da..a988a1df4 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.67" +version = "0.1.68" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From 7326476f85c8c032a7fefabc27800cd41af41219 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 3 Jun 2024 22:19:51 -0700 Subject: [PATCH 122/373] Improve Error Handling of Middle Copy (#759) --- python/langsmith/utils.py | 4 +- python/pyproject.toml | 2 +- python/tests/unit_tests/test_utils.py | 133 ++++++++++++++++++++++++++ 3 files changed, 136 insertions(+), 3 deletions(-) diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index f116b044c..72607b8b1 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -513,7 +513,7 @@ def _middle_copy( if copier is not None: try: return copier(memo) - except (TypeError, ValueError, RecursionError): + except BaseException: pass if _depth >= max_depth: return val @@ -546,7 +546,7 @@ def deepish_copy(val: T) -> T: memo: Dict[int, Any] = {} try: return copy.deepcopy(val, memo) - except Exception as e: + except BaseException as e: # Generators, locks, etc. cannot be copied # and raise a TypeError (mentioning pickling, since the dunder methods) # are re-used for copying. We'll try to do a compromise and copy diff --git a/python/pyproject.toml b/python/pyproject.toml index a988a1df4..e12e09ccb 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.68" +version = "0.1.69" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index d0b0119d9..29e4b4720 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -1,7 +1,18 @@ +import copy +import dataclasses +import itertools +import threading import unittest +import uuid +from datetime import datetime +from enum import Enum +from typing import Any, NamedTuple, Optional from unittest.mock import patch +import attr +import dataclasses_json import pytest +from pydantic import BaseModel import langsmith.utils as ls_utils from langsmith.run_helpers import tracing_context @@ -97,3 +108,125 @@ def test_tracing_disabled(): with tracing_context(enabled=False): assert not ls_utils.tracing_is_enabled() assert ls_utils.tracing_is_enabled() + + +def test_deepish_copy(): + class MyClass: + def __init__(self, x: int) -> None: + self.x = x + self.y = "y" + self.a_list = [1, 2, 3] + self.a_tuple = (1, 2, 3) + self.a_set = {1, 2, 3} + self.a_dict = {"foo": "bar"} + self.my_bytes = b"foo" + + class ClassWithTee: + def __init__(self) -> None: + tee_a, tee_b = itertools.tee(range(10)) + self.tee_a = tee_a + self.tee_b = tee_b + + class MyClassWithSlots: + __slots__ = ["x", "y"] + + def __init__(self, x: int) -> None: + self.x = x + self.y = "y" + + class MyPydantic(BaseModel): + foo: str + bar: int + baz: dict + + @dataclasses.dataclass + class MyDataclass: + foo: str + bar: int + + def something(self) -> None: + pass + + class MyEnum(str, Enum): + FOO = "foo" + BAR = "bar" + + class ClassWithFakeJson: + def json(self): + raise ValueError("This should not be called") + + def to_json(self) -> dict: + return {"foo": "bar"} + + @dataclasses_json.dataclass_json + @dataclasses.dataclass + class Person: + name: str + + @attr.dataclass + class AttrDict: + foo: str = attr.ib() + bar: int + + uid = uuid.uuid4() + current_time = datetime.now() + + class NestedClass: + __slots__ = ["person", "lock"] + 
+ def __init__(self) -> None: + self.person = Person(name="foo") + self.lock = [threading.Lock()] + + def __deepcopy__(self, memo: Optional[dict] = None) -> Any: + cls = type(self) + m = cls.__new__(cls) + setattr(m, "__dict__", copy.deepcopy(self.__dict__, memo=memo)) + + class CyclicClass: + def __init__(self) -> None: + self.cyclic = self + + def __repr__(self) -> str: + return "SoCyclic" + + class CyclicClass2: + def __init__(self) -> None: + self.cyclic: Any = None + self.other: Any = None + + def __repr__(self) -> str: + return "SoCyclic2" + + cycle_2 = CyclicClass2() + cycle_2.cyclic = CyclicClass2() + cycle_2.cyclic.other = cycle_2 + + class MyNamedTuple(NamedTuple): + foo: str + bar: int + + my_dict = { + "uid": uid, + "time": current_time, + "adict": {"foo": "bar"}, + "my_class": MyClass(1), + "class_with_tee": ClassWithTee(), + "my_slotted_class": MyClassWithSlots(1), + "my_dataclass": MyDataclass("foo", 1), + "my_enum": MyEnum.FOO, + "my_pydantic": MyPydantic(foo="foo", bar=1, baz={"foo": "bar"}), + "person": Person(name="foo"), + "a_bool": True, + "a_none": None, + "a_str": "foo", + "an_int": 1, + "a_float": 1.1, + "nested_class": NestedClass(), + "attr_dict": AttrDict(foo="foo", bar=1), + "named_tuple": MyNamedTuple(foo="foo", bar=1), + "cyclic": CyclicClass(), + "cyclic2": cycle_2, + "fake_json": ClassWithFakeJson(), + } + assert ls_utils.deepish_copy(my_dict) == my_dict From 42d33c60a6da1d10a9e19c571439e164f3892dcd Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Tue, 4 Jun 2024 13:56:29 -0700 Subject: [PATCH 123/373] Limit batch size (#659) --- python/langsmith/client.py | 100 +++++++++++++++++++++++-------------- python/pyproject.toml | 2 +- 2 files changed, 64 insertions(+), 38 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index d3966f597..029badf35 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -2156,7 +2156,7 @@ def get_test_results( 
project_id: Optional[ID_TYPE] = None, project_name: Optional[str] = None, ) -> "pd.DataFrame": - """Read the record-level information from a test project into a Pandas DF. + """Read the record-level information from an experiment into a Pandas DF. Note: this will fetch whatever data exists in the DB. Results are not immediately available in the DB upon evaluation run completion. @@ -2166,24 +2166,47 @@ def get_test_results( pd.DataFrame A dataframe containing the test results. """ + from concurrent.futures import ThreadPoolExecutor, as_completed # type: ignore + import pandas as pd # type: ignore runs = self.list_runs( - project_id=project_id, project_name=project_name, is_root=True + project_id=project_id, + project_name=project_name, + is_root=True, + select=[ + "id", + "reference_example_id", + "inputs", + "outputs", + "error", + "feedback_stats", + "start_time", + "end_time", + ], ) - results = [] + results: list[dict] = [] example_ids = [] - for r in runs: - row = { - "example_id": r.reference_example_id, - **{f"input.{k}": v for k, v in r.inputs.items()}, - **{f"outputs.{k}": v for k, v in (r.outputs or {}).items()}, - } - if r.feedback_stats: - for k, v in r.feedback_stats.items(): - row[f"feedback.{k}"] = v.get("avg") - row.update( + + def fetch_examples(batch): + examples = self.list_examples(example_ids=batch) + return [ { + "example_id": example.id, + **{f"reference.{k}": v for k, v in (example.outputs or {}).items()}, + } + for example in examples + ] + + batch_size = 50 + cursor = 0 + with ThreadPoolExecutor() as executor: + futures = [] + for r in runs: + row = { + "example_id": r.reference_example_id, + **{f"input.{k}": v for k, v in r.inputs.items()}, + **{f"outputs.{k}": v for k, v in (r.outputs or {}).items()}, "execution_time": ( (r.end_time - r.start_time).total_seconds() if r.end_time @@ -2192,32 +2215,35 @@ def get_test_results( "error": r.error, "id": r.id, } - ) - if r.reference_example_id: - example_ids.append(r.reference_example_id) - 
results.append(row) - result = pd.DataFrame(results).set_index("example_id") - batch_size = 100 - example_outputs = [] - for batch in [ - example_ids[i : i + batch_size] - for i in range(0, len(example_ids), batch_size) - ]: - for example in self.list_examples(example_ids=batch): - example_outputs.append( - { - "example_id": example.id, - **{ - f"reference.{k}": v - for k, v in (example.outputs or {}).items() - }, - } - ) + if r.feedback_stats: + row.update( + { + f"feedback.{k}": v.get("avg") + for k, v in r.feedback_stats.items() + } + ) + if r.reference_example_id: + example_ids.append(r.reference_example_id) + if len(results) % batch_size == 0: + # Ensure not empty + if batch := example_ids[cursor : cursor + batch_size]: + futures.append(executor.submit(fetch_examples, batch)) + cursor += batch_size + results.append(row) + + # Handle any remaining examples + if example_ids[cursor:]: + futures.append(executor.submit(fetch_examples, example_ids[cursor:])) + result_df = pd.DataFrame(results).set_index("example_id") + example_outputs = [ + output for future in as_completed(futures) for output in future.result() + ] if example_outputs: - df = pd.DataFrame(example_outputs).set_index("example_id") - result = df.merge(result, left_index=True, right_index=True) + example_df = pd.DataFrame(example_outputs).set_index("example_id") + result_df = example_df.merge(result_df, left_index=True, right_index=True) + # Flatten dict columns into dot syntax for easier access - return pd.json_normalize(result.to_dict(orient="records")) + return pd.json_normalize(result_df.to_dict(orient="records")) def list_projects( self, diff --git a/python/pyproject.toml b/python/pyproject.toml index e12e09ccb..c40cd6387 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.69" +version = "0.1.70" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From b5dfb3cdc5697446f88afae4cdddddb4f3b7b82f Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:09:34 -0700 Subject: [PATCH 124/373] [Python] Improve kwarg support in @traceable (#762) Previously there was an error in signature.replace --------- Co-authored-by: mc-marcocheng <57459045+mc-marcocheng@users.noreply.github.com> Co-authored-by: McCheng --- python/langsmith/run_helpers.py | 11 ++++++++++- python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_helpers.py | 21 +++++++++++++-------- 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 7167cd177..bceb82fc0 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -647,10 +647,19 @@ def generator_wrapper( if not sig.parameters.get("config"): sig = sig.replace( parameters=[ - *sig.parameters.values(), + *( + param + for param in sig.parameters.values() + if param.kind != inspect.Parameter.VAR_KEYWORD + ), inspect.Parameter( "config", inspect.Parameter.KEYWORD_ONLY, default=None ), + *( + param + for param in sig.parameters.values() + if param.kind == inspect.Parameter.VAR_KEYWORD + ), ] ) selected_wrapper.__signature__ = sig # type: ignore[attr-defined] diff --git a/python/pyproject.toml b/python/pyproject.toml index c40cd6387..96b2d224e 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.70" +version = "0.1.71" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 4170b4f2e..fb83afef4 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -178,12 +178,13 @@ def test_traceable_iterator(use_next: bool, mock_client: Client) -> None: with tracing_context(enabled=True): @traceable(client=mock_client) - def my_iterator_fn(a, b, d): + def my_iterator_fn(a, b, d, **kwargs): + assert kwargs == {"e": 5} for i in range(a + b + d): yield i expected = [0, 1, 2, 3, 4, 5] - genout = my_iterator_fn(1, 2, 3) + genout = my_iterator_fn(1, 2, 3, e=5) if use_next: results = [] while True: @@ -216,12 +217,13 @@ def filter_inputs(kwargs: dict): return {"a": "FOOOOOO", "b": kwargs["b"], "d": kwargs["d"]} @traceable(client=mock_client, process_inputs=filter_inputs) - async def my_iterator_fn(a, b, d): + async def my_iterator_fn(a, b, d, **kwargs): + assert kwargs == {"e": 5} for i in range(a + b + d): yield i expected = [0, 1, 2, 3, 4, 5] - genout = my_iterator_fn(1, 2, 3) + genout = my_iterator_fn(1, 2, 3, e=5) if use_next: results = [] async for item in genout: @@ -738,7 +740,8 @@ def _get_run(r: RunTree) -> None: def test_traceable_regular(): @traceable - def some_sync_func(query: str) -> list: + def some_sync_func(query: str, **kwargs: Any) -> list: + assert kwargs == {"a": 1, "b": 2} return [query, query] @traceable @@ -763,7 +766,7 @@ def summarize_answers(query: str, document_context: str) -> list: def my_answer( query: str, ) -> list: - expanded_terms = some_sync_func(query=query) + expanded_terms = some_sync_func(query=query, a=1, b=2) documents = some_func( queries=expanded_terms, ) @@ -818,7 +821,9 @@ def some_sync_func(query: str) -> list: return [query, query] @traceable - async def some_async_func(queries: list) -> list: + async def some_async_func(queries: list, *, required: str, **kwargs: Any) -> list: + assert required == "foo" + 
assert kwargs == {"a": 1, "b": 2} await asyncio.sleep(0.01) return queries @@ -844,7 +849,7 @@ async def my_answer( ) -> list: expanded_terms = some_sync_func(query=query) documents = await some_async_func( - queries=expanded_terms, + queries=expanded_terms, required="foo", a=1, b=2 ) await another_async_func(query=query) From e7f46d08406774e4a94f10c90dd236d78ecf4f18 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 5 Jun 2024 07:50:32 -0700 Subject: [PATCH 125/373] Honor client when dist tracing (#764) Closes https://github.com/langchain-ai/langsmith-sdk/issues/763 --- python/langsmith/run_helpers.py | 23 +++++++--- python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_helpers.py | 48 +++++++++++++++++++++ 3 files changed, 65 insertions(+), 8 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index bceb82fc0..9dee2191d 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -702,7 +702,7 @@ def trace( outer_metadata = _METADATA.get() outer_project = _PROJECT_NAME.get() or utils.get_tracer_project() parent_run_ = _get_parent_run( - {"parent": parent, "run_tree": kwargs.get("run_tree")} + {"parent": parent, "run_tree": kwargs.get("run_tree"), "client": client} ) # Merge and set context variables @@ -953,21 +953,28 @@ def _collect_extra(extra_outer: dict, langsmith_extra: LangSmithExtra) -> dict: def _get_parent_run( - langsmith_extra: LangSmithExtra, config: Optional[dict] = None + langsmith_extra: LangSmithExtra, + config: Optional[dict] = None, ) -> Optional[run_trees.RunTree]: parent = langsmith_extra.get("parent") if isinstance(parent, run_trees.RunTree): return parent if isinstance(parent, dict): - return run_trees.RunTree.from_headers(parent) + return run_trees.RunTree.from_headers( + parent, client=langsmith_extra.get("client") + ) if isinstance(parent, str): - return run_trees.RunTree.from_dotted_order(parent) + return 
run_trees.RunTree.from_dotted_order( + parent, client=langsmith_extra.get("client") + ) run_tree = langsmith_extra.get("run_tree") if run_tree: return run_tree crt = get_current_run_tree() if _runtime_env.get_langchain_core_version() is not None: - if rt := run_trees.RunTree.from_runnable_config(config): + if rt := run_trees.RunTree.from_runnable_config( + config, client=langsmith_extra.get("client") + ): # Still need to break ties when alternating between traceable and # LanChain code. # Nesting: LC -> LS -> LS, we want to still use LS as the parent @@ -1004,7 +1011,10 @@ def _setup_run( run_type = container_input.get("run_type") or "chain" outer_project = _PROJECT_NAME.get() langsmith_extra = langsmith_extra or LangSmithExtra() - parent_run_ = _get_parent_run(langsmith_extra, kwargs.get("config")) + client_ = langsmith_extra.get("client", client) + parent_run_ = _get_parent_run( + {**langsmith_extra, "client": client_}, kwargs.get("config") + ) project_cv = _PROJECT_NAME.get() selected_project = ( project_cv # From parent trace @@ -1068,7 +1078,6 @@ def _setup_run( tags_ = (langsmith_extra.get("tags") or []) + (outer_tags or []) context.run(_TAGS.set, tags_) tags_ += tags or [] - client_ = langsmith_extra.get("client", client) if parent_run_ is not None: new_run = parent_run_.create_child( name=name_, diff --git a/python/pyproject.toml b/python/pyproject.toml index 96b2d224e..fb2f62b51 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.71" +version = "0.1.72" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index fb83afef4..5e044106d 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -17,6 +17,7 @@ _get_inputs, as_runnable, is_traceable_function, + trace, traceable, tracing_context, ) @@ -954,3 +955,50 @@ def child_fn(a: int, b: int) -> int: assert len(child_runs) == 1 assert child_runs[0].name == "child_fn" assert child_runs[0].inputs == {"a": 1, "b": 2} + + +def test_client_passed_when_traceable_parent(): + mock_client = _get_mock_client() + rt = RunTree(name="foo", client=mock_client) + headers = rt.to_headers() + + @traceable + def my_run(foo: str): + return {"baz": "buzz"} + + my_run(foo="bar", langsmith_extra={"parent": headers, "client": mock_client}) + for _ in range(1): + time.sleep(0.1) + if mock_client.session.request.call_count > 0: + break + assert mock_client.session.request.call_count == 1 + call = mock_client.session.request.call_args + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + assert body["post"][0]["inputs"] == {"foo": "bar"} + assert body["post"][0]["outputs"] == {"baz": "buzz"} + + +def test_client_passed_when_trace_parent(): + mock_client = _get_mock_client() + rt = RunTree(name="foo", client=mock_client) + headers = rt.to_headers() + + with trace( + name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client + ) as rt: + rt.outputs["bar"] = "baz" + for _ in range(1): + time.sleep(0.1) + if mock_client.session.request.call_count > 0: + break + assert mock_client.session.request.call_count == 1 + call = mock_client.session.request.call_args + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + assert 
body["post"][0]["inputs"] == {"foo": "bar"} + assert body["post"][0]["outputs"] == {"bar": "baz"} From 8bb007c210d573e04571e1d58fbf56ccecbc4a95 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 5 Jun 2024 12:12:59 -0700 Subject: [PATCH 126/373] [Python] Set example creation time to experiment start time (#765) For unit testing. Also fix issue where test suite name isn't being honored --- python/langsmith/_testing.py | 12 +++++++++--- python/pyproject.toml | 2 +- python/tests/evaluation/test_evaluation.py | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index 5e03af80f..c808051cb 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -409,10 +409,13 @@ def experiment(self): @classmethod def from_test( - cls, client: Optional[ls_client.Client], func: Callable + cls, + client: Optional[ls_client.Client], + func: Callable, + test_suite_name: Optional[str] = None, ) -> _LangSmithTestSuite: client = client or ls_client.Client() - test_suite_name = _get_test_suite_name(func) + test_suite_name = test_suite_name or _get_test_suite_name(func) with cls._lock: if not cls._instances: cls._instances = {} @@ -496,6 +499,7 @@ def _sync_example( outputs=outputs_, dataset_id=self.id, metadata=metadata, + created_at=self._experiment.start_time, ) if example.modified_at: self.update_version(example.modified_at) @@ -531,7 +535,9 @@ def _ensure_example( if output_keys: for k in output_keys: outputs[k] = inputs.pop(k, None) - test_suite = _LangSmithTestSuite.from_test(client, func) + test_suite = _LangSmithTestSuite.from_test( + client, func, langtest_extra.get("test_suite_name") + ) example_id, example_name = _get_id(func, inputs, test_suite.id) example_id = langtest_extra["id"] or example_id test_suite.sync_example( diff --git a/python/pyproject.toml b/python/pyproject.toml index fb2f62b51..7004d9eae 100644 --- 
a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.72" +version = "0.1.73" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" diff --git a/python/tests/evaluation/test_evaluation.py b/python/tests/evaluation/test_evaluation.py index 9f93712cf..a6d78db32 100644 --- a/python/tests/evaluation/test_evaluation.py +++ b/python/tests/evaluation/test_evaluation.py @@ -136,7 +136,7 @@ def test_bar_parametrized(x, y, z): return {"z": x + y} -@unit +@unit(test_suite_name="tests.evaluation.test_evaluation::test_foo_async_parametrized") @pytest.mark.parametrize("x, y", [(1, 2), (2, 3)]) async def test_foo_async_parametrized(x, y): await asyncio.sleep(0.1) From 88ddd2a6b8c81e9782a7ee093d9f5daacb3379a3 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 5 Jun 2024 21:20:18 -0700 Subject: [PATCH 127/373] [Python] s/unit/test/g (#766) Most times, people use this decorator for integration-like testing. 
Better to keep it generic as `@test` We'll keep `@unit` around for backwards compat --- python/langsmith/__init__.py | 15 +++++--- python/langsmith/_expect.py | 8 ++--- python/langsmith/_testing.py | 36 ++++++++++--------- python/tests/evaluation/test_evaluation.py | 20 +++++------ .../tests/external/test_instructor_evals.py | 4 +-- 5 files changed, 47 insertions(+), 36 deletions(-) diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index d53de8a51..5cc80a14c 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -4,7 +4,7 @@ if TYPE_CHECKING: from langsmith._expect import expect - from langsmith._testing import unit + from langsmith._testing import test, unit from langsmith.client import Client from langsmith.evaluation import aevaluate, evaluate from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator @@ -44,10 +44,11 @@ def __getattr__(name: str) -> Any: from langsmith.run_helpers import traceable return traceable - elif name == "unit": - from langsmith._testing import unit - return unit + elif name == "test": + from langsmith._testing import test + + return test elif name == "expect": from langsmith._expect import expect @@ -62,6 +63,11 @@ def __getattr__(name: str) -> Any: return aevaluate + elif name == "unit": + from langsmith._testing import unit + + return unit + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") @@ -74,6 +80,7 @@ def __getattr__(name: str) -> Any: "traceable", "trace", "unit", + "test", "expect", "evaluate", "aevaluate", diff --git a/python/langsmith/_expect.py b/python/langsmith/_expect.py index 75faa3f19..fe459e409 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -1,14 +1,14 @@ """Make approximate assertions as "expectations" on test results. 
-This module is designed to be used within test cases decorated with the `@unit` decorator +This module is designed to be used within test cases decorated with the `@test` decorator It allows you to log scores about a test case and optionally make assertions that log as "expectation" feedback to LangSmith. Example usage: - from langsmith import expect, unit + from langsmith import expect, test - @unit + @test def test_output_semantically_close(): response = oai_client.chat.completions.create( model="gpt-3.5-turbo", @@ -37,7 +37,7 @@ def test_output_semantically_close(): # Or using a custom check expect.value(response_txt).against(lambda x: "Hello" in x) - # You can even use this for basic metric logging within unit tests + # You can even use this for basic metric logging within tests expect.score(0.8) expect.score(0.7, key="similarity").to_be_greater_than(0.7) diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index c808051cb..42cec872b 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -40,13 +40,13 @@ class SkipException(Exception): # type: ignore[no-redef] @overload -def unit( +def test( func: Callable, ) -> Callable: ... @overload -def unit( +def test( *, id: Optional[uuid.UUID] = None, output_keys: Optional[Sequence[str]] = None, @@ -55,8 +55,8 @@ def unit( ) -> Callable[[Callable], Callable]: ... -def unit(*args: Any, **kwargs: Any) -> Callable: - """Create a unit test case in LangSmith. +def test(*args: Any, **kwargs: Any) -> Callable: + """Create a test case in LangSmith. This decorator is used to mark a function as a test case for LangSmith. It ensures that the necessary example data is created and associated with the test function. @@ -90,9 +90,9 @@ def unit(*args: Any, **kwargs: Any) -> Callable: without re-executing the code. Requires the 'langsmith[vcr]' package. 
Example: - For basic usage, simply decorate a test function with `@unit`: + For basic usage, simply decorate a test function with `@test`: - >>> @unit + >>> @test ... def test_addition(): ... assert 3 + 4 == 7 @@ -106,7 +106,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: ... def generate_numbers(): ... return 3, 4 - >>> @unit + >>> @test ... def test_nested(): ... # Traced code will be included in the test case ... a, b = generate_numbers() @@ -128,7 +128,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: >>> import openai >>> from langsmith.wrappers import wrap_openai >>> oai_client = wrap_openai(openai.Client()) - >>> @unit + >>> @test ... def test_openai_says_hello(): ... # Traced code will be included in the test case ... response = oai_client.chat.completions.create( @@ -144,7 +144,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: `expect` to score and make approximate assertions on your results. >>> from langsmith import expect - >>> @unit + >>> @test ... def test_output_semantically_close(): ... response = oai_client.chat.completions.create( ... model="gpt-3.5-turbo", @@ -168,7 +168,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: ... # And then log a pass/fail score to LangSmith ... ).to_be_less_than(1.0) - The `@unit` decorator works natively with pytest fixtures. + The `@test` decorator works natively with pytest fixtures. The values will populate the "inputs" of the corresponding example in LangSmith. >>> import pytest @@ -176,7 +176,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: ... def some_input(): ... return "Some input" >>> - >>> @unit + >>> @test ... def test_with_fixture(some_input: str): ... assert "input" in some_input >>> @@ -184,7 +184,7 @@ def unit(*args: Any, **kwargs: Any) -> Callable: You can still use pytest.parametrize() as usual to run multiple test cases using the same test function. - >>> @unit(output_keys=["expected"]) + >>> @test(output_keys=["expected"]) ... @pytest.mark.parametrize( ... 
"a, b, expected", ... [ @@ -198,18 +198,18 @@ def unit(*args: Any, **kwargs: Any) -> Callable: By default, each test case will be assigned a consistent, unique identifier based on the function name and module. You can also provide a custom identifier using the `id` argument: - >>> @unit(id="1a77e4b5-1d38-4081-b829-b0442cf3f145") + >>> @test(id="1a77e4b5-1d38-4081-b829-b0442cf3f145") ... def test_multiplication(): ... assert 3 * 4 == 12 - By default, all unit test inputs are saved as "inputs" to a dataset. + By default, all test test inputs are saved as "inputs" to a dataset. You can specify the `output_keys` argument to persist those keys within the dataset's "outputs" fields. >>> @pytest.fixture ... def expected_output(): ... return "input" - >>> @unit(output_keys=["expected_output"]) + >>> @test(output_keys=["expected_output"]) ... def test_with_expected_output(some_input: str, expected_output: str): ... assert expected_output in some_input @@ -299,7 +299,7 @@ def _get_test_suite( return client.read_dataset(dataset_name=test_suite_name) else: repo = ls_env.get_git_info().get("remote_url") or "" - description = "Unit test suite" + description = "Test suite" if repo: description += f" for {repo}" return client.create_dataset( @@ -675,3 +675,7 @@ async def _test(): cache_path, ignore_hosts=[test_suite.client.api_url] ): await _test() + + +# For backwards compatibility +unit = test diff --git a/python/tests/evaluation/test_evaluation.py b/python/tests/evaluation/test_evaluation.py index a6d78db32..ecb371806 100644 --- a/python/tests/evaluation/test_evaluation.py +++ b/python/tests/evaluation/test_evaluation.py @@ -3,7 +3,7 @@ import pytest -from langsmith import Client, aevaluate, evaluate, expect, unit +from langsmith import Client, aevaluate, evaluate, expect, test from langsmith.schemas import Example, Run @@ -95,7 +95,7 @@ async def apredict(inputs: dict) -> dict: assert count == 2 -@unit +@test def test_foo(): expect(3 + 4).to_equal(7) @@ -110,33 +110,33 @@ def 
expected_output(): return "input" -@unit(output_keys=["expected_output"]) +@test(output_keys=["expected_output"]) def test_bar(some_input: str, expected_output: str): expect(some_input).to_contain(expected_output) -@unit +@test async def test_baz(): await asyncio.sleep(0.1) expect(3 + 4).to_equal(7) return 7 -@unit +@test @pytest.mark.parametrize("x, y", [(1, 2), (2, 3)]) def test_foo_parametrized(x, y): expect(x + y).to_be_greater_than(0) return x + y -@unit(output_keys=["z"]) +@test(output_keys=["z"]) @pytest.mark.parametrize("x, y, z", [(1, 2, 3), (2, 3, 5)]) def test_bar_parametrized(x, y, z): expect(x + y).to_equal(z) return {"z": x + y} -@unit(test_suite_name="tests.evaluation.test_evaluation::test_foo_async_parametrized") +@test(test_suite_name="tests.evaluation.test_evaluation::test_foo_async_parametrized") @pytest.mark.parametrize("x, y", [(1, 2), (2, 3)]) async def test_foo_async_parametrized(x, y): await asyncio.sleep(0.1) @@ -144,7 +144,7 @@ async def test_foo_async_parametrized(x, y): return x + y -@unit(output_keys=["z"]) +@test(output_keys=["z"]) @pytest.mark.parametrize("x, y, z", [(1, 2, 3), (2, 3, 5)]) async def test_bar_async_parametrized(x, y, z): await asyncio.sleep(0.1) @@ -152,11 +152,11 @@ async def test_bar_async_parametrized(x, y, z): return {"z": x + y} -@unit +@test def test_pytest_skip(): pytest.skip("Skip this test") -@unit +@test async def test_async_pytest_skip(): pytest.skip("Skip this test") diff --git a/python/tests/external/test_instructor_evals.py b/python/tests/external/test_instructor_evals.py index d90a53019..c56e06d92 100644 --- a/python/tests/external/test_instructor_evals.py +++ b/python/tests/external/test_instructor_evals.py @@ -8,7 +8,7 @@ from openai import AsyncOpenAI from pydantic import BaseModel -from langsmith import unit +from langsmith import test class Models(str, Enum): @@ -58,7 +58,7 @@ class ClassifySpam(BaseModel): @pytest.mark.asyncio_cooperative -@unit() +@test() @pytest.mark.parametrize("client, data", 
d[:3]) async def test_classification(client, data): input, expected = data From 679585ca4f38fccb6a0e7d20ac873bd69bb37921 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 5 Jun 2024 21:29:33 -0700 Subject: [PATCH 128/373] [Python] Add to top-level imports (#767) Add tracing_context, get_current_tracing_context, and get_current_run_tree to top-level imports --- python/langsmith/__init__.py | 25 ++++++++++++++++++++++++- python/pyproject.toml | 2 +- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index 5cc80a14c..0e0325d13 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -8,7 +8,13 @@ from langsmith.client import Client from langsmith.evaluation import aevaluate, evaluate from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator - from langsmith.run_helpers import trace, traceable + from langsmith.run_helpers import ( + get_current_run_tree, + get_tracing_context, + trace, + traceable, + tracing_context, + ) from langsmith.run_trees import RunTree @@ -62,6 +68,20 @@ def __getattr__(name: str) -> Any: from langsmith.evaluation import aevaluate return aevaluate + elif name == "tracing_context": + from langsmith.run_helpers import tracing_context + + return tracing_context + + elif name == "get_tracing_context": + from langsmith.run_helpers import get_tracing_context + + return get_tracing_context + + elif name == "get_current_run_tree": + from langsmith.run_helpers import get_current_run_tree + + return get_current_run_tree elif name == "unit": from langsmith._testing import unit @@ -84,4 +104,7 @@ def __getattr__(name: str) -> Any: "expect", "evaluate", "aevaluate", + "tracing_context", + "get_tracing_context", + "get_current_run_tree", ] diff --git a/python/pyproject.toml b/python/pyproject.toml index 7004d9eae..9e7061a3a 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ 
[tool.poetry] name = "langsmith" -version = "0.1.73" +version = "0.1.74" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From aa8e174a65898abdea7990be7faba97d019acc8a Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Thu, 6 Jun 2024 13:00:11 -0700 Subject: [PATCH 129/373] [Python] Check if mid-trace (#769) for "tracing is enabled". This isn't relevant for `@traceable` only tracing, but comes into play if you're crossing over to langchain. Also fix a bug in `trace` context manager where it would be persisting the run tree inferred by headers beyond its desired lifespan --- python/langsmith/run_helpers.py | 8 +++--- python/langsmith/utils.py | 10 ++++++- python/pyproject.toml | 2 +- python/tests/unit_tests/test_client.py | 2 +- python/tests/unit_tests/test_utils.py | 39 ++++++++++++++++++++++++-- 5 files changed, 51 insertions(+), 10 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 9dee2191d..f9e3f9bb5 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -698,6 +698,7 @@ def trace( f"{sorted(kwargs.keys())}.", DeprecationWarning, ) + old_ctx = get_tracing_context() outer_tags = _TAGS.get() outer_metadata = _METADATA.get() outer_project = _PROJECT_NAME.get() or utils.get_tracer_project() @@ -739,6 +740,7 @@ def trace( new_run.post() _PARENT_RUN_TREE.set(new_run) _PROJECT_NAME.set(project_name_) + try: yield new_run except (Exception, KeyboardInterrupt, BaseException) as e: @@ -751,10 +753,8 @@ def trace( new_run.patch() raise e finally: - _PARENT_RUN_TREE.set(parent_run_) - _PROJECT_NAME.set(outer_project) - _TAGS.set(outer_tags) - _METADATA.set(outer_metadata) + # Reset the old context + _set_tracing_context(old_ctx) new_run.patch() diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 72607b8b1..2c0152e0f 100644 --- 
a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -67,11 +67,19 @@ class LangSmithConnectionError(LangSmithError): def tracing_is_enabled() -> bool: """Return True if tracing is enabled.""" - from langsmith.run_helpers import get_tracing_context + from langsmith.run_helpers import get_current_run_tree, get_tracing_context tc = get_tracing_context() + # You can manually override the environment using context vars. + # Check that first. + # Doing this before checking the run tree lets us + # disable a branch within a trace. if tc["enabled"] is not None: return tc["enabled"] + # Next check if we're mid-trace + if get_current_run_tree(): + return True + # Finally, check the global environment var_result = get_env_var("TRACING_V2", default=get_env_var("TRACING", default="")) return var_result == "true" diff --git a/python/pyproject.toml b/python/pyproject.toml index 9e7061a3a..9b8436ecd 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.74" +version = "0.1.75" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index 5e07ac147..f08e17864 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -343,7 +343,7 @@ def test_create_run_mutate() -> None: trace_id=id_, dotted_order=run_dict["dotted_order"], ) - for _ in range(7): + for _ in range(10): time.sleep(0.1) # Give the background thread time to stop payloads = [ json.loads(call[2]["data"]) diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index 29e4b4720..8fa74992a 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -7,7 +7,7 @@ from datetime import datetime from enum import Enum from typing import Any, NamedTuple, Optional -from unittest.mock import patch +from unittest.mock import MagicMock, patch import attr import dataclasses_json @@ -15,6 +15,7 @@ from pydantic import BaseModel import langsmith.utils as ls_utils +from langsmith import Client, traceable from langsmith.run_helpers import tracing_context @@ -87,7 +88,9 @@ def test_correct_get_tracer_project(self): def test_tracing_enabled(): - with patch.dict("os.environ", {"LANGCHAIN_TRACING_V2": "false"}): + with patch.dict( + "os.environ", {"LANGCHAIN_TRACING_V2": "false", "LANGSMITH_TRACING": "false"} + ): assert not ls_utils.tracing_is_enabled() with tracing_context(enabled=True): assert ls_utils.tracing_is_enabled() @@ -97,9 +100,39 @@ def test_tracing_enabled(): assert not ls_utils.tracing_is_enabled() assert not ls_utils.tracing_is_enabled() + @traceable + def child_function(): + assert ls_utils.tracing_is_enabled() + return 1 + + @traceable + def untraced_child_function(): + assert not ls_utils.tracing_is_enabled() + return 1 + + @traceable + def parent_function(): + with patch.dict( + "os.environ", + {"LANGCHAIN_TRACING_V2": "false", "LANGSMITH_TRACING": "false"}, + ): + assert 
ls_utils.tracing_is_enabled() + child_function() + with tracing_context(enabled=False): + assert not ls_utils.tracing_is_enabled() + return untraced_child_function() + + with patch.dict( + "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"} + ): + mock_client = MagicMock(spec=Client) + parent_function(langsmith_extra={"client": mock_client}) + def test_tracing_disabled(): - with patch.dict("os.environ", {"LANGCHAIN_TRACING_V2": "true"}): + with patch.dict( + "os.environ", {"LANGCHAIN_TRACING_V2": "true", "LANGSMITH_TRACING": "true"} + ): assert ls_utils.tracing_is_enabled() with tracing_context(enabled=False): assert not ls_utils.tracing_is_enabled() From 9c6316a42182a8241ff5de2b3c5acaa5c568a327 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 7 Jun 2024 14:59:00 +0200 Subject: [PATCH 130/373] feat(js): add `withRunTree` method for updating context, improve types for `toHeader` and `fromHeaders` --- js/src/run_trees.ts | 33 +++++++-- js/src/singletons/traceable.ts | 16 ++++- js/src/tests/traceable.test.ts | 119 ++++++++++++++++++++++++++++++++- js/src/traceable.ts | 1 + 4 files changed, 160 insertions(+), 9 deletions(-) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index 462ed7171..b1219d819 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -92,6 +92,11 @@ interface LangChainTracerLike extends TracerLike { client: Client; } +interface HeadersLike { + get(name: string): string | null; + set(name: string, value: string): void; +} + /** * Baggage header information */ @@ -405,10 +410,18 @@ export class RunTree implements BaseRun { } static fromHeaders( - headers: Record, + headers: Record | HeadersLike, inheritArgs?: RunTreeConfig ): RunTree | undefined { - const headerTrace = headers["langsmith-trace"]; + const rawHeaders: Record = + "get" in headers && typeof headers.get === "function" + ? 
{ + "langsmith-trace": headers.get("langsmith-trace"), + baggage: headers.get("baggage"), + } + : (headers as Record); + + const headerTrace = rawHeaders["langsmith-trace"]; if (!headerTrace || typeof headerTrace !== "string") return undefined; const parentDottedOrder = headerTrace.trim(); @@ -429,8 +442,8 @@ export class RunTree implements BaseRun { dotted_order: parentDottedOrder, }; - if (headers["baggage"]) { - const baggage = Baggage.fromHeader(headers["baggage"]); + if (rawHeaders["baggage"] && typeof rawHeaders["baggage"] === "string") { + const baggage = Baggage.fromHeader(rawHeaders["baggage"]); config.metadata = baggage.metadata; config.tags = baggage.tags; } @@ -438,11 +451,19 @@ export class RunTree implements BaseRun { return new RunTree(config); } - toHeaders() { - return { + toHeaders(headers?: HeadersLike) { + const result = { "langsmith-trace": this.dotted_order, baggage: new Baggage(this.extra?.metadata, this.tags).toHeader(), }; + + if (headers) { + for (const [key, value] of Object.entries(result)) { + headers.set(key, value); + } + } + + return result; } } diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index 14c9168dc..c750bc8ac 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -51,7 +51,7 @@ export const getCurrentRunTree = () => { [ "Could not get the current run tree.", "", - "Please make sure you are calling this method within a traceable function.", + "Please make sure you are calling this method within a traceable function or the tracing is enabled.", ].join("\n") ); } @@ -59,6 +59,20 @@ export const getCurrentRunTree = () => { return runTree; }; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function withRunTree any>( + runTree: RunTree, + fn: Fn +): Promise>> { + const storage = AsyncLocalStorageProviderSingleton.getInstance(); + return new Promise>>((resolve, reject) => { + storage.run( + runTree, + () => void 
Promise.resolve(fn()).then(resolve).catch(reject) + ); + }); +} + export const ROOT = Symbol.for("langsmith:traceable:root"); export function isTraceableFunction( diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index a580ba414..efc7f57cb 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -1,5 +1,10 @@ -import type { RunTree, RunTreeConfig } from "../run_trees.js"; -import { ROOT, traceable } from "../traceable.js"; +import { RunTree, RunTreeConfig } from "../run_trees.js"; +import { + ROOT, + getCurrentRunTree, + traceable, + withRunTree, +} from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; @@ -115,6 +120,116 @@ test("passing run tree manually", async () => { }); }); +describe("distributed tracing", () => { + it("default", async () => { + const { client, callSpy } = mockClient(); + const child = traceable( + async (depth = 0): Promise => { + if (depth < 2) return child(depth + 1); + return 3; + }, + { name: "child" } + ); + + const parent = traceable( + async function parent() { + const first = await child(); + const second = await child(); + return first + second; + }, + { client, name: "parent", tracingEnabled: true } + ); + + const clientRunTree = new RunTree({ name: "client", client }); + await clientRunTree.postRun(); + + // do nothing with the client run tree + + await clientRunTree.patchRun(); + + const response = await withRunTree(clientRunTree, () => parent()); + expect(response).toBe(6); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "client:0", + "parent:1", + "child:2", + "child:3", + "child:4", + "child:5", + "child:6", + "child:7", + ], + edges: [ + ["client:0", "parent:1"], + ["parent:1", "child:2"], + ["child:2", "child:3"], + ["child:3", "child:4"], + ["parent:1", "child:5"], + ["child:5", "child:6"], + ["child:6", "child:7"], + ], + }); + }); + + it("sync 
function", async () => { + const { client, callSpy } = mockClient(); + const child = traceable( + async (depth = 0): Promise => { + if (depth < 2) return child(depth + 1); + return 3; + }, + { name: "child" } + ); + + const parent = traceable( + async function parent() { + console.log("parent", getCurrentRunTree()); + const first = await child(); + const second = await child(); + return first + second; + }, + { client, tracingEnabled: true } + ); + + const clientRunTree = new RunTree({ name: "client", client }); + await clientRunTree.postRun(); + await clientRunTree.patchRun(); + + let promiseOutside: Promise = Promise.resolve(); + + const response = await withRunTree(clientRunTree, () => { + promiseOutside = parent(); + }); + + expect(response).toBeUndefined(); + await promiseOutside; + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "client:0", + "parent:1", + "child:2", + "child:3", + "child:4", + "child:5", + "child:6", + "child:7", + ], + edges: [ + ["client:0", "parent:1"], + ["parent:1", "child:2"], + ["child:2", "child:3"], + ["child:3", "child:4"], + ["parent:1", "child:5"], + ["child:5", "child:6"], + ["child:6", "child:7"], + ], + }); + }); +}); + describe("async generators", () => { test("success", async () => { const { client, callSpy } = mockClient(); diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 492899d30..ee977e58f 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -583,6 +583,7 @@ export function traceable any>( export { getCurrentRunTree, isTraceableFunction, + withRunTree, ROOT, } from "./singletons/traceable.js"; From f8756fbd43b65030efd1af4a2e28fdc2bf918411 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 7 Jun 2024 15:13:26 +0200 Subject: [PATCH 131/373] Fix tests by passing the tracingEnabled parameter --- js/src/tests/traceable.test.ts | 39 +++++++++++++++++----------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/js/src/tests/traceable.test.ts 
b/js/src/tests/traceable.test.ts index efc7f57cb..72b2d8b3f 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -131,16 +131,17 @@ describe("distributed tracing", () => { { name: "child" } ); - const parent = traceable( - async function parent() { - const first = await child(); - const second = await child(); - return first + second; - }, - { client, name: "parent", tracingEnabled: true } - ); + const parent = traceable(async function parent() { + const first = await child(); + const second = await child(); + return first + second; + }); - const clientRunTree = new RunTree({ name: "client", client }); + const clientRunTree = new RunTree({ + name: "client", + client, + tracingEnabled: true, + }); await clientRunTree.postRun(); // do nothing with the client run tree @@ -183,17 +184,17 @@ describe("distributed tracing", () => { { name: "child" } ); - const parent = traceable( - async function parent() { - console.log("parent", getCurrentRunTree()); - const first = await child(); - const second = await child(); - return first + second; - }, - { client, tracingEnabled: true } - ); + const parent = traceable(async function parent() { + const first = await child(); + const second = await child(); + return first + second; + }); - const clientRunTree = new RunTree({ name: "client", client }); + const clientRunTree = new RunTree({ + name: "client", + client, + tracingEnabled: true, + }); await clientRunTree.postRun(); await clientRunTree.patchRun(); From 5c6d59831842373d50f1fc16158904b64ff12579 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 7 Jun 2024 15:14:57 +0200 Subject: [PATCH 132/373] Fix lint --- js/src/tests/traceable.test.ts | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 72b2d8b3f..523a194b3 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -1,10 +1,5 @@ import { RunTree, RunTreeConfig } from 
"../run_trees.js"; -import { - ROOT, - getCurrentRunTree, - traceable, - withRunTree, -} from "../traceable.js"; +import { ROOT, traceable, withRunTree } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; From a5e69fe9d61534d9295073f52ca530655d9fb726 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 10 Jun 2024 17:45:31 -0700 Subject: [PATCH 133/373] [Python] Keep run_type in from_runnable_config (#777) --- python/langsmith/run_trees.py | 9 ++-- python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_helpers.py | 48 +++++++++++++++++++++ 3 files changed, 53 insertions(+), 6 deletions(-) diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 9aa202bbc..4757adafb 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -339,12 +339,11 @@ def from_runnable_config( ) ) ): - if hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map: - dotted_order = tracer.order_map[cb.parent_run_id][1] - elif ( - run := tracer.run_map.get(str(cb.parent_run_id)) - ) and run.dotted_order: + if (run := tracer.run_map.get(str(cb.parent_run_id))) and run.dotted_order: dotted_order = run.dotted_order + kwargs["run_type"] = run.run_type + elif hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map: + dotted_order = tracer.order_map[cb.parent_run_id][1] else: return None kwargs["client"] = tracer.client diff --git a/python/pyproject.toml b/python/pyproject.toml index 9b8436ecd..5490c8694 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.75" +version = "0.1.76" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 5e044106d..d4dd361c2 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -16,6 +16,7 @@ from langsmith.run_helpers import ( _get_inputs, as_runnable, + get_current_run_tree, is_traceable_function, trace, traceable, @@ -1002,3 +1003,50 @@ def test_client_passed_when_trace_parent(): assert body["post"] assert body["post"][0]["inputs"] == {"foo": "bar"} assert body["post"][0]["outputs"] == {"bar": "baz"} + + +def test_from_runnable_config(): + try: + from langchain_core.tools import tool # type: ignore + from langchain_core.tracers.langchain import LangChainTracer # type: ignore + except ImportError: + pytest.skip("Skipping test that requires langchain") + + gc_run_id = uuid.uuid4() + + @tool + def my_grandchild_tool(text: str, callbacks: Any = None) -> str: + """Foo.""" + lct: LangChainTracer = callbacks.handlers[0] + assert str(gc_run_id) in lct.run_map + run = lct.run_map[str(gc_run_id)] + assert run.name == "my_grandchild_tool" + assert run.run_type == "tool" + parent_run = lct.run_map[str(run.parent_run_id)] + assert parent_run + assert parent_run.name == "my_traceable" + assert parent_run.run_type == "retriever" + grandparent_run = lct.run_map[str(parent_run.parent_run_id)] + assert grandparent_run + assert grandparent_run.name == "my_tool" + assert grandparent_run.run_type == "tool" + return text + + @traceable(run_type="retriever") + def my_traceable(text: str) -> str: + rt = get_current_run_tree() + assert rt + assert rt.run_type == "retriever" + assert rt.parent_run_id + assert rt.parent_run + assert rt.parent_run.run_type == "tool" + return my_grandchild_tool.invoke({"text": text}, {"run_id": gc_run_id}) + + @tool + def my_tool(text: str) -> str: + """Foo.""" + return my_traceable(text) + + mock_client = _get_mock_client() + tracer = 
LangChainTracer(client=mock_client) + my_tool.invoke({"text": "hello"}, {"callbacks": [tracer]}) From d67ccc08522b084868cbf2abc3031b2a54ac482e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jun 2024 05:59:13 +0000 Subject: [PATCH 134/373] chore(deps): bump braces from 3.0.2 to 3.0.3 in /js Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3. - [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3) --- updated-dependencies: - dependency-name: braces dependency-type: indirect ... Signed-off-by: dependabot[bot] --- js/yarn.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/js/yarn.lock b/js/yarn.lock index 1b849910a..8e4cee5e8 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -1890,11 +1890,11 @@ brace-expansion@^1.1.7: concat-map "0.0.1" braces@^3.0.2: - version "3.0.2" - resolved "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" browserslist@^4.21.3, browserslist@^4.21.5: version "4.21.7" @@ -2534,10 +2534,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity 
sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" From 598f943185d1b2d31e484b00d3993b6d4ef8ebd7 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 11 Jun 2024 13:39:06 +0200 Subject: [PATCH 135/373] fix(js): add support for evaluation results --- js/package.json | 2 +- js/src/client.ts | 76 +++++++++++++++++++++------------- js/src/evaluation/evaluator.ts | 24 ++++++++--- 3 files changed, 68 insertions(+), 34 deletions(-) diff --git a/js/package.json b/js/package.json index c4ae119d2..b2972645d 100644 --- a/js/package.json +++ b/js/package.json @@ -248,4 +248,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/src/client.ts b/js/src/client.ts index 995d59668..80a7d3caf 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -41,6 +41,7 @@ import { } from "./evaluation/evaluator.js"; import { __version__ } from "./index.js"; import { assertUuid } from "./utils/_uuid.js"; +import { warnOnce } from "./utils/warn.js"; interface ClientConfig { apiUrl?: string; @@ -2233,6 +2234,9 @@ export class Client { return result; } + /** + * @deprecated This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead. + */ public async evaluateRun( run: Run | string, evaluator: RunEvaluator, @@ -2246,6 +2250,9 @@ export class Client { referenceExample?: Example; } = { loadChildRuns: false } ): Promise { + warnOnce( + "This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead." + ); let run_: Run; if (typeof run === "string") { run_ = await this.readRun(run, { loadChildRuns }); @@ -2260,21 +2267,15 @@ export class Client { ) { referenceExample = await this.readExample(run_.reference_example_id); } + const feedbackResult = await evaluator.evaluateRun(run_, referenceExample); - let sourceInfo_ = sourceInfo ?? 
{}; - if (feedbackResult.evaluatorInfo) { - sourceInfo_ = { ...sourceInfo_, ...feedbackResult.evaluatorInfo }; - } - const runId = feedbackResult.targetRunId ?? run_.id; - return await this.createFeedback(runId, feedbackResult.key, { - score: feedbackResult?.score, - value: feedbackResult?.value, - comment: feedbackResult?.comment, - correction: feedbackResult?.correction, - sourceInfo: sourceInfo_, - feedbackSourceType: "model", - sourceRunId: feedbackResult?.sourceRunId, - }); + const [_, feedbacks] = await this._logEvaluationFeedback( + feedbackResult, + run_, + sourceInfo + ); + + return feedbacks[0]; } public async createFeedback( @@ -2599,14 +2600,17 @@ export class Client { return results_; } - public async logEvaluationFeedback( + async _logEvaluationFeedback( evaluatorResponse: EvaluationResult | EvaluationResults, run?: Run, sourceInfo?: { [key: string]: any } - ): Promise { - const results: Array = + ): Promise<[results: EvaluationResult[], feedbacks: Feedback[]]> { + const evalResults: Array = this._selectEvalResults(evaluatorResponse); - for (const res of results) { + + const feedbacks: Feedback[] = []; + + for (const res of evalResults) { let sourceInfo_ = sourceInfo || {}; if (res.evaluatorInfo) { sourceInfo_ = { ...res.evaluatorInfo, ...sourceInfo_ }; @@ -2618,17 +2622,33 @@ export class Client { runId_ = run.id; } - await this.createFeedback(runId_, res.key, { - score: res.score, - value: res.value, - comment: res.comment, - correction: res.correction, - sourceInfo: sourceInfo_, - sourceRunId: res.sourceRunId, - feedbackConfig: res.feedbackConfig as FeedbackConfig | undefined, - feedbackSourceType: "model", - }); + feedbacks.push( + await this.createFeedback(runId_, res.key, { + score: res.score, + value: res.value, + comment: res.comment, + correction: res.correction, + sourceInfo: sourceInfo_, + sourceRunId: res.sourceRunId, + feedbackConfig: res.feedbackConfig as FeedbackConfig | undefined, + feedbackSourceType: "model", + }) + ); } + + return 
[evalResults, feedbacks]; + } + + public async logEvaluationFeedback( + evaluatorResponse: EvaluationResult | EvaluationResults, + run?: Run, + sourceInfo?: { [key: string]: any } + ): Promise { + const [results] = await this._logEvaluationFeedback( + evaluatorResponse, + run, + sourceInfo + ); return results; } } diff --git a/js/src/evaluation/evaluator.ts b/js/src/evaluation/evaluator.ts index a9b6e096b..92777082b 100644 --- a/js/src/evaluation/evaluator.ts +++ b/js/src/evaluation/evaluator.ts @@ -87,7 +87,7 @@ export interface RunEvaluator { run: Run, example?: Example, options?: Partial - ): Promise; + ): Promise; } export type RunEvaluatorLike = @@ -114,12 +114,26 @@ export class DynamicRunEvaluator any> }) as Func; } + private isEvaluationResults(x: unknown): x is EvaluationResults { + return ( + typeof x === "object" && + x != null && + "results" in x && + Array.isArray(x.results) && + x.results.length > 0 + ); + } + private coerceEvaluationResults( results: Record | EvaluationResults, sourceRunId: string - ): EvaluationResult { - if ("results" in results) { - throw new Error("EvaluationResults not supported yet."); + ): EvaluationResult | EvaluationResults { + if (this.isEvaluationResults(results)) { + return { + results: results.results.map((r) => + this.coerceEvaluationResult(r, sourceRunId, false) + ), + }; } return this.coerceEvaluationResult( @@ -162,7 +176,7 @@ export class DynamicRunEvaluator any> run: Run, example?: Example, options?: Partial - ): Promise { + ): Promise { const sourceRunId = uuidv4(); const metadata: Record = { targetRunId: run.id, From 34251e3b8a42830be3336770e2a5760ccc3d55ed Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 11 Jun 2024 13:44:19 +0200 Subject: [PATCH 136/373] Update types, add tests --- js/src/evaluation/_runner.ts | 7 +++- js/src/tests/evaluate.int.test.ts | 68 +++++++++++++++++++++++-------- 2 files changed, 56 insertions(+), 19 deletions(-) diff --git a/js/src/evaluation/_runner.ts 
b/js/src/evaluation/_runner.ts index 99fccad57..69d71ebf7 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -42,8 +42,11 @@ type SummaryEvaluatorT = // Row-level evaluator type EvaluatorT = | RunEvaluator - | ((run: Run, example?: Example) => EvaluationResult) - | ((run: Run, example?: Example) => Promise); + | ((run: Run, example?: Example) => EvaluationResult | EvaluationResults) + | (( + run: Run, + example?: Example + ) => Promise); interface _ForwardResults { run: Run; diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index f99de57a7..b61fbde39 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -1,4 +1,7 @@ -import { EvaluationResult } from "../evaluation/evaluator.js"; +import { + EvaluationResult, + EvaluationResults, +} from "../evaluation/evaluator.js"; import { evaluate } from "../evaluation/_runner.js"; import { Example, Run, TracerSession } from "../schemas.js"; import { Client } from "../index.js"; @@ -29,16 +32,16 @@ beforeAll(async () => { afterAll(async () => { const client = new Client(); - await client.deleteDataset({ - datasetName: TESTING_DATASET_NAME, - }); - try { - await client.deleteDataset({ - datasetName: "my_splits_ds2", - }); - } catch { - //pass - } + // await client.deleteDataset({ + // datasetName: TESTING_DATASET_NAME, + // }); + // try { + // await client.deleteDataset({ + // datasetName: "my_splits_ds2", + // }); + // } catch { + // //pass + // } }); test("evaluate can evaluate", async () => { @@ -361,12 +364,8 @@ test("can pass multiple evaluators", async () => { }); }; const evaluators = [ - { - evaluateRun: customEvaluatorOne, - }, - { - evaluateRun: customEvaluatorTwo, - }, + { evaluateRun: customEvaluatorOne }, + { evaluateRun: customEvaluatorTwo }, ]; const evalRes = await evaluate(targetFunc, { data: TESTING_DATASET_NAME, @@ -736,3 +735,38 @@ test("evaluate can accept array of examples", async () => { 
expect(firstEvalResults.evaluationResults.results).toHaveLength(1); expect(receivedCommentStrings).toEqual(expectedCommentStrings); }); + +test.only("evaluate accepts evaluators which return multiple feedback keys", async () => { + const targetFunc = (input: Record) => { + console.log("__input__", input); + return { foo: input.input + 1 }; + }; + + const customEvaluator = ( + run: Run, + example?: Example + ): Promise => { + return Promise.resolve({ + results: [ + { + key: "first-key", + score: 1, + comment: `Run: ${run.id} Example: ${example?.id}`, + }, + { + key: "second-key", + score: 2, + comment: `Run: ${run.id} Example: ${example?.id}`, + }, + ], + }); + }; + + const evalRes = await evaluate(targetFunc, { + data: TESTING_DATASET_NAME, + evaluators: [customEvaluator], + description: "evaluate can evaluate with custom evaluators", + }); + + console.log(evalRes) +}); From 078625ef568f91d86b10452ece7a5dbb954e40c0 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 11 Jun 2024 13:48:18 +0200 Subject: [PATCH 137/373] Actually have some asserts --- js/src/tests/evaluate.int.test.ts | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index b61fbde39..82ccf32c4 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -32,16 +32,16 @@ beforeAll(async () => { afterAll(async () => { const client = new Client(); - // await client.deleteDataset({ - // datasetName: TESTING_DATASET_NAME, - // }); - // try { - // await client.deleteDataset({ - // datasetName: "my_splits_ds2", - // }); - // } catch { - // //pass - // } + await client.deleteDataset({ + datasetName: TESTING_DATASET_NAME, + }); + try { + await client.deleteDataset({ + datasetName: "my_splits_ds2", + }); + } catch { + //pass + } }); test("evaluate can evaluate", async () => { @@ -738,7 +738,6 @@ test("evaluate can accept array of examples", async () => { 
test.only("evaluate accepts evaluators which return multiple feedback keys", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1 }; }; @@ -768,5 +767,11 @@ test.only("evaluate accepts evaluators which return multiple feedback keys", asy description: "evaluate can evaluate with custom evaluators", }); - console.log(evalRes) + expect(evalRes.results).toHaveLength(2); + + const comment = `Run: ${evalRes.results[0].run.id} Example: ${evalRes.results[0].example.id}`; + expect(evalRes.results[0].evaluationResults.results).toMatchObject([ + { key: "first-key", score: 1, comment }, + { key: "second-key", score: 2, comment }, + ]); }); From 6ec43d72e68b4372571047772300523723b208f3 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 11 Jun 2024 13:48:26 +0200 Subject: [PATCH 138/373] Remove only flag --- js/src/tests/evaluate.int.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 82ccf32c4..733b68a6d 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -736,7 +736,7 @@ test("evaluate can accept array of examples", async () => { expect(receivedCommentStrings).toEqual(expectedCommentStrings); }); -test.only("evaluate accepts evaluators which return multiple feedback keys", async () => { +test("evaluate accepts evaluators which return multiple feedback keys", async () => { const targetFunc = (input: Record) => { return { foo: input.input + 1 }; }; From 6441bfeafdc54d0c5db5616eec48bc9424395277 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Tue, 11 Jun 2024 17:03:51 -0700 Subject: [PATCH 139/373] [Python] Handle 429 (#781) In the request_with_retries method --- python/langsmith/client.py | 56 +++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/python/langsmith/client.py 
b/python/langsmith/client.py index 029badf35..424cbab31 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -746,7 +746,10 @@ def request_with_retries( ] retry_on_: Tuple[Type[BaseException], ...] = ( *(retry_on or []), - *(ls_utils.LangSmithConnectionError, ls_utils.LangSmithAPIError), + *( + ls_utils.LangSmithConnectionError, + ls_utils.LangSmithAPIError, + ), ) to_ignore_: Tuple[Type[BaseException], ...] = (*(to_ignore or ()),) response = None @@ -840,13 +843,29 @@ def request_with_retries( if response is not None: logger.debug("Passing on exception %s", e) return response - # Else we still raise an error + except ls_utils.LangSmithRateLimitError: + if idx + 1 == stop_after_attempt: + raise + if response is not None: + try: + retry_after = float(response.headers.get("retry-after", "30")) + except Exception as e: + logger.warning( + "Invalid retry-after header: %s", + repr(e), + ) + retry_after = 30 + # Add exponential backoff + retry_after = retry_after * 2**idx + random.random() + time.sleep(retry_after) except retry_on_: + # Handle other exceptions more immediately if idx + 1 == stop_after_attempt: raise sleep_time = 2**idx + (random.random() * 0.5) time.sleep(sleep_time) continue + # Else we still raise an error raise ls_utils.LangSmithError( f"Failed to {method} {pathname} in LangSmith API." 
@@ -1328,22 +1347,6 @@ def batch_ingest_runs( self._post_batch_ingest_runs(orjson.dumps(body_chunks)) def _post_batch_ingest_runs(self, body: bytes): - def handle_429(response: requests.Response, attempt: int) -> bool: - # Min of 30 seconds, max of 1 minute - if response.status_code == 429: - try: - retry_after = float(response.headers.get("retry-after", "30")) - except ValueError: - logger.warning( - "Invalid retry-after header value: %s", - response.headers.get("retry-after"), - ) - retry_after = 30 - # Add exponential backoff - retry_after = retry_after * 2 ** (attempt - 1) + random.random() - time.sleep(retry_after) - return True - return False try: for api_url, api_key in self._write_api_urls.items(): @@ -1359,7 +1362,6 @@ def handle_429(response: requests.Response, attempt: int) -> bool: }, to_ignore=(ls_utils.LangSmithConflictError,), stop_after_attempt=3, - handle_response=handle_429, ) except Exception as e: logger.warning(f"Failed to batch ingest runs: {repr(e)}") @@ -2166,6 +2168,9 @@ def get_test_results( pd.DataFrame A dataframe containing the test results. """ + warnings.warn( + "Function get_test_results is in beta.", UserWarning, stacklevel=2 + ) from concurrent.futures import ThreadPoolExecutor, as_completed # type: ignore import pandas as pd # type: ignore @@ -4295,6 +4300,13 @@ def _evaluate_strings( evaluation=evaluation_config, ) """ # noqa: E501 + # warn as deprecated and to use `aevaluate` instead + warnings.warn( + "The `arun_on_dataset` method is deprecated and" + " will be removed in a future version." + "Please use the `aevaluate` method instead.", + DeprecationWarning, + ) try: from langchain.smith import arun_on_dataset as _arun_on_dataset except ImportError: @@ -4443,6 +4455,12 @@ def _evaluate_strings( evaluation=evaluation_config, ) """ # noqa: E501 + warnings.warn( + "The `run_on_dataset` method is deprecated and" + " will be removed in a future version." 
+ "Please use the `evaluate` method instead.", + DeprecationWarning, + ) try: from langchain.smith import run_on_dataset as _run_on_dataset except ImportError: From 318f5c3be0553dfc08fcdb0110d57633fa04adb0 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Tue, 11 Jun 2024 17:04:04 -0700 Subject: [PATCH 140/373] [Python] Add retries to remaining endpoints (#782) Additional retries beyond the urllib3 ones we've configured --- python/langsmith/client.py | 167 +++++++++++------- python/langsmith/run_helpers.py | 2 +- .../integration_tests/wrappers/test_openai.py | 8 +- python/tests/unit_tests/test_client.py | 65 +++++-- python/tests/unit_tests/test_run_helpers.py | 63 ++++--- 5 files changed, 193 insertions(+), 112 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 424cbab31..d65ae7583 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -657,8 +657,9 @@ def info(self) -> ls_schemas.LangSmithInfo: """ if self._info is None: try: - response = self.session.get( - self.api_url + "/info", + response = self.request_with_retries( + "GET", + "/info", headers={"Accept": "application/json"}, timeout=(self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000), ) @@ -725,18 +726,19 @@ def request_with_retries( """ request_kwargs = request_kwargs or {} request_kwargs = { + "timeout": (self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000), + **request_kwargs, + **kwargs, "headers": { **self._headers, **request_kwargs.get("headers", {}), **kwargs.get("headers", {}), }, - "timeout": (self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000), - **request_kwargs, - **kwargs, } if ( method != "GET" and "data" in request_kwargs + and "files" not in request_kwargs and not request_kwargs["headers"].get("Content-Type") ): request_kwargs["headers"]["Content-Type"] = "application/json" @@ -833,9 +835,14 @@ def request_with_retries( args = list(e.args) msg = args[1] if len(args) > 1 else "" msg = 
msg.replace("session", "session (project)") - emsg = "\n".join( - [str(args[0])] + [msg] + [str(arg) for arg in args[2:]] - ) + if args: + emsg = "\n".join( + [str(args[0])] + + [msg] + + [str(arg) for arg in (args[2:] if len(args) > 2 else [])] + ) + else: + emsg = msg raise ls_utils.LangSmithError( f"Failed to {method} {pathname} in LangSmith API. {emsg}" ) from e @@ -1054,19 +1061,20 @@ def upload_csv( data["description"] = description if data_type: data["data_type"] = ls_utils.get_enum_value(data_type) + data["id"] = str(uuid.uuid4()) if isinstance(csv_file, str): with open(csv_file, "rb") as f: file_ = {"file": f} - response = self.session.post( - self.api_url + "/datasets/upload", - headers=self._headers, + response = self.request_with_retries( + "POST", + "/datasets/upload", data=data, files=file_, ) elif isinstance(csv_file, tuple): - response = self.session.post( - self.api_url + "/datasets/upload", - headers=self._headers, + response = self.request_with_retries( + "POST", + "/datasets/upload", data=data, files={"file": csv_file}, ) @@ -1753,8 +1761,9 @@ def share_run(self, run_id: ID_TYPE, *, share_id: Optional[ID_TYPE] = None) -> s "run_id": str(run_id_), "share_token": share_id or str(uuid.uuid4()), } - response = self.session.put( - f"{self.api_url}/runs/{run_id_}/share", + response = self.request_with_retries( + "PUT", + f"/runs/{run_id_}/share", headers=self._headers, json=data, ) @@ -1764,8 +1773,9 @@ def share_run(self, run_id: ID_TYPE, *, share_id: Optional[ID_TYPE] = None) -> s def unshare_run(self, run_id: ID_TYPE) -> None: """Delete share link for a run.""" - response = self.session.delete( - f"{self.api_url}/runs/{_as_uuid(run_id, 'run_id')}/share", + response = self.request_with_retries( + "DELETE", + f"/runs/{_as_uuid(run_id, 'run_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1780,8 +1790,9 @@ def read_run_shared_link(self, run_id: ID_TYPE) -> Optional[str]: Optional[str]: The shared link for the 
run, or None if the link is not available. """ - response = self.session.get( - f"{self.api_url}/runs/{_as_uuid(run_id, 'run_id')}/share", + response = self.request_with_retries( + "GET", + f"/runs/{_as_uuid(run_id, 'run_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1800,8 +1811,9 @@ def list_shared_runs( ) -> List[ls_schemas.Run]: """Get shared runs.""" params = {"id": run_ids, "share_token": str(share_token)} - response = self.session.get( - f"{self.api_url}/public/{_as_uuid(share_token, 'share_token')}/runs", + response = self.request_with_retries( + "GET", + f"/public/{_as_uuid(share_token, 'share_token')}/runs", headers=self._headers, params=params, ) @@ -1834,8 +1846,9 @@ def read_dataset_shared_schema( raise ValueError("Either dataset_id or dataset_name must be given") if dataset_id is None: dataset_id = self.read_dataset(dataset_name=dataset_name).id - response = self.session.get( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", + response = self.request_with_retries( + "GET", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1863,8 +1876,9 @@ def share_dataset( data = { "dataset_id": str(dataset_id), } - response = self.session.put( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", + response = self.request_with_retries( + "PUT", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", headers=self._headers, json=data, ) @@ -1877,8 +1891,9 @@ def share_dataset( def unshare_dataset(self, dataset_id: ID_TYPE) -> None: """Delete share link for a dataset.""" - response = self.session.delete( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", + response = self.request_with_retries( + "DELETE", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1888,8 +1903,9 @@ def read_shared_dataset( 
share_token: str, ) -> ls_schemas.Dataset: """Get shared datasets.""" - response = self.session.get( - f"{self.api_url}/public/{_as_uuid(share_token, 'share_token')}/datasets", + response = self.request_with_retries( + "GET", + f"/public/{_as_uuid(share_token, 'share_token')}/datasets", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -1906,8 +1922,9 @@ def list_shared_examples( params = {} if example_ids is not None: params["id"] = [str(id) for id in example_ids] - response = self.session.get( - f"{self.api_url}/public/{_as_uuid(share_token, 'share_token')}/examples", + response = self.request_with_retries( + "GET", + f"/public/{_as_uuid(share_token, 'share_token')}/examples", headers=self._headers, params=params, ) @@ -1994,13 +2011,15 @@ def create_project( "name": project_name, "extra": extra, "description": description, + "id": str(uuid.uuid4()), } params = {} if upsert: params["upsert"] = True if reference_dataset_id is not None: body["reference_dataset_id"] = reference_dataset_id - response = self.session.post( + response = self.request_with_retries( + "POST", endpoint, headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(body), @@ -2049,7 +2068,8 @@ def update_project( "description": description, "end_time": end_time.isoformat() if end_time else None, } - response = self.session.patch( + response = self.request_with_retries( + "PATCH", endpoint, headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(body), @@ -2331,8 +2351,9 @@ def delete_project( project_id = str(self.read_project(project_name=project_name).id) elif project_id is None: raise ValueError("Must provide project_name or project_id") - response = self.session.delete( - self.api_url + f"/sessions/{_as_uuid(project_id, 'project_id')}", + response = self.request_with_retries( + "DELETE", + f"/sessions/{_as_uuid(project_id, 'project_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -2365,8 
+2386,9 @@ def create_dataset( description=description, data_type=data_type, ) - response = self.session.post( - self.api_url + "/datasets", + response = self.request_with_retries( + "POST", + "/datasets", headers={**self._headers, "Content-Type": "application/json"}, data=dataset.json(), ) @@ -2507,8 +2529,9 @@ def diff_dataset_versions( raise ValueError("Must provide either dataset name or ID") dataset_id = self.read_dataset(dataset_name=dataset_name).id dsid = _as_uuid(dataset_id, "dataset_id") - response = self.session.get( - f"{self.api_url}/datasets/{dsid}/versions/diff", + response = self.request_with_retries( + "GET", + f"/datasets/{dsid}/versions/diff", headers=self._headers, params={ "from_version": ( @@ -2615,8 +2638,9 @@ def delete_dataset( dataset_id = self.read_dataset(dataset_name=dataset_name).id if dataset_id is None: raise ValueError("Must provide either dataset name or ID") - response = self.session.delete( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}", + response = self.request_with_retries( + "DELETE", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -2667,8 +2691,9 @@ def update_dataset_tag( dataset_id = self.read_dataset(dataset_name=dataset_name).id if dataset_id is None: raise ValueError("Must provide either dataset name or ID") - response = self.session.put( - f"{self.api_url}/datasets/{_as_uuid(dataset_id, 'dataset_id')}/tags", + response = self.request_with_retries( + "PUT", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/tags", headers=self._headers, json={ "as_of": as_of.isoformat(), @@ -3024,7 +3049,7 @@ def create_examples( "dataset_id": dataset_id, "metadata": metadata_, "split": split_, - "id": id_, + "id": id_ or str(uuid.uuid4()), "source_run_id": source_run_id_, } for in_, out_, metadata_, split_, id_, source_run_id_ in zip( @@ -3037,8 +3062,9 @@ def create_examples( ) ] - response = self.session.post( - 
f"{self.api_url}/examples/bulk", + response = self.request_with_retries( + "POST", + "/examples/bulk", headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(examples), ) @@ -3097,11 +3123,11 @@ def create_example( } if created_at: data["created_at"] = created_at.isoformat() - if example_id: - data["id"] = example_id + data["id"] = example_id or str(uuid.uuid4()) example = ls_schemas.ExampleCreate(**data) - response = self.session.post( - f"{self.api_url}/examples", + response = self.request_with_retries( + "POST", + "/examples", headers={**self._headers, "Content-Type": "application/json"}, data=example.json(), ) @@ -3242,8 +3268,9 @@ def update_example( metadata=metadata, split=split, ) - response = self.session.patch( - f"{self.api_url}/examples/{_as_uuid(example_id, 'example_id')}", + response = self.request_with_retries( + "PATCH", + f"/examples/{_as_uuid(example_id, 'example_id')}", headers={**self._headers, "Content-Type": "application/json"}, data=example.json(exclude_none=True), ) @@ -3258,8 +3285,9 @@ def delete_example(self, example_id: ID_TYPE) -> None: example_id : str or UUID The ID of the example to delete. 
""" - response = self.session.delete( - f"{self.api_url}/examples/{_as_uuid(example_id, 'example_id')}", + response = self.request_with_retries( + "DELETE", + f"/examples/{_as_uuid(example_id, 'example_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -3649,8 +3677,9 @@ def update_feedback( feedback_update["correction"] = correction if comment is not None: feedback_update["comment"] = comment - response = self.session.patch( - self.api_url + f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}", + response = self.request_with_retries( + "PATCH", + f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}", headers={**self._headers, "Content-Type": "application/json"}, data=_dumps_json(feedback_update), ) @@ -3729,8 +3758,9 @@ def delete_feedback(self, feedback_id: ID_TYPE) -> None: feedback_id : str or UUID The ID of the feedback to delete. """ - response = self.session.delete( - f"{self.api_url}/feedback/{_as_uuid(feedback_id, 'feedback_id')}", + response = self.request_with_retries( + "DELETE", + f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}", headers=self._headers, ) ls_utils.raise_for_status_with_text(response) @@ -3772,8 +3802,9 @@ def create_feedback_from_token( ) if source_api_url != self.api_url: raise ValueError(f"Invalid source API URL. {source_api_url}") - response = self.session.post( - f"{source_api_url}/feedback/tokens/{_as_uuid(token_uuid)}", + response = self.request_with_retries( + "POST", + f"/feedback/tokens/{_as_uuid(token_uuid)}", data=_dumps_json( { "score": score, @@ -3781,6 +3812,7 @@ def create_feedback_from_token( "correction": correction, "comment": comment, "metadata": metadata, + # TODO: Add ID once the API supports it. 
} ), headers=self._headers, @@ -3794,6 +3826,7 @@ def create_presigned_feedback_token( *, expiration: Optional[datetime.datetime | datetime.timedelta] = None, feedback_config: Optional[ls_schemas.FeedbackConfig] = None, + feedback_id: Optional[ID_TYPE] = None, ) -> ls_schemas.FeedbackIngestToken: """Create a pre-signed URL to send feedback data to. @@ -3812,6 +3845,8 @@ def create_presigned_feedback_token( this defines how the metric should be interpreted, such as a continuous score (w/ optional bounds), or distribution over categorical values. + feedback_id: The ID of the feedback to create. If not provided, a new + feedback will be created. Returns: The pre-signed URL for uploading feedback data. @@ -3820,6 +3855,7 @@ def create_presigned_feedback_token( "run_id": run_id, "feedback_key": feedback_key, "feedback_config": feedback_config, + "id": feedback_id or str(uuid.uuid4()), } if expiration is None: body["expires_in"] = ls_schemas.TimeDeltaInput( @@ -4018,7 +4054,7 @@ def create_annotation_queue( body = { "name": name, "description": description, - "id": queue_id, + "id": queue_id or str(uuid.uuid4()), } response = self.request_with_retries( "POST", @@ -4071,8 +4107,9 @@ def delete_annotation_queue(self, queue_id: ID_TYPE) -> None: Args: queue_id (ID_TYPE): The ID of the annotation queue to delete. 
""" - response = self.session.delete( - f"{self.api_url}/annotation-queues/{_as_uuid(queue_id, 'queue_id')}", + response = self.request_with_retries( + "DELETE", + f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}", headers={"Accept": "application/json", **self._headers}, ) ls_utils.raise_for_status_with_text(response) @@ -4153,7 +4190,7 @@ def create_comparative_experiment( if not reference_dataset: raise ValueError("A reference dataset is required.") body: Dict[str, Any] = { - "id": id, + "id": id or str(uuid.uuid4()), "name": name, "experiment_ids": experiments, "reference_dataset_id": reference_dataset, diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index f9e3f9bb5..531358cbc 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -376,7 +376,7 @@ def manual_extra_function(x): manual_extra_function(5, langsmith_extra={"metadata": {"version": "1.0"}}) - """ + """ # noqa: E501 run_type: ls_client.RUN_TYPE_T = ( args[0] if args and isinstance(args[0], str) diff --git a/python/tests/integration_tests/wrappers/test_openai.py b/python/tests/integration_tests/wrappers/test_openai.py index 11bc7bf3f..d12e77da6 100644 --- a/python/tests/integration_tests/wrappers/test_openai.py +++ b/python/tests/integration_tests/wrappers/test_openai.py @@ -43,7 +43,7 @@ def test_chat_sync_api(mock_session: mock.MagicMock, stream: bool): assert original.choices == patched.choices # Give the thread a chance. time.sleep(0.01) - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" @@ -78,7 +78,7 @@ async def test_chat_async_api(mock_session: mock.MagicMock, stream: bool): assert original.choices == patched.choices # Give the thread a chance. 
time.sleep(0.1) - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" @@ -119,7 +119,7 @@ def test_completions_sync_api(mock_session: mock.MagicMock, stream: bool): assert original.choices == patched.choices # Give the thread a chance. time.sleep(0.1) - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" @@ -174,5 +174,5 @@ async def test_completions_async_api(mock_session: mock.MagicMock, stream: bool) if mock_session.return_value.request.call_count >= 1: break assert mock_session.return_value.request.call_count >= 1 - for call in mock_session.return_value.request.call_args_list: + for call in mock_session.return_value.request.call_args_list[1:]: assert call[0][0].upper() == "POST" diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index f08e17864..a653cf704 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -198,7 +198,13 @@ def test_upload_csv(mock_session_cls: mock.Mock) -> None: "examples": [example_1, example_2], } mock_session = mock.Mock() - mock_session.post.return_value = mock_response + + def mock_request(*args, **kwargs): # type: ignore + if args[0] == "POST" and args[1].endswith("datasets"): + return mock_response + return MagicMock() + + mock_session.request.return_value = mock_response mock_session_cls.return_value = mock_session client = Client( @@ -425,27 +431,43 @@ def mock_get(*args, **kwargs): assert client.tracing_queue client.tracing_queue.join() - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) >= 1 for call in request_calls: assert call.args[0] == "POST" assert 
call.args[1] == "http://localhost:1984/runs/batch" - get_calls = [call for call in session.get.mock_calls if call.args] + get_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "GET" + ] # assert len(get_calls) == 1 for call in get_calls: - assert call.args[0] == f"{api_url}/info" + assert call.args[1] == f"{api_url}/info" else: - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) == 10 for call in request_calls: assert call.args[0] == "POST" assert call.args[1] == "http://localhost:1984/runs" if auto_batch_tracing: - get_calls = [call for call in session.get.mock_calls if call.args] + get_calls = [ + call + for call in session.get.mock_calls + if call.args and call.args[0] == "GET" + ] for call in get_calls: - assert call.args[0] == f"{api_url}/info" + assert call.args[1] == f"{api_url}/info" del client time.sleep(3) # Give the background thread time to stop gc.collect() # Force garbage collection @@ -468,7 +490,11 @@ def test_client_gc_no_batched_runs(auto_batch_tracing: bool) -> None: # because no trace_id/dotted_order provided, auto batch is disabled for _ in range(10): client.create_run("my_run", inputs={}, run_type="llm", id=uuid.uuid4()) - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) == 10 for call in request_calls: assert call.args[1] == "http://localhost:1984/runs" @@ -510,7 +536,11 @@ def filter_outputs(outputs: dict): ) expected.append(output_val + "goodbye") - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] in {"POST", "PATCH"} + ] all_posted = "\n".join( 
[call.kwargs["data"].decode("utf-8") for call in request_calls] ) @@ -549,7 +579,11 @@ def test_client_gc_after_autoscale() -> None: gc.collect() # Force garbage collection assert tracker.counter == 1, "Client was not garbage collected" - request_calls = [call for call in session.request.mock_calls if call.args] + request_calls = [ + call + for call in session.request.mock_calls + if call.args and call.args[0] == "POST" + ] assert len(request_calls) >= 500 and len(request_calls) <= 550 for call in request_calls: assert call.args[0] == "POST" @@ -874,7 +908,7 @@ def test_host_url(_: MagicMock) -> None: @patch("langsmith.client.time.sleep") def test_retry_on_connection_error(mock_sleep: MagicMock): mock_session = MagicMock() - client = Client(api_key="test", session=mock_session) + client = Client(api_key="test", session=mock_session, auto_batch_tracing=False) mock_session.request.side_effect = requests.ConnectionError() with pytest.raises(ls_utils.LangSmithConnectionError): @@ -885,7 +919,7 @@ def test_retry_on_connection_error(mock_sleep: MagicMock): @patch("langsmith.client.time.sleep") def test_http_status_500_handling(mock_sleep): mock_session = MagicMock() - client = Client(api_key="test", session=mock_session) + client = Client(api_key="test", session=mock_session, auto_batch_tracing=False) mock_response = MagicMock() mock_response.status_code = 500 mock_response.raise_for_status.side_effect = HTTPError() @@ -899,12 +933,11 @@ def test_http_status_500_handling(mock_sleep): @patch("langsmith.client.time.sleep") def test_pass_on_409_handling(mock_sleep): mock_session = MagicMock() - client = Client(api_key="test", session=mock_session) + client = Client(api_key="test", session=mock_session, auto_batch_tracing=False) mock_response = MagicMock() mock_response.status_code = 409 mock_response.raise_for_status.side_effect = HTTPError() mock_session.request.return_value = mock_response - response = client.request_with_retries( "GET", "https://test.url", @@ -1028,7 
+1061,9 @@ def test_batch_ingest_run_splits_large_batches(payload_size: int): request_bodies = [ op for call in mock_session.request.call_args_list - for reqs in orjson.loads(call[1]["data"]).values() + for reqs in ( + orjson.loads(call[1]["data"]).values() if call[0][0] == "POST" else [] + ) for op in reqs ] all_run_ids = run_ids + patch_ids diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index d4dd361c2..e02a58713 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -6,7 +6,7 @@ import time import uuid import warnings -from typing import Any, AsyncGenerator, Generator, Optional, cast +from typing import Any, AsyncGenerator, Generator, Optional, Set, cast from unittest.mock import MagicMock, patch import pytest @@ -25,6 +25,27 @@ from langsmith.run_trees import RunTree +def _get_calls( + mock_client: Any, + minimum: Optional[int] = 0, + verbs: Set[str] = {"POST"}, + attempts: int = 5, +) -> list: + calls = [] + for _ in range(attempts): + time.sleep(0.1) + calls = [ + c + for c in mock_client.session.request.mock_calls # type: ignore + if c.args and c.args[0] in verbs + ] + if minimum is None: + return calls + if minimum is not None and len(calls) > minimum: + break + return calls + + def test__get_inputs_with_no_args() -> None: def foo() -> None: pass @@ -198,9 +219,9 @@ def my_iterator_fn(a, b, d, **kwargs): results = list(genout) assert results == expected # Wait for batcher - time.sleep(0.25) + # check the mock_calls - mock_calls = mock_client.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] @@ -233,10 +254,8 @@ async def my_iterator_fn(a, b, d, **kwargs): else: results = [item async for item in genout] assert results == expected - # Wait for batcher - await asyncio.sleep(0.25) # check the mock_calls - mock_calls = mock_client.session.request.mock_calls # 
type: ignore + mock_calls = _get_calls(mock_client, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] @@ -347,13 +366,12 @@ def my_function(a: int) -> int: ) == 2 ) - time.sleep(1) # Inspect the mock_calls and assert that 2 runs were created, # one for the parent and one for the child - mock_calls = mock_client_.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client_, minimum=2) posts = [] for call in mock_calls: - if call.args: + if call.args and call.args[0] != "GET": assert call.args[0] == "POST" assert call.args[1].startswith("https://api.smith.langchain.com") body = json.loads(call.kwargs["data"]) @@ -387,13 +405,12 @@ def my_function(a: int, config: dict) -> int: ) == 2 ) - time.sleep(1) # Inspect the mock_calls and assert that 2 runs were created, # one for the parent and one for the child - mock_calls = mock_client_.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client_, minimum=2) posts = [] for call in mock_calls: - if call.args: + if call.args and call.args[0] != "GET": assert call.args[0] == "POST" assert call.args[1].startswith("https://api.smith.langchain.com") body = json.loads(call.kwargs["data"]) @@ -414,10 +431,9 @@ def my_function(a: int, b: int, d: int) -> int: return a + b + d my_function(1, 2, 3) - time.sleep(0.25) # Inspect the mock_calls and asser tthat "my foo project" is in # the session_name arg of the body - mock_calls = mock_client_.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client_, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] assert call.args[0] == "POST" @@ -434,11 +450,10 @@ def my_other_function(run_tree) -> int: return my_function(1, 2, 3) my_other_function() # type: ignore - time.sleep(0.25) # Inspect the mock_calls and assert that "my bar project" is in # both all POST runs in the single request. We want to ensure # all runs in a trace are associated with the same project. 
- mock_calls = mock_client_.session.request.mock_calls # type: ignore + mock_calls = _get_calls(mock_client_, minimum=1) assert 1 <= len(mock_calls) <= 2 call = mock_calls[0] assert call.args[0] == "POST" @@ -968,11 +983,8 @@ def my_run(foo: str): return {"baz": "buzz"} my_run(foo="bar", langsmith_extra={"parent": headers, "client": mock_client}) - for _ in range(1): - time.sleep(0.1) - if mock_client.session.request.call_count > 0: - break - assert mock_client.session.request.call_count == 1 + mock_calls = _get_calls(mock_client) + assert len(mock_calls) == 1 call = mock_client.session.request.call_args assert call.args[0] == "POST" assert call.args[1].startswith("https://api.smith.langchain.com") @@ -991,12 +1003,9 @@ def test_client_passed_when_trace_parent(): name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client ) as rt: rt.outputs["bar"] = "baz" - for _ in range(1): - time.sleep(0.1) - if mock_client.session.request.call_count > 0: - break - assert mock_client.session.request.call_count == 1 - call = mock_client.session.request.call_args + calls = _get_calls(mock_client) + assert len(calls) == 1 + call = calls[0] assert call.args[0] == "POST" assert call.args[1].startswith("https://api.smith.langchain.com") body = json.loads(call.kwargs["data"]) From 33b260abfea7600ef390608ea9d5114414a9a8a3 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Tue, 11 Jun 2024 17:13:33 -0700 Subject: [PATCH 141/373] [Python] Fix get_test_results for long batches (#783) --- python/langsmith/client.py | 5 ++++- python/langsmith/env/_runtime_env.py | 2 +- python/pyproject.toml | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index d65ae7583..746b41704 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -149,6 +149,7 @@ def _default_retry_config() -> Retry: # Sadly urllib3 1.x doesn't support backoff_jitter 
raise_on_redirect=False, raise_on_status=False, + respect_retry_after_header=True, ) # the `allowed_methods` keyword is not available in urllib3 < 1.26 @@ -2249,7 +2250,9 @@ def fetch_examples(batch): ) if r.reference_example_id: example_ids.append(r.reference_example_id) - if len(results) % batch_size == 0: + else: + logger.warning(f"Run {r.id} has no reference example ID.") + if len(example_ids) % batch_size == 0: # Ensure not empty if batch := example_ids[cursor : cursor + batch_size]: futures.append(executor.submit(fetch_examples, batch)) diff --git a/python/langsmith/env/_runtime_env.py b/python/langsmith/env/_runtime_env.py index 5fe46a9c7..7f25b3572 100644 --- a/python/langsmith/env/_runtime_env.py +++ b/python/langsmith/env/_runtime_env.py @@ -199,7 +199,7 @@ def get_langchain_env_var_metadata() -> dict: def _get_default_revision_id() -> Optional[str]: """Get the default revision ID based on `git describe`.""" try: - return exec_git(["describe", "--tags", "--dirty"]) + return exec_git(["describe", "--tags", "--always", "--dirty"]) except BaseException: return None diff --git a/python/pyproject.toml b/python/pyproject.toml index 5490c8694..f3b108665 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.76" +version = "0.1.77" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From 8e151eda056780223e08009de747694692ff98ae Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 17 Jun 2024 14:02:21 -0700 Subject: [PATCH 142/373] [Python] Pass through IO (#796) When loading from runnable config --- .github/workflows/python_test.yml | 2 +- python/langsmith/run_trees.py | 4 ++ python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_helpers.py | 63 ++++++++++++++++++++- 4 files changed, 66 insertions(+), 5 deletions(-) diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 188d89b08..25e898188 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -42,7 +42,7 @@ jobs: - name: Install dependencies run: | poetry install --with dev,lint - poetry run pip install -U langchain + poetry run pip install -U langchain langchain-core - name: Build ${{ matrix.python-version }} run: poetry build - name: Lint ${{ matrix.python-version }} diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 4757adafb..591472e04 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -342,6 +342,10 @@ def from_runnable_config( if (run := tracer.run_map.get(str(cb.parent_run_id))) and run.dotted_order: dotted_order = run.dotted_order kwargs["run_type"] = run.run_type + kwargs["inputs"] = run.inputs + kwargs["outputs"] = run.outputs + kwargs["start_time"] = run.start_time + kwargs["end_time"] = run.end_time elif hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map: dotted_order = tracer.order_map[cb.parent_run_id][1] else: diff --git a/python/pyproject.toml b/python/pyproject.toml index f3b108665..1c6239d3c 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.77" +version = "0.1.78" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index e02a58713..4ea0d564e 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -33,7 +33,6 @@ def _get_calls( ) -> list: calls = [] for _ in range(attempts): - time.sleep(0.1) calls = [ c for c in mock_client.session.request.mock_calls # type: ignore @@ -43,6 +42,7 @@ def _get_calls( return calls if minimum is not None and len(calls) > minimum: break + time.sleep(0.1) return calls @@ -185,9 +185,9 @@ def foo(kwargs: int, *, b: int, c: int, **some_other_kwargs: Any) -> None: } -def _get_mock_client() -> Client: +def _get_mock_client(**kwargs: Any) -> Client: mock_session = MagicMock() - client = Client(session=mock_session, api_key="test") + client = Client(session=mock_session, api_key="test", **kwargs) return client @@ -1059,3 +1059,60 @@ def my_tool(text: str) -> str: mock_client = _get_mock_client() tracer = LangChainTracer(client=mock_client) my_tool.invoke({"text": "hello"}, {"callbacks": [tracer]}) + + +def test_io_interops(): + try: + from langchain.callbacks.tracers import LangChainTracer + from langchain.schema.runnable import RunnableLambda + except ImportError: + pytest.skip("Skipping test that requires langchain") + tracer = LangChainTracer(client=_get_mock_client(auto_batch_tracing=False)) + stage_added = { + "parent_input": {"original_input": "original_input_value"}, + "child_input": {"parent_input": "parent_input_value"}, + "child_output": {"child_output": "child_output_value"}, + "parent_output": {"parent_output": "parent_output_value"}, + } + + @RunnableLambda + def child(inputs: dict) -> dict: + return {**stage_added["child_output"], **inputs} + + @RunnableLambda + def parent(inputs: dict) -> dict: + return { + **stage_added["parent_output"], + **child.invoke({**stage_added["child_input"], **inputs}), + } + + expected_at_stage = {} + current = {} + 
for stage in stage_added: + current = {**current, **stage_added[stage]} + expected_at_stage[stage] = current + parent_result = parent.invoke(stage_added["parent_input"], {"callbacks": [tracer]}) + assert parent_result == expected_at_stage["parent_output"] + mock_posts = _get_calls(tracer.client, minimum=2) + assert len(mock_posts) == 2 + datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_posts] + assert datas[0]["name"] == "parent" + assert datas[0]["inputs"] == expected_at_stage["parent_input"] + assert not datas[0]["outputs"] + assert datas[1]["name"] == "child" + assert datas[1]["inputs"] == expected_at_stage["child_input"] + assert not datas[1]["outputs"] + parent_uid = datas[0]["id"] + child_uid = datas[1]["id"] + + # Check the patch requests + mock_patches = _get_calls(tracer.client, verbs={"PATCH"}, minimum=2) + assert len(mock_patches) == 2 + child_patch = json.loads(mock_patches[0].kwargs["data"]) + assert child_patch["id"] == child_uid + assert child_patch["outputs"] == expected_at_stage["child_output"] + assert child_patch["inputs"] == expected_at_stage["child_input"] + parent_patch = json.loads(mock_patches[1].kwargs["data"]) + assert parent_patch["id"] == parent_uid + assert parent_patch["outputs"] == expected_at_stage["parent_output"] + assert parent_patch["inputs"] == expected_at_stage["parent_input"] From edddf27eee8dfe0c4f98ccfe20a95761aa25e9dc Mon Sep 17 00:00:00 2001 From: infra Date: Mon, 17 Jun 2024 20:55:40 -0400 Subject: [PATCH 143/373] chore: update docker-compose 0.6 --- python/langsmith/cli/.env.example | 1 + python/langsmith/cli/docker-compose.yaml | 15 ++++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index eb9d9120f..50c5c3a99 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -18,3 +18,4 @@ CLICKHOUSE_PORT=8123 # Change to your Clickhouse port if needed CLICKHOUSE_TLS=false # Change 
to true if you are using TLS to connect to Clickhouse. Otherwise, leave it as is CLICKHOUSE_PASSWORD=password # Change to your Clickhouse password if needed CLICKHOUSE_NATIVE_PORT=9000 # Change to your Clickhouse native port if needed +ORG_CREATION_DISABLED=false # Set to true if you want to disable org creation diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 58922da51..e5e47da1b 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,22 +1,26 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.5.7} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.6.6} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.6.6} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - VITE_OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} + - VITE_SUBDOMAIN=langsmith + - VITE_PLAYGROUND_BASE_URL=/langsmith/api/playground ports: - - 80:80 + - 1980:1980 + volumes: + - ./nginx-override.conf:/etc/nginx/conf.d/default.conf depends_on: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.6} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker @@ -36,6 +40,7 @@ services: - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} + - FF_ORG_CREATION_DISABLED=${FF_ORG_CREATION_DISABLED:-false} ports: - 1984:1984 depends_on: @@ -49,7 +54,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} + image: 
langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.6} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker From 87570d5eae0bff30eb8b65b74f42488eb8b7d8f8 Mon Sep 17 00:00:00 2001 From: infra Date: Mon, 17 Jun 2024 20:56:41 -0400 Subject: [PATCH 144/373] chore: update docker-compose 0.6 --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index 1c6239d3c..af79a6b6f 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.78" +version = "0.1.79" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 52973c36a54489ed807b8a703441ff980a41eeb1 Mon Sep 17 00:00:00 2001 From: infra Date: Mon, 17 Jun 2024 20:59:00 -0400 Subject: [PATCH 145/373] fix nginx override --- python/langsmith/cli/docker-compose.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index e5e47da1b..cca3a49a9 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -14,8 +14,6 @@ services: - VITE_PLAYGROUND_BASE_URL=/langsmith/api/playground ports: - 1980:1980 - volumes: - - ./nginx-override.conf:/etc/nginx/conf.d/default.conf depends_on: - langchain-backend - langchain-playground From e292629d7c90d94dcd941fb643d9f726723dcb51 Mon Sep 17 00:00:00 2001 From: infra Date: Mon, 17 Jun 2024 21:08:21 -0400 Subject: [PATCH 146/373] fix nginx override --- python/langsmith/cli/.env.example | 4 ++-- python/langsmith/cli/docker-compose.yaml | 4 +--- python/langsmith/cli/users.xml | 1 - 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 50c5c3a99..45d8871b5 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ 
-1,7 +1,7 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. -_LANGSMITH_IMAGE_VERSION=0.5.7 +_LANGSMITH_IMAGE_VERSION=0.6.6 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key -OPENAI_API_KEY=your-openai-api-key # Needed for Online Evals and Magic Query features +OPENAI_API_KEY=your-openai-api-key # Needed for Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 OAUTH_CLIENT_ID=your-client-id # Required if AUTH_TYPE=oauth OAUTH_ISSUER_URL=https://your-issuer-url # Required if AUTH_TYPE=oauth diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index cca3a49a9..622f9b99b 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -10,8 +10,6 @@ services: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} - VITE_OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL} - - VITE_SUBDOMAIN=langsmith - - VITE_PLAYGROUND_BASE_URL=/langsmith/api/playground ports: - 1980:1980 depends_on: @@ -38,7 +36,7 @@ services: - CLICKHOUSE_DB=${CLICKHOUSE_DB:-default} - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} - - FF_ORG_CREATION_DISABLED=${FF_ORG_CREATION_DISABLED:-false} + - FF_ORG_CREATION_DISABLED=${ORG_CREATION_DISABLED:-false} ports: - 1984:1984 depends_on: diff --git a/python/langsmith/cli/users.xml b/python/langsmith/cli/users.xml index d08fcfd28..c29aa8b57 100644 --- a/python/langsmith/cli/users.xml +++ b/python/langsmith/cli/users.xml @@ -12,7 +12,6 @@ 1 2000000 - 1 0 1 From db34a49f4dd97ab06bfd3ca413d4cb4c5c2f10c1 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Tue, 18 Jun 2024 01:51:03 -0400 Subject: [PATCH 147/373] Add python 3_9 and 3_10 to test matrix (#791) Add to test matrix --------- Co-authored-by: William Fu-Hinthorn 
<13333726+hinthornw@users.noreply.github.com> --- .github/workflows/python_test.yml | 2 ++ python/langsmith/client.py | 1 - python/langsmith/evaluation/_runner.py | 2 ++ python/tests/unit_tests/test_utils.py | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 25e898188..5a45962ae 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -20,6 +20,8 @@ jobs: matrix: python-version: - "3.8" + - "3.9" + - "3.10" - "3.11" - "3.12" defaults: diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 746b41704..13e3eb15b 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -1356,7 +1356,6 @@ def batch_ingest_runs( self._post_batch_ingest_runs(orjson.dumps(body_chunks)) def _post_batch_ingest_runs(self, body: bytes): - try: for api_url, api_key in self._write_api_urls.items(): self.request_with_retries( diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index ae9d983c8..f5cc1ae4c 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -518,6 +518,8 @@ def evaluate_comparative( View the evaluation results for experiment:... 
>>> results_1.wait() >>> results_2.wait() + >>> import time + >>> time.sleep(10) # Wait for the traces to be fully processed Finally, you would compare the two prompts directly: >>> import json diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index 8fa74992a..9cadaa9cb 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -1,3 +1,4 @@ +# mypy: disable-error-code="annotation-unchecked" import copy import dataclasses import itertools From e99cf0c2bf67193e02e00baaf739b1baf3487cfd Mon Sep 17 00:00:00 2001 From: infra Date: Tue, 18 Jun 2024 11:32:33 -0400 Subject: [PATCH 148/373] chore: update docker-compose 0.6.7 --- python/langsmith/cli/.env.example | 2 +- python/langsmith/cli/docker-compose.yaml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 45d8871b5..5164bcad4 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,5 +1,5 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. 
-_LANGSMITH_IMAGE_VERSION=0.6.6 +_LANGSMITH_IMAGE_VERSION=0.6.7 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key OPENAI_API_KEY=your-openai-api-key # Needed for Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 622f9b99b..81b5af862 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.6.6} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.6.7} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.6.6} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.6.7} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,7 +16,7 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.6} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.7} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker @@ -50,7 +50,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.6} + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.7} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker From 1248fd259a4b37ca5ebd779f9793b04a88479d4f Mon Sep 17 00:00:00 2001 From: infra Date: Tue, 18 Jun 2024 13:22:50 -0400 Subject: [PATCH 149/373] chore: bump 0.6.7 images --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index af79a6b6f..06ac5da2e 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ 
-1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.79" +version = "0.1.80" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From e7a315bf0efe92965f3cf9cc23d47eb58371c425 Mon Sep 17 00:00:00 2001 From: infra Date: Tue, 18 Jun 2024 18:31:35 -0400 Subject: [PATCH 150/373] 0.6.9 --- python/langsmith/cli/.env.example | 2 +- python/langsmith/cli/docker-compose.yaml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/python/langsmith/cli/.env.example b/python/langsmith/cli/.env.example index 5164bcad4..2d6722926 100644 --- a/python/langsmith/cli/.env.example +++ b/python/langsmith/cli/.env.example @@ -1,5 +1,5 @@ # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key. -_LANGSMITH_IMAGE_VERSION=0.6.7 +_LANGSMITH_IMAGE_VERSION=0.6.9 LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key OPENAI_API_KEY=your-openai-api-key # Needed for Magic Query features AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 81b5af862..51c39f022 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -1,11 +1,11 @@ version: "4" services: langchain-playground: - image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.6.7} + image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.6.9} ports: - 3001:3001 langchain-frontend: - image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.6.7} + image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} environment: - VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none} - VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID} @@ -16,7 +16,7 @@ services: - langchain-backend - langchain-playground langchain-backend: - image: 
langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.7} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} environment: - PORT=1984 - LANGCHAIN_ENV=local_docker @@ -50,7 +50,7 @@ services: condition: service_completed_successfully restart: always langchain-platform-backend: - image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.7} + image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} environment: - PORT=1986 - LANGCHAIN_ENV=local_docker From fc063f5b54f838bdcd8deef20ed43b6c8bf8c922 Mon Sep 17 00:00:00 2001 From: infra Date: Tue, 18 Jun 2024 19:03:39 -0400 Subject: [PATCH 151/373] fix --- python/langsmith/cli/docker-compose.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 51c39f022..87130aa13 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -76,7 +76,7 @@ services: condition: service_completed_successfully restart: always langchain-queue: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} environment: - LANGCHAIN_ENV=local_docker - LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY} @@ -145,7 +145,7 @@ services: timeout: 2s retries: 30 langchain-clickhouse: - image: clickhouse/clickhouse-server:23.9 + image: clickhouse/clickhouse-server:24.2 user: "101:101" restart: always environment: @@ -164,7 +164,7 @@ services: timeout: 2s retries: 30 clickhouse-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} depends_on: langchain-clickhouse: condition: service_healthy @@ -183,7 +183,7 @@ services: "migrate -source file://clickhouse/migrations -database 
'clickhouse://${CLICKHOUSE_HOST}:${CLICKHOUSE_NATIVE_PORT}?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}&database=${CLICKHOUSE_DB}&x-multi-statement=true&x-migrations-table-engine=MergeTree' up", ] postgres-setup: - image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.5.7} + image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.6.9} depends_on: langchain-db: condition: service_healthy From 845a8556ec1ae963c20632c6e704b1c9929c664b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 16:50:44 -0700 Subject: [PATCH 152/373] chore(deps): bump urllib3 from 1.26.18 to 1.26.19 in /python (#801) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.18 to 1.26.19.
Release notes

Sourced from urllib3's releases.

1.26.19

🚀 urllib3 is fundraising for HTTP/2 support

urllib3 is raising ~$40,000 USD to release HTTP/2 support and ensure long-term sustainable maintenance of the project after a sharp decline in financial support for 2023. If your company or organization uses Python and would benefit from HTTP/2 support in Requests, pip, cloud SDKs, and thousands of other projects please consider contributing financially to ensure HTTP/2 support is developed sustainably and maintained for the long-haul.

Thank you for your support.

Changes

  • Added the Proxy-Authorization header to the list of headers to strip from requests when redirecting to a different host. As before, different headers can be set via Retry.remove_headers_on_redirect.

Full Changelog: https://github.com/urllib3/urllib3/compare/1.26.18...1.26.19

Note that due to an issue with our release automation, no multiple.intoto.jsonl file is available for this release.

Changelog

Sourced from urllib3's changelog.

1.26.19 (2024-06-17)

  • Added the Proxy-Authorization header to the list of headers to strip from requests when redirecting to a different host. As before, different headers can be set via Retry.remove_headers_on_redirect.
  • Fixed handling of OpenSSL 3.2.0 new error message for misconfiguring an HTTP proxy as HTTPS. ([#3405](https://github.com/urllib3/urllib3/issues/3405) <https://github.com/urllib3/urllib3/issues/3405>__)
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=urllib3&package-manager=pip&previous-version=1.26.18&new-version=1.26.19)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/langchain-ai/langsmith-sdk/network/alerts).
--------- Signed-off-by: dependabot[bot] Co-authored-by: Eugene Yurtsev Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: William FH <13333726+hinthornw@users.noreply.github.com> --- python/Makefile | 2 +- python/poetry.lock | 66 ++++++++++++++++++++----------------------- python/pyproject.toml | 4 +++ 3 files changed, 35 insertions(+), 37 deletions(-) diff --git a/python/Makefile b/python/Makefile index 795e4c168..c8646ed56 100644 --- a/python/Makefile +++ b/python/Makefile @@ -1,7 +1,7 @@ .PHONY: tests lint format build publish doctest integration_tests integration_tests_fast evals tests: - poetry run python -m pytest -n auto --durations=10 tests/unit_tests + poetry run python -m pytest --disable-socket --allow-unix-socket -n auto --durations=10 tests/unit_tests tests_watch: poetry run ptw --now . -- -vv -x tests/unit_tests diff --git a/python/poetry.lock b/python/poetry.lock index 14cb338fb..0ed84b55d 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -978,6 +978,20 @@ files = [ packaging = ">=17.1" pytest = ">=7.2" +[[package]] +name = "pytest-socket" +version = "0.7.0" +description = "Pytest Plugin to disable socket calls during tests" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "pytest_socket-0.7.0-py3-none-any.whl", hash = "sha256:7e0f4642177d55d317bbd58fc68c6bd9048d6eadb2d46a89307fa9221336ce45"}, + {file = "pytest_socket-0.7.0.tar.gz", hash = "sha256:71ab048cbbcb085c15a4423b73b619a8b35d6a307f46f78ea46be51b1b7e11b3"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + [[package]] name = "pytest-subtests" version = "0.11.0" @@ -1054,6 +1068,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1061,8 +1076,16 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1079,6 +1102,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = 
"PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1086,6 +1110,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1256,20 +1281,6 @@ files = [ [package.dependencies] types-urllib3 = "*" -[[package]] -name = "types-requests" -version = "2.31.0.20240406" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-requests-2.31.0.20240406.tar.gz", hash = "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1"}, - {file = "types_requests-2.31.0.20240406-py3-none-any.whl", hash = "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5"}, -] - -[package.dependencies] -urllib3 = ">=2" - 
[[package]] name = "types-tqdm" version = "4.66.0.20240106" @@ -1320,13 +1331,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "urllib3" -version = "1.26.18" +version = "1.26.19" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, + {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, + {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, ] [package.extras] @@ -1334,23 +1345,6 @@ brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotl secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] -[[package]] -name = "urllib3" -version = "2.2.1" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - [[package]] name = "uvicorn" version = "0.29.0" @@ -1618,4 +1612,4 @@ vcr = [] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "c37fcc9d809cf705f31fc6a8c2cf241fae01075b34cff613b62bd77ab049477a" +content-hash = "6269ec3de2038c48d77b23a46c45ee95161c2bc33f5371570fd234a8656e8218" diff --git a/python/pyproject.toml b/python/pyproject.toml index 06ac5da2e..a5a31a0ea 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -57,6 +57,10 @@ pytest-rerunfailures = "^14.0" [tool.poetry.group.lint.dependencies] openai = "^1.10" + +[tool.poetry.group.test.dependencies] +pytest-socket = "^0.7.0" + [tool.poetry.extras] vcr = ["vcrpy"] From 0498289f01f9155ce699fd55ebd57a0517474d31 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Tue, 18 Jun 2024 21:17:38 -0400 Subject: [PATCH 153/373] Add pytest-sockets to test dependencies (#799) - Add pytest-sockets - Run unit tests with --disable-socket and --allow-unix-socket --------- Co-authored-by: William FH <13333726+hinthornw@users.noreply.github.com> --- python/poetry.lock | 738 ++++++++++++++++++++++-------------------- python/pyproject.toml | 1 + 2 files changed, 384 insertions(+), 355 deletions(-) diff --git a/python/poetry.lock b/python/poetry.lock index 0ed84b55d..2c7bbe8d5 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = "anyio" -version = "4.3.0" +version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional 
= false python-versions = ">=3.8" files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, ] [package.dependencies] @@ -43,33 +43,33 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p [[package]] name = "black" -version = "24.3.0" +version = "24.4.2" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" files = [ - {file = "black-24.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395"}, - {file = "black-24.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995"}, - {file = "black-24.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"}, - {file = "black-24.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0"}, - {file = "black-24.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9"}, - {file = "black-24.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597"}, - {file = "black-24.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d"}, - {file = "black-24.3.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5"}, - {file = "black-24.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f"}, - {file = "black-24.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11"}, - {file = "black-24.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4"}, - {file = "black-24.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5"}, - {file = "black-24.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837"}, - {file = "black-24.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd"}, - {file = "black-24.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213"}, - {file = "black-24.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959"}, - {file = "black-24.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb"}, - {file = "black-24.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7"}, - {file = "black-24.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7"}, - {file = "black-24.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f"}, - {file = "black-24.3.0-py3-none-any.whl", hash = "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93"}, - {file = "black-24.3.0.tar.gz", hash = 
"sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f"}, + {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, + {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, + {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, + {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, + {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, + {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, + {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, + {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, + {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, + {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, + {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, + {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, + {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"}, + {file = 
"black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"}, + {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"}, + {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"}, + {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"}, + {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"}, + {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"}, + {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"}, + {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, + {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, ] [package.dependencies] @@ -89,13 +89,13 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2024.2.2" +version = "2024.6.2" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, - {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, + {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"}, + {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"}, ] [[package]] @@ -224,63 +224,63 @@ files = [ [[package]] name = "coverage" -version = "7.4.4" +version = "7.5.3" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2"}, - {file = "coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562"}, - {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7"}, - {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87"}, - {file = 
"coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c"}, - {file = "coverage-7.4.4-cp310-cp310-win32.whl", hash = "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d"}, - {file = "coverage-7.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf"}, - {file = "coverage-7.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f"}, - {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384"}, - {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b"}, - {file = "coverage-7.4.4-cp311-cp311-win32.whl", hash = "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286"}, - {file = "coverage-7.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec"}, - 
{file = "coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76"}, - {file = "coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70"}, - {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48"}, - {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9"}, - {file = "coverage-7.4.4-cp312-cp312-win32.whl", hash = "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0"}, - {file = "coverage-7.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384"}, - {file = "coverage-7.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409"}, - {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7"}, - {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c"}, - {file = "coverage-7.4.4-cp38-cp38-win32.whl", hash = "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e"}, - {file = "coverage-7.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d"}, - {file = "coverage-7.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e"}, - {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd"}, - {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade"}, - {file = "coverage-7.4.4-cp39-cp39-win32.whl", hash = "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57"}, - {file = "coverage-7.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c"}, - {file = "coverage-7.4.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677"}, - {file = "coverage-7.4.4.tar.gz", hash = "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"}, + {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"}, + {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"}, + {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"}, + {file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"}, + {file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"}, + {file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"}, + {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"}, + {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"}, + {file = 
"coverage-7.5.3-cp311-cp311-win32.whl", hash = "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"}, + {file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"}, + {file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"}, + {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"}, + {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"}, + {file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"}, + {file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"}, + {file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"}, + {file = 
"coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"}, + {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"}, + {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"}, + {file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"}, + {file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"}, + {file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"}, + {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"}, + {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"}, + {file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"}, + {file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"}, + {file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"}, + {file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"}, ] [package.dependencies] @@ -291,13 +291,13 @@ toml = ["tomli"] [[package]] name = "dataclasses-json" -version = "0.6.4" +version = "0.6.7" description = "Easily serialize dataclasses to and from JSON." 
optional = false -python-versions = ">=3.7,<4.0" +python-versions = "<4.0,>=3.7" files = [ - {file = "dataclasses_json-0.6.4-py3-none-any.whl", hash = "sha256:f90578b8a3177f7552f4e1a6e535e84293cd5da421fcce0642d49c0d7bdf8df2"}, - {file = "dataclasses_json-0.6.4.tar.gz", hash = "sha256:73696ebf24936560cca79a2430cbc4f3dd23ac7bf46ed17f38e5e5e7657a6377"}, + {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, + {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, ] [package.dependencies] @@ -317,13 +317,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [package.extras] @@ -345,13 +345,13 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] [[package]] name = "fastapi" -version = "0.110.1" +version = "0.110.3" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.110.1-py3-none-any.whl", hash = "sha256:5df913203c482f820d31f48e635e022f8cbfe7350e4830ef05a3163925b1addc"}, - {file = "fastapi-0.110.1.tar.gz", hash = "sha256:6feac43ec359dfe4f45b2c18ec8c94edb8dc2dfc461d417d9e626590c071baad"}, + {file = "fastapi-0.110.3-py3-none-any.whl", hash = 
"sha256:fd7600612f755e4050beb74001310b5a7e1796d149c2ee363124abdfa0289d32"}, + {file = "fastapi-0.110.3.tar.gz", hash = "sha256:555700b0159379e94fdbfc6bb66a0f1c43f4cf7060f25239af3d84b63a656626"}, ] [package.dependencies] @@ -360,17 +360,17 @@ starlette = ">=0.37.2,<0.38.0" typing-extensions = ">=4.8.0" [package.extras] -all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] [[package]] name = "freezegun" -version = "1.4.0" +version = "1.5.1" description = "Let your Python tests travel through time" optional = false python-versions = ">=3.7" files = [ - {file = "freezegun-1.4.0-py3-none-any.whl", hash = "sha256:55e0fc3c84ebf0a96a5aa23ff8b53d70246479e9a68863f1fcac5a3e52f19dd6"}, - {file = "freezegun-1.4.0.tar.gz", hash = "sha256:10939b0ba0ff5adaecf3b06a5c2f73071d9678e507c5eaedb23c761d56ac774b"}, + {file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"}, + {file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"}, ] [package.dependencies] @@ -456,13 +456,13 @@ files = [ [[package]] name = "marshmallow" -version = "3.21.1" +version = "3.21.3" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.21.1-py3-none-any.whl", hash = "sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633"}, - {file = "marshmallow-3.21.1.tar.gz", hash = "sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3"}, + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, ] [package.dependencies] @@ -470,7 +470,7 @@ packaging = ">=17.0" [package.extras] dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==4.0.0)", "sphinx-version-warning (==1.1.2)"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] tests = ["pytest", "pytz", "simplejson"] [[package]] @@ -574,38 +574,38 @@ files = [ [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = 
"mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, + {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, + {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, + {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, + {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, + {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, + {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, + {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, + {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, + {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, + {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, + {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, + {file = 
"mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, + {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, + {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, + {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, + {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, + {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, ] [package.dependencies] @@ -632,58 +632,67 @@ files = [ [[package]] name = "numpy" -version = "1.26.4" +version = "2.0.0" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = 
"numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = 
"sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"}, + {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"}, + {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"}, + {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"}, + {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"}, + {file = 
"numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"}, + {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"}, + {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"}, + {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"}, + {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"}, + {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"}, + {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"}, + {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"}, + {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"}, + {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"}, 
+ {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"}, + {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"}, + {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"}, + {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"}, + {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"}, + {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"}, + {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"}, + {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"}, + {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"}, + {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"}, + {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"}, + {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"}, + {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"}, + {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"}, + {file = "numpy-2.0.0.tar.gz", hash = "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"}, ] [[package]] name = "openai" -version = "1.16.2" +version = "1.34.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.16.2-py3-none-any.whl", hash = "sha256:46a435380921e42dae218d04d6dd0e89a30d7f3b9d8a778d5887f78003cf9354"}, - {file = "openai-1.16.2.tar.gz", hash = "sha256:c93d5efe5b73b6cb72c4cd31823852d2e7c84a138c0af3cbe4a8eb32b1164ab2"}, + {file = "openai-1.34.0-py3-none-any.whl", hash = "sha256:018623c2f795424044675c6230fa3bfbf98d9e0aab45d8fd116f2efb2cfb6b7e"}, + {file = "openai-1.34.0.tar.gz", hash = 
"sha256:95c8e2da4acd6958e626186957d656597613587195abd0fb2527566a93e76770"}, ] [package.dependencies] @@ -700,73 +709,68 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.0" +version = "3.10.5" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.0-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:47af5d4b850a2d1328660661f0881b67fdbe712aea905dadd413bdea6f792c33"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c90681333619d78360d13840c7235fdaf01b2b129cb3a4f1647783b1971542b6"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:400c5b7c4222cb27b5059adf1fb12302eebcabf1978f33d0824aa5277ca899bd"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5dcb32e949eae80fb335e63b90e5808b4b0f64e31476b3777707416b41682db5"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7d507c7493252c0a0264b5cc7e20fa2f8622b8a83b04d819b5ce32c97cf57b"}, - {file = "orjson-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e286a51def6626f1e0cc134ba2067dcf14f7f4b9550f6dd4535fd9d79000040b"}, - {file = "orjson-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8acd4b82a5f3a3ec8b1dc83452941d22b4711964c34727eb1e65449eead353ca"}, - {file = "orjson-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:30707e646080dd3c791f22ce7e4a2fc2438765408547c10510f1f690bd336217"}, - {file = "orjson-3.10.0-cp310-none-win32.whl", hash = "sha256:115498c4ad34188dcb73464e8dc80e490a3e5e88a925907b6fedcf20e545001a"}, - {file = "orjson-3.10.0-cp310-none-win_amd64.whl", hash = 
"sha256:6735dd4a5a7b6df00a87d1d7a02b84b54d215fb7adac50dd24da5997ffb4798d"}, - {file = "orjson-3.10.0-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9587053e0cefc284e4d1cd113c34468b7d3f17666d22b185ea654f0775316a26"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bef1050b1bdc9ea6c0d08468e3e61c9386723633b397e50b82fda37b3563d72"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d16c6963ddf3b28c0d461641517cd312ad6b3cf303d8b87d5ef3fa59d6844337"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4251964db47ef090c462a2d909f16c7c7d5fe68e341dabce6702879ec26d1134"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73bbbdc43d520204d9ef0817ac03fa49c103c7f9ea94f410d2950755be2c349c"}, - {file = "orjson-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:414e5293b82373606acf0d66313aecb52d9c8c2404b1900683eb32c3d042dbd7"}, - {file = "orjson-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:feaed5bb09877dc27ed0d37f037ddef6cb76d19aa34b108db270d27d3d2ef747"}, - {file = "orjson-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5127478260db640323cea131ee88541cb1a9fbce051f0b22fa2f0892f44da302"}, - {file = "orjson-3.10.0-cp311-none-win32.whl", hash = "sha256:b98345529bafe3c06c09996b303fc0a21961820d634409b8639bc16bd4f21b63"}, - {file = "orjson-3.10.0-cp311-none-win_amd64.whl", hash = "sha256:658ca5cee3379dd3d37dbacd43d42c1b4feee99a29d847ef27a1cb18abdfb23f"}, - {file = "orjson-3.10.0-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4329c1d24fd130ee377e32a72dc54a3c251e6706fccd9a2ecb91b3606fddd998"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ef0f19fdfb6553342b1882f438afd53c7cb7aea57894c4490c43e4431739c700"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4f60db24161534764277f798ef53b9d3063092f6d23f8f962b4a97edfa997a0"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1de3fd5c7b208d836f8ecb4526995f0d5877153a4f6f12f3e9bf11e49357de98"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f93e33f67729d460a177ba285002035d3f11425ed3cebac5f6ded4ef36b28344"}, - {file = "orjson-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:237ba922aef472761acd697eef77fef4831ab769a42e83c04ac91e9f9e08fa0e"}, - {file = "orjson-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98c1bfc6a9bec52bc8f0ab9b86cc0874b0299fccef3562b793c1576cf3abb570"}, - {file = "orjson-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:30d795a24be16c03dca0c35ca8f9c8eaaa51e3342f2c162d327bd0225118794a"}, - {file = "orjson-3.10.0-cp312-none-win32.whl", hash = "sha256:6a3f53dc650bc860eb26ec293dfb489b2f6ae1cbfc409a127b01229980e372f7"}, - {file = "orjson-3.10.0-cp312-none-win_amd64.whl", hash = "sha256:983db1f87c371dc6ffc52931eb75f9fe17dc621273e43ce67bee407d3e5476e9"}, - {file = "orjson-3.10.0-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9a667769a96a72ca67237224a36faf57db0c82ab07d09c3aafc6f956196cfa1b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade1e21dfde1d37feee8cf6464c20a2f41fa46c8bcd5251e761903e46102dc6b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:23c12bb4ced1c3308eff7ba5c63ef8f0edb3e4c43c026440247dd6c1c61cea4b"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b2d014cf8d4dc9f03fc9f870de191a49a03b1bcda51f2a957943fb9fafe55aac"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eadecaa16d9783affca33597781328e4981b048615c2ddc31c47a51b833d6319"}, - {file = "orjson-3.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd583341218826f48bd7c6ebf3310b4126216920853cbc471e8dbeaf07b0b80e"}, - {file = "orjson-3.10.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:90bfc137c75c31d32308fd61951d424424426ddc39a40e367704661a9ee97095"}, - {file = "orjson-3.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13b5d3c795b09a466ec9fcf0bd3ad7b85467d91a60113885df7b8d639a9d374b"}, - {file = "orjson-3.10.0-cp38-none-win32.whl", hash = "sha256:5d42768db6f2ce0162544845facb7c081e9364a5eb6d2ef06cd17f6050b048d8"}, - {file = "orjson-3.10.0-cp38-none-win_amd64.whl", hash = "sha256:33e6655a2542195d6fd9f850b428926559dee382f7a862dae92ca97fea03a5ad"}, - {file = "orjson-3.10.0-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4050920e831a49d8782a1720d3ca2f1c49b150953667eed6e5d63a62e80f46a2"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1897aa25a944cec774ce4a0e1c8e98fb50523e97366c637b7d0cddabc42e6643"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9bf565a69e0082ea348c5657401acec3cbbb31564d89afebaee884614fba36b4"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b6ebc17cfbbf741f5c1a888d1854354536f63d84bee537c9a7c0335791bb9009"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2817877d0b69f78f146ab305c5975d0618df41acf8811249ee64231f5953fee"}, - {file = "orjson-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57d017863ec8aa4589be30a328dacd13c2dc49de1c170bc8d8c8a98ece0f2925"}, - {file = 
"orjson-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:22c2f7e377ac757bd3476ecb7480c8ed79d98ef89648f0176deb1da5cd014eb7"}, - {file = "orjson-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e62ba42bfe64c60c1bc84799944f80704e996592c6b9e14789c8e2a303279912"}, - {file = "orjson-3.10.0-cp39-none-win32.whl", hash = "sha256:60c0b1bdbccd959ebd1575bd0147bd5e10fc76f26216188be4a36b691c937077"}, - {file = "orjson-3.10.0-cp39-none-win_amd64.whl", hash = "sha256:175a41500ebb2fdf320bf78e8b9a75a1279525b62ba400b2b2444e274c2c8bee"}, - {file = "orjson-3.10.0.tar.gz", hash = "sha256:ba4d8cac5f2e2cff36bea6b6481cdb92b38c202bcec603d6f5ff91960595a1ed"}, + {file = "orjson-3.10.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932"}, + {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1"}, + {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6"}, + {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63"}, + {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c"}, + {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96"}, + {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b"}, + {file = "orjson-3.10.5-cp310-none-win32.whl", hash = "sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2"}, + {file = 
"orjson-3.10.5-cp310-none-win_amd64.whl", hash = "sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228"}, + {file = "orjson-3.10.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5"}, + {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f"}, + {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa"}, + {file = "orjson-3.10.5-cp311-none-win32.whl", hash = "sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04"}, + {file = "orjson-3.10.5-cp311-none-win_amd64.whl", hash = "sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c"}, + {file = "orjson-3.10.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b"}, + {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211"}, + {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3"}, + {file = "orjson-3.10.5-cp312-none-win32.whl", hash = "sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2"}, + {file = "orjson-3.10.5-cp312-none-win_amd64.whl", hash = "sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5"}, + {file = "orjson-3.10.5-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e"}, + {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9"}, + {file = 
"orjson-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b"}, + {file = "orjson-3.10.5-cp38-none-win32.whl", hash = "sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4"}, + {file = "orjson-3.10.5-cp38-none-win_amd64.whl", hash = "sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09"}, + {file = "orjson-3.10.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214"}, + {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595"}, + {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86"}, + {file = "orjson-3.10.5-cp39-none-win32.whl", hash = "sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47"}, + {file = "orjson-3.10.5-cp39-none-win_amd64.whl", hash = "sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7"}, + {file = "orjson-3.10.5.tar.gz", hash = "sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d"}, ] [[package]] name = "packaging" -version = "24.0" +version = "24.1" 
description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, - {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] @@ -797,28 +801,29 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, - {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file 
= "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] @@ -855,47 +860,47 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pydantic" -version = "1.10.15" +version = "1.10.16" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"}, - {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"}, - {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50f1666a9940d3d68683c9d96e39640f709d7a72ff8702987dab1761036206bb"}, - {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82790d4753ee5d00739d6cb5cf56bceb186d9d6ce134aca3ba7befb1eedbc2c8"}, - {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d207d5b87f6cbefbdb1198154292faee8017d7495a54ae58db06762004500d00"}, - {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e49db944fad339b2ccb80128ffd3f8af076f9f287197a480bf1e4ca053a866f0"}, - {file = "pydantic-1.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:d3b5c4cbd0c9cb61bbbb19ce335e1f8ab87a811f6d589ed52b0254cf585d709c"}, - {file = "pydantic-1.10.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:c3d5731a120752248844676bf92f25a12f6e45425e63ce22e0849297a093b5b0"}, - {file = "pydantic-1.10.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c365ad9c394f9eeffcb30a82f4246c0006417f03a7c0f8315d6211f25f7cb654"}, - {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3287e1614393119c67bd4404f46e33ae3be3ed4cd10360b48d0a4459f420c6a3"}, - {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be51dd2c8596b25fe43c0a4a59c2bee4f18d88efb8031188f9e7ddc6b469cf44"}, - {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6a51a1dd4aa7b3f1317f65493a182d3cff708385327c1c82c81e4a9d6d65b2e4"}, - {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4e316e54b5775d1eb59187f9290aeb38acf620e10f7fd2f776d97bb788199e53"}, - {file = "pydantic-1.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:0d142fa1b8f2f0ae11ddd5e3e317dcac060b951d605fda26ca9b234b92214986"}, - {file = "pydantic-1.10.15-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7ea210336b891f5ea334f8fc9f8f862b87acd5d4a0cbc9e3e208e7aa1775dabf"}, - {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3453685ccd7140715e05f2193d64030101eaad26076fad4e246c1cc97e1bb30d"}, - {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bea1f03b8d4e8e86702c918ccfd5d947ac268f0f0cc6ed71782e4b09353b26f"}, - {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:005655cabc29081de8243126e036f2065bd7ea5b9dff95fde6d2c642d39755de"}, - {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:af9850d98fc21e5bc24ea9e35dd80a29faf6462c608728a110c0a30b595e58b7"}, - {file = "pydantic-1.10.15-cp37-cp37m-win_amd64.whl", hash = "sha256:d31ee5b14a82c9afe2bd26aaa405293d4237d0591527d9129ce36e58f19f95c1"}, - {file = 
"pydantic-1.10.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5e09c19df304b8123938dc3c53d3d3be6ec74b9d7d0d80f4f4b5432ae16c2022"}, - {file = "pydantic-1.10.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7ac9237cd62947db00a0d16acf2f3e00d1ae9d3bd602b9c415f93e7a9fc10528"}, - {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:584f2d4c98ffec420e02305cf675857bae03c9d617fcfdc34946b1160213a948"}, - {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbc6989fad0c030bd70a0b6f626f98a862224bc2b1e36bfc531ea2facc0a340c"}, - {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d573082c6ef99336f2cb5b667b781d2f776d4af311574fb53d908517ba523c22"}, - {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6bd7030c9abc80134087d8b6e7aa957e43d35714daa116aced57269a445b8f7b"}, - {file = "pydantic-1.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:3350f527bb04138f8aff932dc828f154847fbdc7a1a44c240fbfff1b57f49a12"}, - {file = "pydantic-1.10.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:51d405b42f1b86703555797270e4970a9f9bd7953f3990142e69d1037f9d9e51"}, - {file = "pydantic-1.10.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a980a77c52723b0dc56640ced396b73a024d4b74f02bcb2d21dbbac1debbe9d0"}, - {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67f1a1fb467d3f49e1708a3f632b11c69fccb4e748a325d5a491ddc7b5d22383"}, - {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:676ed48f2c5bbad835f1a8ed8a6d44c1cd5a21121116d2ac40bd1cd3619746ed"}, - {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:92229f73400b80c13afcd050687f4d7e88de9234d74b27e6728aa689abcf58cc"}, - {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:2746189100c646682eff0bce95efa7d2e203420d8e1c613dc0c6b4c1d9c1fde4"}, - {file = "pydantic-1.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:394f08750bd8eaad714718812e7fab615f873b3cdd0b9d84e76e51ef3b50b6b7"}, - {file = "pydantic-1.10.15-py3-none-any.whl", hash = "sha256:28e552a060ba2740d0d2aabe35162652c1459a0b9069fe0db7f4ee0e18e74d58"}, - {file = "pydantic-1.10.15.tar.gz", hash = "sha256:ca832e124eda231a60a041da4f013e3ff24949d94a01154b137fc2f2a43c3ffb"}, + {file = "pydantic-1.10.16-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1a539ac40551b01a85e899829aa43ca8036707474af8d74b48be288d4d2d2846"}, + {file = "pydantic-1.10.16-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a4fcc7b0b8038dbda2dda642cff024032dfae24a7960cc58e57a39eb1949b9b"}, + {file = "pydantic-1.10.16-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4660dd697de1ae2d4305a85161312611f64d5360663a9ba026cd6ad9e3fe14c3"}, + {file = "pydantic-1.10.16-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:900a787c574f903a97d0bf52a43ff3b6cf4fa0119674bcfc0e5fd1056d388ad9"}, + {file = "pydantic-1.10.16-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d30192a63e6d3334c3f0c0506dd6ae9f1dce7b2f8845518915291393a5707a22"}, + {file = "pydantic-1.10.16-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:16cf23ed599ca5ca937e37ba50ab114e6b5c387eb43a6cc533701605ad1be611"}, + {file = "pydantic-1.10.16-cp310-cp310-win_amd64.whl", hash = "sha256:8d23111f41d1e19334edd51438fd57933f3eee7d9d2fa8cc3f5eda515a272055"}, + {file = "pydantic-1.10.16-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef287b8d7fc0e86a8bd1f902c61aff6ba9479c50563242fe88ba39692e98e1e0"}, + {file = "pydantic-1.10.16-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b9ded699bfd3b3912d796ff388b0c607e6d35d41053d37aaf8fd6082c660de9a"}, + {file = "pydantic-1.10.16-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:daeb199814333e4426c5e86d7fb610f4e230289f28cab90eb4de27330bef93cf"}, + {file = "pydantic-1.10.16-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5973843f1fa99ec6c3ac8d1a8698ac9340b35e45cca6c3e5beb5c3bd1ef15de6"}, + {file = "pydantic-1.10.16-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6b8a7788a8528a558828fe4a48783cafdcf2612d13c491594a8161dc721629c"}, + {file = "pydantic-1.10.16-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8abaecf54dacc9d991dda93c3b880d41092a8924cde94eeb811d7d9ab55df7d8"}, + {file = "pydantic-1.10.16-cp311-cp311-win_amd64.whl", hash = "sha256:ddc7b682fbd23f051edc419dc6977e11dd2dbdd0cef9d05f0e15d1387862d230"}, + {file = "pydantic-1.10.16-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:067c2b5539f7839653ad8c3d1fc2f1343338da8677b7b2172abf3cd3fdc8f719"}, + {file = "pydantic-1.10.16-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d1fc943583c046ecad0ff5d6281ee571b64e11b5503d9595febdce54f38b290"}, + {file = "pydantic-1.10.16-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18548b30ccebe71d380b0886cc44ea5d80afbcc155e3518792f13677ad06097d"}, + {file = "pydantic-1.10.16-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4e92292f9580fc5ea517618580fac24e9f6dc5657196e977c194a8e50e14f5a9"}, + {file = "pydantic-1.10.16-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5da8bc4bb4f85b8c97cc7f11141fddbbd29eb25e843672e5807e19cc3d7c1b7f"}, + {file = "pydantic-1.10.16-cp37-cp37m-win_amd64.whl", hash = "sha256:a04ee1ea34172b87707a6ecfcdb120d7656892206b7c4dbdb771a73e90179fcb"}, + {file = "pydantic-1.10.16-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4fa86469fd46e732242c7acb83282d33f83591a7e06f840481327d5bf6d96112"}, + {file = "pydantic-1.10.16-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:89c2783dc261726fe7a5ce1121bce29a2f7eb9b1e704c68df2b117604e3b346f"}, + {file = 
"pydantic-1.10.16-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78e59fa919fa7a192f423d190d8660c35dd444efa9216662273f36826765424b"}, + {file = "pydantic-1.10.16-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7e82a80068c77f4b074032e031e642530b6d45cb8121fc7c99faa31fb6c6b72"}, + {file = "pydantic-1.10.16-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d82d5956cee27a30e26a5b88d00a6a2a15a4855e13c9baf50175976de0dc282c"}, + {file = "pydantic-1.10.16-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b7b99424cc0970ff08deccb549b5a6ec1040c0b449eab91723e64df2bd8fdca"}, + {file = "pydantic-1.10.16-cp38-cp38-win_amd64.whl", hash = "sha256:d97a35e1ba59442775201657171f601a2879e63517a55862a51f8d67cdfc0017"}, + {file = "pydantic-1.10.16-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9d91f6866fd3e303c632207813ef6bc4d86055e21c5e5a0a311983a9ac5f0192"}, + {file = "pydantic-1.10.16-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8d3c71d14c8bd26d2350c081908dbf59d5a6a8f9596d9ef2b09cc1e61c8662b"}, + {file = "pydantic-1.10.16-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b73e6386b439b4881d79244e9fc1e32d1e31e8d784673f5d58a000550c94a6c0"}, + {file = "pydantic-1.10.16-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5f039881fb2ef86f6de6eacce6e71701b47500355738367413ccc1550b2a69cf"}, + {file = "pydantic-1.10.16-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3895ddb26f22bdddee7e49741486aa7b389258c6f6771943e87fc00eabd79134"}, + {file = "pydantic-1.10.16-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:55b945da2756b5cef93d792521ad0d457fdf2f69fd5a2d10a27513f5281717dd"}, + {file = "pydantic-1.10.16-cp39-cp39-win_amd64.whl", hash = "sha256:22dd265c77c3976a34be78409b128cb84629284dfd1b69d2fa1507a36f84dc8b"}, + {file = "pydantic-1.10.16-py3-none-any.whl", hash = 
"sha256:aa2774ba5412fd1c5cb890d08e8b0a3bb5765898913ba1f61a65a4810f03cf29"}, + {file = "pydantic-1.10.16.tar.gz", hash = "sha256:8bb388f6244809af69ee384900b10b677a69f1980fdc655ea419710cffcb5610"}, ] [package.dependencies] @@ -929,13 +934,13 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no [[package]] name = "pytest-asyncio" -version = "0.21.1" +version = "0.21.2" description = "Pytest support for asyncio" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"}, - {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"}, + {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, + {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, ] [package.dependencies] @@ -1024,18 +1029,18 @@ watchdog = ">=2.0.0" [[package]] name = "pytest-xdist" -version = "3.5.0" +version = "3.6.1" description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-xdist-3.5.0.tar.gz", hash = "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a"}, - {file = "pytest_xdist-3.5.0-py3-none-any.whl", hash = "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"}, + {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, + {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, ] [package.dependencies] -execnet = ">=1.1" -pytest = ">=6.2.0" +execnet = ">=2.1" +pytest = ">=7.0.0" [package.extras] psutil = ["psutil (>=3.0)"] @@ 
-1068,7 +1073,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1076,16 +1080,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1102,7 +1098,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = 
"PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1110,7 +1105,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1118,13 +1112,13 @@ files = [ [[package]] name = "requests" -version = "2.32.2" +version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" files = [ - {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, - {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -1139,28 +1133,28 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.3.5" +version = "0.3.7" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:aef5bd3b89e657007e1be6b16553c8813b221ff6d92c7526b7e0227450981eac"}, - {file = "ruff-0.3.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:89b1e92b3bd9fca249153a97d23f29bed3992cff414b222fcd361d763fc53f12"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e55771559c89272c3ebab23326dc23e7f813e492052391fe7950c1a5a139d89"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dabc62195bf54b8a7876add6e789caae0268f34582333cda340497c886111c39"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a05f3793ba25f194f395578579c546ca5d83e0195f992edc32e5907d142bfa3"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dfd3504e881082959b4160ab02f7a205f0fadc0a9619cc481982b6837b2fd4c0"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87258e0d4b04046cf1d6cc1c56fadbf7a880cc3de1f7294938e923234cf9e498"}, - {file = 
"ruff-0.3.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:712e71283fc7d9f95047ed5f793bc019b0b0a29849b14664a60fd66c23b96da1"}, - {file = "ruff-0.3.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a532a90b4a18d3f722c124c513ffb5e5eaff0cc4f6d3aa4bda38e691b8600c9f"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:122de171a147c76ada00f76df533b54676f6e321e61bd8656ae54be326c10296"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d80a6b18a6c3b6ed25b71b05eba183f37d9bc8b16ace9e3d700997f00b74660b"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a7b6e63194c68bca8e71f81de30cfa6f58ff70393cf45aab4c20f158227d5936"}, - {file = "ruff-0.3.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:a759d33a20c72f2dfa54dae6e85e1225b8e302e8ac655773aff22e542a300985"}, - {file = "ruff-0.3.5-py3-none-win32.whl", hash = "sha256:9d8605aa990045517c911726d21293ef4baa64f87265896e491a05461cae078d"}, - {file = "ruff-0.3.5-py3-none-win_amd64.whl", hash = "sha256:dc56bb16a63c1303bd47563c60482a1512721053d93231cf7e9e1c6954395a0e"}, - {file = "ruff-0.3.5-py3-none-win_arm64.whl", hash = "sha256:faeeae9905446b975dcf6d4499dc93439b131f1443ee264055c5716dd947af55"}, - {file = "ruff-0.3.5.tar.gz", hash = "sha256:a067daaeb1dc2baf9b82a32dae67d154d95212080c80435eb052d95da647763d"}, + {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e8377cccb2f07abd25e84fc5b2cbe48eeb0fea9f1719cad7caedb061d70e5ce"}, + {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:15a4d1cc1e64e556fa0d67bfd388fed416b7f3b26d5d1c3e7d192c897e39ba4b"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d28bdf3d7dc71dd46929fafeec98ba89b7c3550c3f0978e36389b5631b793663"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:379b67d4f49774ba679593b232dcd90d9e10f04d96e3c8ce4a28037ae473f7bb"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c060aea8ad5ef21cdfbbe05475ab5104ce7827b639a78dd55383a6e9895b7c51"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ebf8f615dde968272d70502c083ebf963b6781aacd3079081e03b32adfe4d58a"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d48098bd8f5c38897b03604f5428901b65e3c97d40b3952e38637b5404b739a2"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8a4fda219bf9024692b1bc68c9cff4b80507879ada8769dc7e985755d662ea"}, + {file = "ruff-0.3.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c44e0149f1d8b48c4d5c33d88c677a4aa22fd09b1683d6a7ff55b816b5d074f"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3050ec0af72b709a62ecc2aca941b9cd479a7bf2b36cc4562f0033d688e44fa1"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a29cc38e4c1ab00da18a3f6777f8b50099d73326981bb7d182e54a9a21bb4ff7"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5b15cc59c19edca917f51b1956637db47e200b0fc5e6e1878233d3a938384b0b"}, + {file = "ruff-0.3.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e491045781b1e38b72c91247cf4634f040f8d0cb3e6d3d64d38dcf43616650b4"}, + {file = "ruff-0.3.7-py3-none-win32.whl", hash = "sha256:bc931de87593d64fad3a22e201e55ad76271f1d5bfc44e1a1887edd0903c7d9f"}, + {file = "ruff-0.3.7-py3-none-win_amd64.whl", hash = "sha256:5ef0e501e1e39f35e03c2acb1d1238c595b8bb36cf7a170e7c1df1b73da00e74"}, + {file = "ruff-0.3.7-py3-none-win_arm64.whl", hash = "sha256:789e144f6dc7019d1f92a812891c645274ed08af6037d11fc65fcbc183b7d59f"}, + {file = "ruff-0.3.7.tar.gz", hash = "sha256:d5c1aebee5162c2226784800ae031f660c350e7a3402c4d1f8ea4e97e232e3ba"}, ] [[package]] @@ -1216,13 +1210,13 @@ files = [ 
[[package]] name = "tqdm" -version = "4.66.3" +version = "4.66.4" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.3-py3-none-any.whl", hash = "sha256:4f41d54107ff9a223dca80b53efe4fb654c67efaba7f47bada3ee9d50e05bd53"}, - {file = "tqdm-4.66.3.tar.gz", hash = "sha256:23097a41eba115ba99ecae40d06444c15d1c0c698d527a01c6c8bd1c5d0647e5"}, + {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, + {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, ] [package.dependencies] @@ -1236,24 +1230,24 @@ telegram = ["requests"] [[package]] name = "types-psutil" -version = "5.9.5.20240316" +version = "5.9.5.20240516" description = "Typing stubs for psutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-psutil-5.9.5.20240316.tar.gz", hash = "sha256:5636f5714bb930c64bb34c4d47a59dc92f9d610b778b5364a31daa5584944848"}, - {file = "types_psutil-5.9.5.20240316-py3-none-any.whl", hash = "sha256:2fdd64ea6e97befa546938f486732624f9255fde198b55e6f00fda236f059f64"}, + {file = "types-psutil-5.9.5.20240516.tar.gz", hash = "sha256:bb296f59fc56458891d0feb1994717e548a1bcf89936a2877df8792b822b4696"}, + {file = "types_psutil-5.9.5.20240516-py3-none-any.whl", hash = "sha256:83146ded949a10167d9895e567b3b71e53ebc5e23fd8363eab62b3c76cce7b89"}, ] [[package]] name = "types-pytz" -version = "2024.1.0.20240203" +version = "2024.1.0.20240417" description = "Typing stubs for pytz" optional = false python-versions = ">=3.8" files = [ - {file = "types-pytz-2024.1.0.20240203.tar.gz", hash = "sha256:c93751ee20dfc6e054a0148f8f5227b9a00b79c90a4d3c9f464711a73179c89e"}, - {file = "types_pytz-2024.1.0.20240203-py3-none-any.whl", hash = "sha256:9679eef0365db3af91ef7722c199dbb75ee5c1b67e3c4dd7bfbeb1b8a71c21a3"}, + {file = "types-pytz-2024.1.0.20240417.tar.gz", hash = 
"sha256:6810c8a1f68f21fdf0f4f374a432487c77645a0ac0b31de4bf4690cf21ad3981"}, + {file = "types_pytz-2024.1.0.20240417-py3-none-any.whl", hash = "sha256:8335d443310e2db7b74e007414e74c4f53b67452c0cb0d228ca359ccfba59659"}, ] [[package]] @@ -1281,15 +1275,29 @@ files = [ [package.dependencies] types-urllib3 = "*" +[[package]] +name = "types-requests" +version = "2.32.0.20240602" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-requests-2.32.0.20240602.tar.gz", hash = "sha256:3f98d7bbd0dd94ebd10ff43a7fbe20c3b8528acace6d8efafef0b6a184793f06"}, + {file = "types_requests-2.32.0.20240602-py3-none-any.whl", hash = "sha256:ed3946063ea9fbc6b5fc0c44fa279188bae42d582cb63760be6cb4b9d06c3de8"}, +] + +[package.dependencies] +urllib3 = ">=2" + [[package]] name = "types-tqdm" -version = "4.66.0.20240106" +version = "4.66.0.20240417" description = "Typing stubs for tqdm" optional = false python-versions = ">=3.8" files = [ - {file = "types-tqdm-4.66.0.20240106.tar.gz", hash = "sha256:7acf4aade5bad3ded76eb829783f9961b1c2187948eaa6dd1ae8644dff95a938"}, - {file = "types_tqdm-4.66.0.20240106-py3-none-any.whl", hash = "sha256:7459b0f441b969735685645a5d8480f7912b10d05ab45f99a2db8a8e45cb550b"}, + {file = "types-tqdm-4.66.0.20240417.tar.gz", hash = "sha256:16dce9ef522ea8d40e4f5b8d84dd8a1166eefc13ceee7a7e158bf0f1a1421a31"}, + {file = "types_tqdm-4.66.0.20240417-py3-none-any.whl", hash = "sha256:248aef1f9986b7b8c2c12b3cb4399fc17dba0a29e7e3f3f9cd704babb879383d"}, ] [[package]] @@ -1305,13 +1313,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = 
"sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] @@ -1345,6 +1353,23 @@ brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotl secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +[[package]] +name = "urllib3" +version = "2.2.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + [[package]] name = "uvicorn" version = "0.29.0" @@ -1385,40 +1410,43 @@ tests = ["Werkzeug (==2.0.3)", "aiohttp", "boto3", "httplib2", "httpx", "pytest" [[package]] name = "watchdog" -version = "4.0.0" +version = "4.0.1" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = 
"watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, + {file = 
"watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, + 
{file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, + {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, + {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, + {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, ] [package.extras] @@ -1612,4 +1640,4 @@ vcr = [] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "6269ec3de2038c48d77b23a46c45ee95161c2bc33f5371570fd234a8656e8218" +content-hash = "b88660bba119846142d1a2e19b7a3c7a077eefe3e54763bdb1c14224ff098c88" diff --git a/python/pyproject.toml b/python/pyproject.toml index a5a31a0ea..afd1d8247 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -53,6 +53,7 @@ vcrpy = "^6.0.1" fastapi = "^0.110.1" uvicorn = "^0.29.0" pytest-rerunfailures = "^14.0" +pytest-socket = "^0.7.0" [tool.poetry.group.lint.dependencies] openai = "^1.10" From 31c3a4068fead0b090396d53e31fbc65ff23e8ff Mon Sep 17 00:00:00 2001 From: Jon <49285755+AtomicJon@users.noreply.github.com> Date: Wed, 19 Jun 2024 13:32:01 -0400 Subject: [PATCH 154/373] feat(js): add support for updating a dataset (#800) --- js/src/client.ts | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/js/src/client.ts b/js/src/client.ts index 80a7d3caf..50cecdcea 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -1947,6 +1947,45 @@ export class Client { } } + /** + * Update a dataset + * @param props The dataset details to update + * @returns The updated dataset + */ + public async updateDataset(props: { + datasetId?: string; + datasetName?: string; + name?: string; + description?: string; + }): Promise { + const { datasetId, datasetName, ...update } = props; + + if (!datasetId && !datasetName) { + 
throw new Error("Must provide either datasetName or datasetId"); + } + const _datasetId = + datasetId ?? (await this.readDataset({ datasetName })).id; + assertUuid(_datasetId); + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/datasets/${_datasetId}`, + { + method: "PATCH", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(update), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + if (!response.ok) { + throw new Error( + `Failed to update dataset ${_datasetId}: ${response.status} ${response.statusText}` + ); + } + return (await response.json()) as Dataset; + } + public async deleteDataset({ datasetId, datasetName, From 80a18a77874bfab94e69651b2d2936cc49851478 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 7 Jun 2024 17:56:48 +0200 Subject: [PATCH 155/373] feat(js): add anonymizer API --- js/.gitignore | 4 + js/package.json | 15 ++++ js/scripts/create-entrypoints.js | 2 + js/src/anonymizer/index.ts | 136 +++++++++++++++++++++++++++++++ js/src/tests/anonymizer.test.ts | 72 ++++++++++++++++ js/tsconfig.json | 1 + js/yarn.lock | 17 ++++ 7 files changed, 247 insertions(+) create mode 100644 js/src/anonymizer/index.ts create mode 100644 js/src/tests/anonymizer.test.ts diff --git a/js/.gitignore b/js/.gitignore index 5864f6933..902b3f759 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -63,6 +63,10 @@ Chinook_Sqlite.sql /wrappers.js /wrappers.d.ts /wrappers.d.cts +/anonymizer.cjs +/anonymizer.js +/anonymizer.d.ts +/anonymizer.d.cts /wrappers/openai.cjs /wrappers/openai.js /wrappers/openai.d.ts diff --git a/js/package.json b/js/package.json index b2972645d..179febc17 100644 --- a/js/package.json +++ b/js/package.json @@ -37,6 +37,10 @@ "wrappers.js", "wrappers.d.ts", "wrappers.d.cts", + "anonymizer.cjs", + "anonymizer.js", + "anonymizer.d.ts", + "anonymizer.d.cts", "wrappers/openai.cjs", "wrappers/openai.js", "wrappers/openai.d.ts", @@ -91,6 +95,7 @@ "dependencies": { 
"@types/uuid": "^9.0.1", "commander": "^10.0.1", + "lodash.set": "^4.3.2", "p-queue": "^6.6.2", "p-retry": "4", "uuid": "^9.0.0" @@ -104,6 +109,7 @@ "@langchain/langgraph": "^0.0.19", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", + "@types/lodash.set": "^4.3.9", "@typescript-eslint/eslint-plugin": "^5.59.8", "@typescript-eslint/parser": "^5.59.8", "babel-jest": "^29.5.0", @@ -228,6 +234,15 @@ "import": "./wrappers.js", "require": "./wrappers.cjs" }, + "./anonymizer": { + "types": { + "import": "./anonymizer.d.ts", + "require": "./anonymizer.d.cts", + "default": "./anonymizer.d.ts" + }, + "import": "./anonymizer.js", + "require": "./anonymizer.cjs" + }, "./wrappers/openai": { "types": { "import": "./wrappers/openai.d.ts", diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 1c571c14f..61d7341ab 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -15,9 +15,11 @@ const entrypoints = { schemas: "schemas", langchain: "langchain", wrappers: "wrappers/index", + anonymizer: "anonymizer/index", "wrappers/openai": "wrappers/openai", "singletons/traceable": "singletons/traceable", }; + const updateJsonFile = (relativePath, updateFunction) => { const contents = fs.readFileSync(relativePath).toString(); const res = updateFunction(JSON.parse(contents)); diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts new file mode 100644 index 000000000..6e49e09bb --- /dev/null +++ b/js/src/anonymizer/index.ts @@ -0,0 +1,136 @@ +import set from "lodash.set"; + +export interface StringNode { + value: string; + path: string; +} + +function extractStringNodes(data: unknown, options: { maxDepth?: number }) { + const parsedOptions = { ...options, maxDepth: options.maxDepth ?? 
10 }; + + const queue: [value: unknown, depth: number, path: string][] = [ + [data, 0, ""], + ]; + + const result: StringNode[] = []; + while (queue.length > 0) { + const task = queue.shift(); + if (task == null) continue; + const [value, depth, path] = task; + if (typeof value === "object" && value != null) { + if (depth >= parsedOptions.maxDepth) continue; + for (const [key, nestedValue] of Object.entries(value)) { + queue.push([nestedValue, depth + 1, path ? `${path}.${key}` : key]); + } + } else if (Array.isArray(value)) { + if (depth >= parsedOptions.maxDepth) continue; + for (let i = 0; i < value.length; i++) { + queue.push([value[i], depth + 1, `${path}[${i}]`]); + } + } else if (typeof value === "string") { + result.push({ value, path }); + } + } + + return result; +} + +function deepClone(data: T): T { + if ("structuredClone" in globalThis) { + return globalThis.structuredClone(data); + } + + return JSON.parse(JSON.stringify(data)); +} + +export interface StringNodeProcessor { + maskNodes: (nodes: StringNode[]) => StringNode[]; +} + +export interface StringNodeRule { + type?: "pattern"; + pattern: RegExp | string; + replace?: string; +} + +export type ReplacerType = + | ((value: string, path?: string) => string) + | StringNodeRule[] + | StringNodeProcessor; + +export function replaceSensitiveData( + data: T, + replacer: ReplacerType, + options?: { + maxDepth?: number; + deepClone?: boolean; + } +): T { + const nodes = extractStringNodes(data, { + maxDepth: options?.maxDepth, + }); + + // by default we opt-in to mutate the value directly + // to improve performance + let mutateValue = options?.deepClone ? deepClone(data) : data; + + const processor: StringNodeProcessor = Array.isArray(replacer) + ? (() => { + const replacers: [regex: RegExp, replace: string][] = replacer.map( + ({ pattern, type, replace }) => { + if (type != null && type !== "pattern") + throw new Error("Invalid anonymizer type"); + return [ + typeof pattern === "string" ? 
new RegExp(pattern, "g") : pattern, + replace ?? "[redacted]", + ]; + } + ); + + if (replacers.length === 0) throw new Error("No replacers provided"); + return { + maskNodes: (nodes: StringNode[]) => { + return nodes.reduce((memo, item) => { + const newValue = replacers.reduce((value, [regex, replace]) => { + const result = value.replace(regex, replace); + + // make sure we reset the state of regex + regex.lastIndex = 0; + + return result; + }, item.value); + + if (newValue !== item.value) { + memo.push({ value: newValue, path: item.path }); + } + + return memo; + }, []); + }, + }; + })() + : typeof replacer === "function" + ? { + maskNodes: (nodes: StringNode[]) => + nodes.reduce((memo, item) => { + const newValue = replacer(item.value, item.path); + if (newValue !== item.value) { + memo.push({ value: newValue, path: item.path }); + } + + return memo; + }, []), + } + : replacer; + + const toUpdate = processor.maskNodes(nodes); + for (const node of toUpdate) { + if (node.path === "") { + mutateValue = node.value as unknown as T; + } else { + set(mutateValue as unknown as object, node.path, node.value); + } + } + + return mutateValue; +} diff --git a/js/src/tests/anonymizer.test.ts b/js/src/tests/anonymizer.test.ts new file mode 100644 index 000000000..28e41ebec --- /dev/null +++ b/js/src/tests/anonymizer.test.ts @@ -0,0 +1,72 @@ +import { StringNodeRule, replaceSensitiveData } from "../anonymizer/index.js"; +import { v4 as uuid } from "uuid"; + +const EMAIL_REGEX = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+.[a-zA-Z]{2,}/g; +const UUID_REGEX = + /[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}/g; + +describe("replacer", () => { + const replacer = (text: string) => + text.replace(EMAIL_REGEX, "[email address]").replace(UUID_REGEX, "[uuid]"); + + test("object", () => { + expect( + replaceSensitiveData( + { + message: "Hello, this is my email: hello@example.com", + metadata: uuid(), + }, + replacer + ) + ).toEqual({ + message: "Hello, this is my 
email: [email address]", + metadata: "[uuid]", + }); + }); + + test("array", () => { + expect( + replaceSensitiveData(["human", "hello@example.com"], replacer) + ).toEqual(["human", "[email address]"]); + }); + + test("string", () => { + expect(replaceSensitiveData("hello@example.com", replacer)).toEqual( + "[email address]" + ); + }); +}); + +describe("declared", () => { + const replacers: StringNodeRule[] = [ + { pattern: EMAIL_REGEX, replace: "[email address]" }, + { pattern: UUID_REGEX, replace: "[uuid]" }, + ]; + + test("object", () => { + expect( + replaceSensitiveData( + { + message: "Hello, this is my email: hello@example.com", + metadata: uuid(), + }, + replacers + ) + ).toEqual({ + message: "Hello, this is my email: [email address]", + metadata: "[uuid]", + }); + }); + + test("array", () => { + expect( + replaceSensitiveData(["human", "hello@example.com"], replacers) + ).toEqual(["human", "[email address]"]); + }); + + test("string", () => { + expect(replaceSensitiveData("hello@example.com", replacers)).toEqual( + "[email address]" + ); + }); +}); diff --git a/js/tsconfig.json b/js/tsconfig.json index d2b424b45..92b1a3026 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -40,6 +40,7 @@ "src/schemas.ts", "src/langchain.ts", "src/wrappers/index.ts", + "src/anonymizer/index.ts", "src/wrappers/openai.ts", "src/singletons/traceable.ts" ] diff --git a/js/yarn.lock b/js/yarn.lock index 8e4cee5e8..be071906d 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -1487,6 +1487,18 @@ resolved "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz" integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== +"@types/lodash.set@^4.3.9": + version "4.3.9" + resolved "https://registry.yarnpkg.com/@types/lodash.set/-/lodash.set-4.3.9.tgz#55d95bce407b42c6655f29b2d0811fd428e698f0" + integrity sha512-KOxyNkZpbaggVmqbpr82N2tDVTx05/3/j0f50Es1prxrWB0XYf9p3QNxqcbWb7P1Q9wlvsUSlCFnwlPCIJ46PQ== + dependencies: + "@types/lodash" "*" 
+ +"@types/lodash@*": + version "4.17.4" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.17.4.tgz#0303b64958ee070059e3a7184048a55159fe20b7" + integrity sha512-wYCP26ZLxaT3R39kiN2+HcJ4kTd3U1waI/cY7ivWYqFP6pW3ZNpvi6Wd6PHZx7T/t8z0vlkXMg3QYLa7DZ/IJQ== + "@types/node-fetch@^2.6.4": version "2.6.11" resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.11.tgz#9b39b78665dae0e82a08f02f4967d62c66f95d24" @@ -3573,6 +3585,11 @@ lodash.merge@^4.6.2: resolved "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz" integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== +lodash.set@^4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/lodash.set/-/lodash.set-4.3.2.tgz#d8757b1da807dde24816b0d6a84bea1a76230b23" + integrity sha512-4hNPN5jlm/N/HLMCO43v8BXKq9Z7QdAGc/VGrRD61w8gN9g/6jF9A4L1pbUgBLCffi0w9VsXfTOij5x8iTyFvg== + lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz" From f3684fa28f2806fc9679d1741da0aaa6fde0dc2a Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 15:43:20 +0200 Subject: [PATCH 156/373] Add quick and dirty Python solution --- python/langsmith/anonymizer.py | 115 +++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 python/langsmith/anonymizer.py diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py new file mode 100644 index 000000000..07df0491a --- /dev/null +++ b/python/langsmith/anonymizer.py @@ -0,0 +1,115 @@ +import copy +import re +from abc import abstractmethod +from collections import defaultdict +from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union + + +class ExtractOptions(TypedDict): + maxDepth: Optional[int] + """ + Maximum depth to traverse to to extract string nodes + """ + + +class StringNode(TypedDict): + value: str + path: tuple[str | int] + + +def _extract_string_nodes(data: Any, options: 
ExtractOptions) -> List[StringNode]: + parsed_options = {**options, "maxDepth": options.get("maxDepth", 10)} + + queue: List[Tuple[Any, int, tuple[str | int]]] = [(data, 0, tuple())] + result: List[StringNode] = [] + + while queue: + task = queue.pop(0) + if task is None: + continue + value, depth, path = task + + if isinstance(value, dict) or isinstance(value, defaultdict): + if depth >= parsed_options["maxDepth"]: + continue + for key, nested_value in value.items(): + queue.append((nested_value, depth + 1, path + (key,))) + elif isinstance(value, list): + if depth >= parsed_options["maxDepth"]: + continue + for i, item in enumerate(value): + queue.append((item, depth + 1, path + (i,))) + elif isinstance(value, str): + result.append(StringNode(value, path)) + + return result + + +class StringNodeProcessor: + @abstractmethod + def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: + """Mask node.""" + + +class ReplacerOptions(TypedDict): + maxDepth: Optional[int] + deepClone: Optional[bool] + + +class StringNodeRule(TypedDict): + pattern: Union[str, re.Pattern] + replace: Optional[str] = "[redacted]" + + +ReplacerType = Union[ + Callable[[str, Optional[str]], str], List[StringNodeRule], StringNodeProcessor +] + + +def replace_sensitive_data( + data: Any, replacer: ReplacerType, options: Optional[ReplacerOptions] = None +) -> Any: + nodes = _extract_string_nodes(data, options) + mutate_value = copy.deepcopy(data) if options and options["deepClone"] else data + + if isinstance(replacer, list): + + def mask_nodes(nodes: List[StringNode]) -> List[StringNode]: + result = [] + for item in nodes: + new_value = item.value + for rule in replacer: + new_value = rule["pattern"].sub(rule["replace"], new_value) + if new_value != item.value: + result.append(StringNode(new_value, item.path)) + return result + + processor = StringNodeProcessor() + processor.mask_nodes = mask_nodes + elif callable(replacer): + + def mask_nodes(nodes: List[StringNode]) -> 
List[StringNode]: + return [ + StringNode(replacer(node.value, node.path), node.path) + for node in nodes + if replacer(node.value, node.path) != node.value + ] + + processor = StringNodeProcessor() + processor.mask_nodes = mask_nodes + else: + processor = replacer + + to_update = processor.mask_nodes(nodes) + for node in to_update: + if not node.path: + mutate_value = node.value + else: + temp = mutate_value + for part in node.path[:-1]: + temp = temp[part] + + last_part = node.path[-1] + temp[last_part] = node.value + + return mutate_value From ba0d60223045745d6270dabaa49cd2818d240b40 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 16:54:27 +0200 Subject: [PATCH 157/373] Add tests --- python/langsmith/anonymizer.py | 75 ++++++++++++++-------- python/tests/unit_tests/test_anonymizer.py | 58 +++++++++++++++++ 2 files changed, 107 insertions(+), 26 deletions(-) create mode 100644 python/tests/unit_tests/test_anonymizer.py diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 07df0491a..88dd96e5f 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -1,11 +1,11 @@ -import copy +import copy # noqa import re from abc import abstractmethod from collections import defaultdict -from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union +from typing import Any, Callable, List, Optional, Tuple, TypedDict, TypeVar, Union -class ExtractOptions(TypedDict): +class _ExtractOptions(TypedDict): maxDepth: Optional[int] """ Maximum depth to traverse to to extract string nodes @@ -13,12 +13,17 @@ class ExtractOptions(TypedDict): class StringNode(TypedDict): + """String node extracted from the data.""" + value: str + """String value.""" + path: tuple[str | int] + """Path to the string node in the data.""" -def _extract_string_nodes(data: Any, options: ExtractOptions) -> List[StringNode]: - parsed_options = {**options, "maxDepth": options.get("maxDepth", 10)} +def _extract_string_nodes(data: 
Any, options: _ExtractOptions) -> List[StringNode]: + max_depth = options.get("maxDepth", 10) queue: List[Tuple[Any, int, tuple[str | int]]] = [(data, 0, tuple())] result: List[StringNode] = [] @@ -30,46 +35,63 @@ def _extract_string_nodes(data: Any, options: ExtractOptions) -> List[StringNode value, depth, path = task if isinstance(value, dict) or isinstance(value, defaultdict): - if depth >= parsed_options["maxDepth"]: + if depth >= max_depth: continue for key, nested_value in value.items(): queue.append((nested_value, depth + 1, path + (key,))) elif isinstance(value, list): - if depth >= parsed_options["maxDepth"]: + if depth >= max_depth: continue for i, item in enumerate(value): queue.append((item, depth + 1, path + (i,))) elif isinstance(value, str): - result.append(StringNode(value, path)) + result.append(StringNode(value=value, path=path)) return result class StringNodeProcessor: + """Processes a list of string nodes for masking.""" + @abstractmethod def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: - """Mask node.""" + """Accept and return a list of string nodes to be masked.""" class ReplacerOptions(TypedDict): + """Configuration options for replacing sensitive data.""" + maxDepth: Optional[int] + """Maximum depth to traverse to to extract string nodes.""" + deepClone: Optional[bool] + """Deep clone the data before replacing.""" class StringNodeRule(TypedDict): + """Declarative rule used for replacing sensitive data.""" + pattern: Union[str, re.Pattern] + """Regex pattern to match.""" + replace: Optional[str] = "[redacted]" + """Replacement value. 
Defaults to `[redacted]` if not specified.""" ReplacerType = Union[ - Callable[[str, Optional[str]], str], List[StringNodeRule], StringNodeProcessor + Callable[[str, tuple[str | int]], str], List[StringNodeRule], StringNodeProcessor ] +T = TypeVar("T", str, dict) + def replace_sensitive_data( - data: Any, replacer: ReplacerType, options: Optional[ReplacerOptions] = None -) -> Any: - nodes = _extract_string_nodes(data, options) + data: T, replacer: ReplacerType, options: Optional[ReplacerOptions] = None +) -> T: + """Replace sensitive data.""" + nodes = _extract_string_nodes( + data, {"maxDepth": (options or {}).get("maxDepth", 10)} + ) mutate_value = copy.deepcopy(data) if options and options["deepClone"] else data if isinstance(replacer, list): @@ -77,11 +99,11 @@ def replace_sensitive_data( def mask_nodes(nodes: List[StringNode]) -> List[StringNode]: result = [] for item in nodes: - new_value = item.value + new_value = item["value"] for rule in replacer: new_value = rule["pattern"].sub(rule["replace"], new_value) - if new_value != item.value: - result.append(StringNode(new_value, item.path)) + if new_value != item["value"]: + result.append(StringNode(value=new_value, path=item["path"])) return result processor = StringNodeProcessor() @@ -89,11 +111,12 @@ def mask_nodes(nodes: List[StringNode]) -> List[StringNode]: elif callable(replacer): def mask_nodes(nodes: List[StringNode]) -> List[StringNode]: - return [ - StringNode(replacer(node.value, node.path), node.path) - for node in nodes - if replacer(node.value, node.path) != node.value - ] + retval: list[StringNode] = [] + for node in nodes: + candidate = replacer(node["value"], node["path"]) + if candidate != node["value"]: + retval.append(StringNode(value=candidate, path=node["path"])) + return retval processor = StringNodeProcessor() processor.mask_nodes = mask_nodes @@ -102,14 +125,14 @@ def mask_nodes(nodes: List[StringNode]) -> List[StringNode]: to_update = processor.mask_nodes(nodes) for node in to_update: 
- if not node.path: - mutate_value = node.value + if not node["path"]: + mutate_value = node["value"] else: temp = mutate_value - for part in node.path[:-1]: + for part in node["path"][:-1]: temp = temp[part] - last_part = node.path[-1] - temp[last_part] = node.value + last_part = node["path"][-1] + temp[last_part] = node["value"] return mutate_value diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py new file mode 100644 index 000000000..7a90a8e82 --- /dev/null +++ b/python/tests/unit_tests/test_anonymizer.py @@ -0,0 +1,58 @@ +import re +from uuid import uuid4 + +from langsmith.anonymizer import StringNodeRule, replace_sensitive_data + +EMAIL_REGEX = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}") +UUID_REGEX = re.compile( + r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" +) + + +def test_replacer_function(): + def replacer(text: str, _: tuple[str | int]): + text = EMAIL_REGEX.sub("[email address]", text) + text = UUID_REGEX.sub("[uuid]", text) + return text + + assert replace_sensitive_data( + { + "message": "Hello, this is my email: hello@example.com", + "metadata": str(uuid4()), + }, + replacer, + ) == { + "message": "Hello, this is my email: [email address]", + "metadata": "[uuid]", + } + + assert replace_sensitive_data(["human", "hello@example.com"], replacer) == [ + "human", + "[email address]", + ] + assert replace_sensitive_data("hello@example.com", replacer) == "[email address]" + + +def test_replacer_declared(): + replacers = [ + StringNodeRule(pattern=EMAIL_REGEX, replace="[email address]"), + StringNodeRule(pattern=UUID_REGEX, replace="[uuid]"), + ] + + assert replace_sensitive_data( + { + "message": "Hello, this is my email: hello@example.com", + "metadata": str(uuid4()), + }, + replacers, + ) == { + "message": "Hello, this is my email: [email address]", + "metadata": "[uuid]", + } + + assert replace_sensitive_data(["human", "hello@example.com"], replacers) 
== [ + "human", + "[email address]", + ] + + assert replace_sensitive_data("hello@example.com", replacers) == "[email address]" From 979637e5209cbd915615b3a196090fa5bce7aac8 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 17:41:20 +0200 Subject: [PATCH 158/373] Fix lint --- python/langsmith/anonymizer.py | 109 +++++++++++++-------- python/tests/unit_tests/test_anonymizer.py | 2 +- 2 files changed, 67 insertions(+), 44 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 88dd96e5f..254e81996 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -2,7 +2,7 @@ import re from abc import abstractmethod from collections import defaultdict -from typing import Any, Callable, List, Optional, Tuple, TypedDict, TypeVar, Union +from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union class _ExtractOptions(TypedDict): @@ -18,14 +18,14 @@ class StringNode(TypedDict): value: str """String value.""" - path: tuple[str | int] + path: list[str | int] """Path to the string node in the data.""" def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNode]: - max_depth = options.get("maxDepth", 10) + max_depth = options.get("maxDepth") or 10 - queue: List[Tuple[Any, int, tuple[str | int]]] = [(data, 0, tuple())] + queue: List[Tuple[Any, int, list[str | int]]] = [(data, 0, list())] result: List[StringNode] = [] while queue: @@ -38,12 +38,12 @@ def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNod if depth >= max_depth: continue for key, nested_value in value.items(): - queue.append((nested_value, depth + 1, path + (key,))) + queue.append((nested_value, depth + 1, path + [key])) elif isinstance(value, list): if depth >= max_depth: continue for i, item in enumerate(value): - queue.append((item, depth + 1, path + (i,))) + queue.append((item, depth + 1, path + [i])) elif isinstance(value, str): result.append(StringNode(value=value, 
path=path)) @@ -71,59 +71,82 @@ class ReplacerOptions(TypedDict): class StringNodeRule(TypedDict): """Declarative rule used for replacing sensitive data.""" - pattern: Union[str, re.Pattern] + pattern: re.Pattern """Regex pattern to match.""" - replace: Optional[str] = "[redacted]" + replace: Optional[str] """Replacement value. Defaults to `[redacted]` if not specified.""" +class RuleNodeProcessor(StringNodeProcessor): + """String node processor that uses a list of rules to replace sensitive data.""" + + rules: List[StringNodeRule] + + def __init__(self, rules: List[StringNodeRule]): + """Initialize the processor with a list of rules.""" + self.rules = rules + + def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: + """Mask nodes using the rules.""" + result = [] + for item in nodes: + new_value = item["value"] + for rule in self.rules: + new_value = rule["pattern"].sub( + rule["replace"] + if isinstance(rule["replace"], str) + else "[redacted]", + new_value, + ) + if new_value != item["value"]: + result.append(StringNode(value=new_value, path=item["path"])) + return result + + +class CallableNodeProcessor(StringNodeProcessor): + """String node processor that uses a callable function to replace sensitive data.""" + + func: Callable[[str, list[str | int]], str] + + def __init__(self, func: Callable[[str, list[str | int]], str]): + """Initialize the processor with a callable function.""" + self.func = func + + def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: + """Mask nodes using the callable function.""" + retval: list[StringNode] = [] + for node in nodes: + candidate = self.func(node["value"], node["path"]) + if candidate != node["value"]: + retval.append(StringNode(value=candidate, path=node["path"])) + return retval + + ReplacerType = Union[ - Callable[[str, tuple[str | int]], str], List[StringNodeRule], StringNodeProcessor + Callable[[str, list[str | int]], str], List[StringNodeRule], StringNodeProcessor ] -T = TypeVar("T", str, 
dict) + +def _get_node_processor(replacer: ReplacerType) -> StringNodeProcessor: + if isinstance(replacer, list): + return RuleNodeProcessor(rules=replacer) + elif callable(replacer): + return CallableNodeProcessor(func=replacer) + else: + return replacer def replace_sensitive_data( - data: T, replacer: ReplacerType, options: Optional[ReplacerOptions] = None -) -> T: + data: Any, replacer: ReplacerType, options: Optional[ReplacerOptions] = None +) -> Any: """Replace sensitive data.""" nodes = _extract_string_nodes( - data, {"maxDepth": (options or {}).get("maxDepth", 10)} + data, {"maxDepth": (options.get("maxDepth") if options else None) or 10} ) mutate_value = copy.deepcopy(data) if options and options["deepClone"] else data - if isinstance(replacer, list): - - def mask_nodes(nodes: List[StringNode]) -> List[StringNode]: - result = [] - for item in nodes: - new_value = item["value"] - for rule in replacer: - new_value = rule["pattern"].sub(rule["replace"], new_value) - if new_value != item["value"]: - result.append(StringNode(value=new_value, path=item["path"])) - return result - - processor = StringNodeProcessor() - processor.mask_nodes = mask_nodes - elif callable(replacer): - - def mask_nodes(nodes: List[StringNode]) -> List[StringNode]: - retval: list[StringNode] = [] - for node in nodes: - candidate = replacer(node["value"], node["path"]) - if candidate != node["value"]: - retval.append(StringNode(value=candidate, path=node["path"])) - return retval - - processor = StringNodeProcessor() - processor.mask_nodes = mask_nodes - else: - processor = replacer - - to_update = processor.mask_nodes(nodes) + to_update = _get_node_processor(replacer).mask_nodes(nodes) for node in to_update: if not node["path"]: mutate_value = node["value"] diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index 7a90a8e82..9832453c7 100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py 
@@ -10,7 +10,7 @@ def test_replacer_function(): - def replacer(text: str, _: tuple[str | int]): + def replacer(text: str, _: list[str | int]): text = EMAIL_REGEX.sub("[email address]", text) text = UUID_REGEX.sub("[uuid]", text) return text From ee2336cda8a59cf01891c6c4d760b519390d970a Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 17:41:55 +0200 Subject: [PATCH 159/373] Fix format --- python/langsmith/anonymizer.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 254e81996..6b89a0fb3 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -94,9 +94,11 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: new_value = item["value"] for rule in self.rules: new_value = rule["pattern"].sub( - rule["replace"] - if isinstance(rule["replace"], str) - else "[redacted]", + ( + rule["replace"] + if isinstance(rule["replace"], str) + else "[redacted]" + ), new_value, ) if new_value != item["value"]: From 715eb24b553c5b77140d99a05cddd21b5a6a5177 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 17:45:03 +0200 Subject: [PATCH 160/373] Replace union --- python/langsmith/anonymizer.py | 12 +++++++----- python/tests/unit_tests/test_anonymizer.py | 3 ++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 6b89a0fb3..d28b68920 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -18,14 +18,14 @@ class StringNode(TypedDict): value: str """String value.""" - path: list[str | int] + path: list[Union[str, int]] """Path to the string node in the data.""" def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNode]: max_depth = options.get("maxDepth") or 10 - queue: List[Tuple[Any, int, list[str | int]]] = [(data, 0, list())] + queue: List[Tuple[Any, int, list[Union[str, int]]]] = 
[(data, 0, list())] result: List[StringNode] = [] while queue: @@ -109,9 +109,9 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: class CallableNodeProcessor(StringNodeProcessor): """String node processor that uses a callable function to replace sensitive data.""" - func: Callable[[str, list[str | int]], str] + func: Callable[[str, list[Union[str, int]]], str] - def __init__(self, func: Callable[[str, list[str | int]], str]): + def __init__(self, func: Callable[[str, list[Union[str, int]]], str]): """Initialize the processor with a callable function.""" self.func = func @@ -126,7 +126,9 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: ReplacerType = Union[ - Callable[[str, list[str | int]], str], List[StringNodeRule], StringNodeProcessor + Callable[[str, list[Union[str, int]]], str], + List[StringNodeRule], + StringNodeProcessor, ] diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index 9832453c7..5c8ed9080 100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -1,4 +1,5 @@ import re +from typing import Union from uuid import uuid4 from langsmith.anonymizer import StringNodeRule, replace_sensitive_data @@ -10,7 +11,7 @@ def test_replacer_function(): - def replacer(text: str, _: list[str | int]): + def replacer(text: str, _: list[Union[str, int]]): text = EMAIL_REGEX.sub("[email address]", text) text = UUID_REGEX.sub("[uuid]", text) return text From 440a43218d6a84a7981a9f66a7b1c9107d279f9c Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 17:47:47 +0200 Subject: [PATCH 161/373] Do not use list --- python/langsmith/anonymizer.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index d28b68920..c6a471fcd 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -18,14 +18,14 @@ class 
StringNode(TypedDict): value: str """String value.""" - path: list[Union[str, int]] + path: List[Union[str, int]] """Path to the string node in the data.""" def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNode]: max_depth = options.get("maxDepth") or 10 - queue: List[Tuple[Any, int, list[Union[str, int]]]] = [(data, 0, list())] + queue: List[Tuple[Any, int, List[Union[str, int]]]] = [(data, 0, list())] result: List[StringNode] = [] while queue: @@ -109,15 +109,15 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: class CallableNodeProcessor(StringNodeProcessor): """String node processor that uses a callable function to replace sensitive data.""" - func: Callable[[str, list[Union[str, int]]], str] + func: Callable[[str, List[Union[str, int]]], str] - def __init__(self, func: Callable[[str, list[Union[str, int]]], str]): + def __init__(self, func: Callable[[str, List[Union[str, int]]], str]): """Initialize the processor with a callable function.""" self.func = func def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: """Mask nodes using the callable function.""" - retval: list[StringNode] = [] + retval: List[StringNode] = [] for node in nodes: candidate = self.func(node["value"], node["path"]) if candidate != node["value"]: @@ -126,7 +126,7 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: ReplacerType = Union[ - Callable[[str, list[Union[str, int]]], str], + Callable[[str, List[Union[str, int]]], str], List[StringNodeRule], StringNodeProcessor, ] From 548bbf8d0af2f75702e34c87520ed2c353fb6ddb Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 17:50:59 +0200 Subject: [PATCH 162/373] Replace list with List in test_anonymizer --- python/tests/unit_tests/test_anonymizer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index 5c8ed9080..f8df94848 100644 --- 
a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -1,5 +1,5 @@ import re -from typing import Union +from typing import List, Union from uuid import uuid4 from langsmith.anonymizer import StringNodeRule, replace_sensitive_data @@ -11,7 +11,7 @@ def test_replacer_function(): - def replacer(text: str, _: list[Union[str, int]]): + def replacer(text: str, _: List[Union[str, int]]): text = EMAIL_REGEX.sub("[email address]", text) text = UUID_REGEX.sub("[uuid]", text) return text From a45b45eb93033563f888dfca8ff7e77203e9697d Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 18:06:37 +0200 Subject: [PATCH 163/373] Add to __init__ --- python/langsmith/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index 0e0325d13..23f8901b4 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -97,6 +97,7 @@ def __getattr__(name: str) -> Any: "__version__", "EvaluationResult", "RunEvaluator", + "anonymizer", "traceable", "trace", "unit", From 20c0d097273c4e614a7569cc0a9f2ab2fbc7e8cd Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 22:08:03 +0200 Subject: [PATCH 164/373] Convert to factory --- js/src/anonymizer/index.ts | 127 ++++++++++++++++---------------- js/src/tests/anonymizer.test.ts | 32 ++++---- 2 files changed, 78 insertions(+), 81 deletions(-) diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts index 6e49e09bb..dc97267c9 100644 --- a/js/src/anonymizer/index.ts +++ b/js/src/anonymizer/index.ts @@ -58,79 +58,82 @@ export type ReplacerType = | StringNodeRule[] | StringNodeProcessor; -export function replaceSensitiveData( - data: T, +export function createAnonymizer( replacer: ReplacerType, options?: { maxDepth?: number; deepClone?: boolean; } -): T { - const nodes = extractStringNodes(data, { - maxDepth: options?.maxDepth, - }); - - // by default we opt-in to mutate the value directly - 
// to improve performance - let mutateValue = options?.deepClone ? deepClone(data) : data; - - const processor: StringNodeProcessor = Array.isArray(replacer) - ? (() => { - const replacers: [regex: RegExp, replace: string][] = replacer.map( - ({ pattern, type, replace }) => { - if (type != null && type !== "pattern") - throw new Error("Invalid anonymizer type"); - return [ - typeof pattern === "string" ? new RegExp(pattern, "g") : pattern, - replace ?? "[redacted]", - ]; - } - ); - - if (replacers.length === 0) throw new Error("No replacers provided"); - return { - maskNodes: (nodes: StringNode[]) => { - return nodes.reduce((memo, item) => { - const newValue = replacers.reduce((value, [regex, replace]) => { - const result = value.replace(regex, replace); - - // make sure we reset the state of regex - regex.lastIndex = 0; - - return result; - }, item.value); - +) { + return (data: T): T => { + const nodes = extractStringNodes(data, { + maxDepth: options?.maxDepth, + }); + + // by default we opt-in to mutate the value directly + // to improve performance + let mutateValue = options?.deepClone ? deepClone(data) : data; + + const processor: StringNodeProcessor = Array.isArray(replacer) + ? (() => { + const replacers: [regex: RegExp, replace: string][] = replacer.map( + ({ pattern, type, replace }) => { + if (type != null && type !== "pattern") + throw new Error("Invalid anonymizer type"); + return [ + typeof pattern === "string" + ? new RegExp(pattern, "g") + : pattern, + replace ?? 
"[redacted]", + ]; + } + ); + + if (replacers.length === 0) throw new Error("No replacers provided"); + return { + maskNodes: (nodes: StringNode[]) => { + return nodes.reduce((memo, item) => { + const newValue = replacers.reduce((value, [regex, replace]) => { + const result = value.replace(regex, replace); + + // make sure we reset the state of regex + regex.lastIndex = 0; + + return result; + }, item.value); + + if (newValue !== item.value) { + memo.push({ value: newValue, path: item.path }); + } + + return memo; + }, []); + }, + }; + })() + : typeof replacer === "function" + ? { + maskNodes: (nodes: StringNode[]) => + nodes.reduce((memo, item) => { + const newValue = replacer(item.value, item.path); if (newValue !== item.value) { memo.push({ value: newValue, path: item.path }); } return memo; - }, []); - }, - }; - })() - : typeof replacer === "function" - ? { - maskNodes: (nodes: StringNode[]) => - nodes.reduce((memo, item) => { - const newValue = replacer(item.value, item.path); - if (newValue !== item.value) { - memo.push({ value: newValue, path: item.path }); - } - - return memo; - }, []), + }, []), + } + : replacer; + + const toUpdate = processor.maskNodes(nodes); + for (const node of toUpdate) { + if (node.path === "") { + mutateValue = node.value as unknown as T; + } else { + set(mutateValue as unknown as object, node.path, node.value); } - : replacer; - - const toUpdate = processor.maskNodes(nodes); - for (const node of toUpdate) { - if (node.path === "") { - mutateValue = node.value as unknown as T; - } else { - set(mutateValue as unknown as object, node.path, node.value); } - } - return mutateValue; + return mutateValue; + }; } diff --git a/js/src/tests/anonymizer.test.ts b/js/src/tests/anonymizer.test.ts index 28e41ebec..c0441e275 100644 --- a/js/src/tests/anonymizer.test.ts +++ b/js/src/tests/anonymizer.test.ts @@ -1,4 +1,4 @@ -import { StringNodeRule, replaceSensitiveData } from "../anonymizer/index.js"; +import { StringNodeRule, createAnonymizer } 
from "../anonymizer/index.js"; import { v4 as uuid } from "uuid"; const EMAIL_REGEX = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+.[a-zA-Z]{2,}/g; @@ -11,13 +11,10 @@ describe("replacer", () => { test("object", () => { expect( - replaceSensitiveData( - { - message: "Hello, this is my email: hello@example.com", - metadata: uuid(), - }, - replacer - ) + createAnonymizer(replacer)({ + message: "Hello, this is my email: hello@example.com", + metadata: uuid(), + }) ).toEqual({ message: "Hello, this is my email: [email address]", metadata: "[uuid]", @@ -26,12 +23,12 @@ describe("replacer", () => { test("array", () => { expect( - replaceSensitiveData(["human", "hello@example.com"], replacer) + createAnonymizer(replacer)(["human", "hello@example.com"]) ).toEqual(["human", "[email address]"]); }); test("string", () => { - expect(replaceSensitiveData("hello@example.com", replacer)).toEqual( + expect(createAnonymizer(replacer)("hello@example.com")).toEqual( "[email address]" ); }); @@ -45,13 +42,10 @@ describe("declared", () => { test("object", () => { expect( - replaceSensitiveData( - { - message: "Hello, this is my email: hello@example.com", - metadata: uuid(), - }, - replacers - ) + createAnonymizer(replacers)({ + message: "Hello, this is my email: hello@example.com", + metadata: uuid(), + }) ).toEqual({ message: "Hello, this is my email: [email address]", metadata: "[uuid]", @@ -60,12 +54,12 @@ describe("declared", () => { test("array", () => { expect( - replaceSensitiveData(["human", "hello@example.com"], replacers) + createAnonymizer(replacers)(["human", "hello@example.com"]) ).toEqual(["human", "[email address]"]); }); test("string", () => { - expect(replaceSensitiveData("hello@example.com", replacers)).toEqual( + expect(createAnonymizer(replacers)("hello@example.com")).toEqual( "[email address]" ); }); From e030ed4eb1ea038d2c7ac4e55693bf4ecde69741 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 22:10:38 +0200 Subject: [PATCH 165/373] Convert the python API as 
well --- python/langsmith/anonymizer.py | 48 ++++++++++++---------- python/tests/unit_tests/test_anonymizer.py | 20 ++++----- 2 files changed, 35 insertions(+), 33 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index c6a471fcd..07b5e1d8a 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -141,25 +141,29 @@ def _get_node_processor(replacer: ReplacerType) -> StringNodeProcessor: return replacer -def replace_sensitive_data( - data: Any, replacer: ReplacerType, options: Optional[ReplacerOptions] = None -) -> Any: - """Replace sensitive data.""" - nodes = _extract_string_nodes( - data, {"maxDepth": (options.get("maxDepth") if options else None) or 10} - ) - mutate_value = copy.deepcopy(data) if options and options["deepClone"] else data - - to_update = _get_node_processor(replacer).mask_nodes(nodes) - for node in to_update: - if not node["path"]: - mutate_value = node["value"] - else: - temp = mutate_value - for part in node["path"][:-1]: - temp = temp[part] - - last_part = node["path"][-1] - temp[last_part] = node["value"] - - return mutate_value +def create_anonymizer( + replacer: ReplacerType, options: Optional[ReplacerOptions] = None +) -> Callable[[Any], Any]: + """Create an anonymizer function.""" + + def anonymizer(data: Any) -> Any: + nodes = _extract_string_nodes( + data, {"maxDepth": (options.get("maxDepth") if options else None) or 10} + ) + mutate_value = copy.deepcopy(data) if options and options["deepClone"] else data + + to_update = _get_node_processor(replacer).mask_nodes(nodes) + for node in to_update: + if not node["path"]: + mutate_value = node["value"] + else: + temp = mutate_value + for part in node["path"][:-1]: + temp = temp[part] + + last_part = node["path"][-1] + temp[last_part] = node["value"] + + return mutate_value + + return anonymizer diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index f8df94848..30679952b 
100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -2,7 +2,7 @@ from typing import List, Union from uuid import uuid4 -from langsmith.anonymizer import StringNodeRule, replace_sensitive_data +from langsmith.anonymizer import StringNodeRule, create_anonymizer EMAIL_REGEX = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}") UUID_REGEX = re.compile( @@ -16,22 +16,21 @@ def replacer(text: str, _: List[Union[str, int]]): text = UUID_REGEX.sub("[uuid]", text) return text - assert replace_sensitive_data( + assert create_anonymizer(replacer)( { "message": "Hello, this is my email: hello@example.com", "metadata": str(uuid4()), - }, - replacer, + } ) == { "message": "Hello, this is my email: [email address]", "metadata": "[uuid]", } - assert replace_sensitive_data(["human", "hello@example.com"], replacer) == [ + assert create_anonymizer(replacer)(["human", "hello@example.com"]) == [ "human", "[email address]", ] - assert replace_sensitive_data("hello@example.com", replacer) == "[email address]" + assert create_anonymizer(replacer)("hello@example.com") == "[email address]" def test_replacer_declared(): @@ -40,20 +39,19 @@ def test_replacer_declared(): StringNodeRule(pattern=UUID_REGEX, replace="[uuid]"), ] - assert replace_sensitive_data( + assert create_anonymizer(replacers)( { "message": "Hello, this is my email: hello@example.com", "metadata": str(uuid4()), - }, - replacers, + } ) == { "message": "Hello, this is my email: [email address]", "metadata": "[uuid]", } - assert replace_sensitive_data(["human", "hello@example.com"], replacers) == [ + assert create_anonymizer(replacers)(["human", "hello@example.com"]) == [ "human", "[email address]", ] - assert replace_sensitive_data("hello@example.com", replacers) == "[email address]" + assert create_anonymizer(replacers)("hello@example.com") == "[email address]" From 4b80b72ecd53f544f243c3bc4d11f2dfec9037e9 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 
14 Jun 2024 22:21:22 +0200 Subject: [PATCH 166/373] Fix types, add `anonymizer` kwarg to avoid double specifying hide_input / hide_output --- js/src/anonymizer/index.ts | 4 ++-- js/src/client.ts | 13 +++++++++---- python/langsmith/client.py | 5 +++++ 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts index dc97267c9..60becaf9c 100644 --- a/js/src/anonymizer/index.ts +++ b/js/src/anonymizer/index.ts @@ -58,14 +58,14 @@ export type ReplacerType = | StringNodeRule[] | StringNodeProcessor; -export function createAnonymizer( +export function createAnonymizer( replacer: ReplacerType, options?: { maxDepth?: number; deepClone?: boolean; } ) { - return (data: T): T => { + return (data: T): T => { const nodes = extractStringNodes(data, { maxDepth: options?.maxDepth, }); diff --git a/js/src/client.ts b/js/src/client.ts index 50cecdcea..15be0d209 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -49,8 +49,9 @@ interface ClientConfig { callerOptions?: AsyncCallerParams; timeout_ms?: number; webUrl?: string; - hideInputs?: boolean; - hideOutputs?: boolean; + anonymizer?: (values: KVMap) => KVMap; + hideInputs?: boolean | ((inputs: KVMap) => KVMap); + hideOutputs?: boolean | ((outputs: KVMap) => KVMap); autoBatchTracing?: boolean; pendingAutoBatchedRunLimit?: number; fetchOptions?: RequestInit; @@ -429,8 +430,12 @@ export class Client { ...(config.callerOptions ?? {}), onFailedResponseHook: handle429, }); - this.hideInputs = config.hideInputs ?? defaultConfig.hideInputs; - this.hideOutputs = config.hideOutputs ?? defaultConfig.hideOutputs; + + this.hideInputs = + config.hideInputs ?? config.anonymizer ?? defaultConfig.hideInputs; + this.hideOutputs = + config.hideOutputs ?? config.anonymizer ?? defaultConfig.hideOutputs; + this.autoBatchTracing = config.autoBatchTracing ?? this.autoBatchTracing; this.pendingAutoBatchedRunLimit = config.pendingAutoBatchedRunLimit ?? 
this.pendingAutoBatchedRunLimit; diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 13e3eb15b..fdb361dbe 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -461,6 +461,7 @@ def __init__( web_url: Optional[str] = None, session: Optional[requests.Session] = None, auto_batch_tracing: bool = True, + anonymizer: Optional[Callable[[dict], dict]] = None, hide_inputs: Optional[Union[Callable[[dict], dict], bool]] = None, hide_outputs: Optional[Union[Callable[[dict], dict], bool]] = None, info: Optional[Union[dict, ls_schemas.LangSmithInfo]] = None, @@ -578,11 +579,15 @@ def __init__( self._hide_inputs = ( hide_inputs if hide_inputs is not None + else anonymizer + if anonymizer is not None else ls_utils.get_env_var("HIDE_INPUTS") == "true" ) self._hide_outputs = ( hide_outputs if hide_outputs is not None + else anonymizer + if anonymizer is not None else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) From cba9e662f66abc0decc377eb5df3304ccbe45a06 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 22:25:18 +0200 Subject: [PATCH 167/373] Fix format and lint --- js/src/tests/anonymizer.test.ts | 13 +++++++------ python/langsmith/client.py | 16 ++++++++++------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/js/src/tests/anonymizer.test.ts b/js/src/tests/anonymizer.test.ts index c0441e275..962e3f20e 100644 --- a/js/src/tests/anonymizer.test.ts +++ b/js/src/tests/anonymizer.test.ts @@ -22,9 +22,10 @@ describe("replacer", () => { }); test("array", () => { - expect( - createAnonymizer(replacer)(["human", "hello@example.com"]) - ).toEqual(["human", "[email address]"]); + expect(createAnonymizer(replacer)(["human", "hello@example.com"])).toEqual([ + "human", + "[email address]", + ]); }); test("string", () => { @@ -53,9 +54,9 @@ describe("declared", () => { }); test("array", () => { - expect( - createAnonymizer(replacers)(["human", "hello@example.com"]) - ).toEqual(["human", "[email address]"]); + 
expect(createAnonymizer(replacers)(["human", "hello@example.com"])).toEqual( + ["human", "[email address]"] + ); }); test("string", () => { diff --git a/python/langsmith/client.py b/python/langsmith/client.py index fdb361dbe..2bd7f858e 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -579,16 +579,20 @@ def __init__( self._hide_inputs = ( hide_inputs if hide_inputs is not None - else anonymizer - if anonymizer is not None - else ls_utils.get_env_var("HIDE_INPUTS") == "true" + else ( + anonymizer + if anonymizer is not None + else ls_utils.get_env_var("HIDE_INPUTS") == "true" + ) ) self._hide_outputs = ( hide_outputs if hide_outputs is not None - else anonymizer - if anonymizer is not None - else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" + else ( + anonymizer + if anonymizer is not None + else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" + ) ) def _repr_html_(self) -> str: From 08977677c8f95f65bbdf5a0ad89616cd18080822 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 22:33:39 +0200 Subject: [PATCH 168/373] Accept replacer with single arg --- python/langsmith/anonymizer.py | 9 ++++++++- python/tests/unit_tests/test_anonymizer.py | 10 ++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 07b5e1d8a..8e39067c0 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -1,5 +1,6 @@ import copy # noqa import re +import inspect from abc import abstractmethod from collections import defaultdict from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union @@ -110,16 +111,22 @@ class CallableNodeProcessor(StringNodeProcessor): """String node processor that uses a callable function to replace sensitive data.""" func: Callable[[str, List[Union[str, int]]], str] + accepts_path: bool def __init__(self, func: Callable[[str, List[Union[str, int]]], str]): """Initialize the processor with a callable 
function.""" self.func = func + self.accepts_path = len(inspect.signature(func).parameters) == 2 def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: """Mask nodes using the callable function.""" retval: List[StringNode] = [] for node in nodes: - candidate = self.func(node["value"], node["path"]) + candidate = ( + self.func(node["value"], node["path"]) + if self.accepts_path + else self.func(node["value"]) + ) if candidate != node["value"]: retval.append(StringNode(value=candidate, path=node["path"])) return retval diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index 30679952b..da8a265bd 100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -33,6 +33,16 @@ def replacer(text: str, _: List[Union[str, int]]): assert create_anonymizer(replacer)("hello@example.com") == "[email address]" +def test_replacer_lambda(): + assert create_anonymizer(lambda text: EMAIL_REGEX.sub("[email address]", text))( + { + "message": "Hello, this is my email: hello@example.com", + } + ) == { + "message": "Hello, this is my email: [email address]", + } + + def test_replacer_declared(): replacers = [ StringNodeRule(pattern=EMAIL_REGEX, replace="[email address]"), From d94fcd76cb4d197bb613b110a1e111c1e20cb5bc Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 14 Jun 2024 22:35:35 +0200 Subject: [PATCH 169/373] Fix types --- python/langsmith/anonymizer.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 8e39067c0..c3bc84c68 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -110,10 +110,13 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: class CallableNodeProcessor(StringNodeProcessor): """String node processor that uses a callable function to replace sensitive data.""" - func: Callable[[str, List[Union[str, int]]], 
str] + func: Union[Callable[[str], str], Callable[[str, List[Union[str, int]]], str]] accepts_path: bool - def __init__(self, func: Callable[[str, List[Union[str, int]]], str]): + def __init__( + self, + func: Union[Callable[[str], str], Callable[[str, List[Union[str, int]]], str]], + ): """Initialize the processor with a callable function.""" self.func = func self.accepts_path = len(inspect.signature(func).parameters) == 2 @@ -123,9 +126,9 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: retval: List[StringNode] = [] for node in nodes: candidate = ( - self.func(node["value"], node["path"]) + self.func(node["value"], node["path"]) # type: ignore[call-arg] if self.accepts_path - else self.func(node["value"]) + else self.func(node["value"]) # type: ignore[call-arg] ) if candidate != node["value"]: retval.append(StringNode(value=candidate, path=node["path"])) From 9072afe7aeccef3393c8481c27407a40c5f13e9b Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Mon, 17 Jun 2024 11:38:31 -0700 Subject: [PATCH 170/373] Add test for objects --- python/tests/unit_tests/test_anonymizer.py | 61 ++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index da8a265bd..6feeef1d1 100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -1,7 +1,13 @@ +import json import re +import uuid from typing import List, Union +from unittest.mock import MagicMock from uuid import uuid4 +from pydantic import BaseModel + +from langsmith import Client, traceable from langsmith.anonymizer import StringNodeRule, create_anonymizer EMAIL_REGEX = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}") @@ -65,3 +71,58 @@ def test_replacer_declared(): ] assert create_anonymizer(replacers)("hello@example.com") == "[email address]" + + +def test_replacer_declared_in_traceable(): + 
replacers = [ + StringNodeRule(pattern=EMAIL_REGEX, replace="[email address]"), + StringNodeRule(pattern=UUID_REGEX, replace="[uuid]"), + ] + anonymizer = create_anonymizer(replacers) + mock_client = Client( + session=MagicMock(), auto_batch_tracing=False, anonymizer=anonymizer + ) + + user_email = "my-test@langchain.ai" + user_id = "4ae21a90-d43b-4017-bb21-4fd9add235ff" + + class MyOutput(BaseModel): + user_email: str + user_id: uuid.UUID + body: str + + class MyInput(BaseModel): + from_email: str + + @traceable(client=mock_client) + def my_func(body: str, from_: MyInput) -> MyOutput: + return MyOutput(user_email=user_email, user_id=user_id, body=body) + + body_ = "Hello from Pluto" + res = my_func(body_, from_=MyInput(from_email="my-from-test@langchain.ai")) + expected = MyOutput(user_email=user_email, user_id=uuid.UUID(user_id), body=body_) + assert res == expected + # get posts + posts = [ + json.loads(call[2]["data"]) + for call in mock_client.session.request.mock_calls + if call.args and call.args[1].endswith("runs") + ] + patches = [ + json.loads(call[2]["data"]) + for call in mock_client.session.request.mock_calls + if call.args and call.args[1].endswith("patches") + ] + expected_inputs = {"from_": {"from_email": "[email address]"}, "body": body_} + expected_outputs = { + "user_email": "[email address]", + "user_id": "[uuid]", + "body": body_, + } + assert len(posts) == 1 + posted_data = posts[0] + assert posted_data["inputs"] == expected_inputs + assert len(patches) == 1 + patched_data = patches[0] + assert patched_data["inputs"] == expected_inputs + assert patched_data["outputs"] == expected_outputs From 4507297dd90bec4ccbd956d80e397395bb4a53b3 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 17 Jun 2024 21:38:39 +0200 Subject: [PATCH 171/373] Fix camecase, code review --- python/langsmith/anonymizer.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 
c3bc84c68..c71bd02da 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -7,7 +7,7 @@ class _ExtractOptions(TypedDict): - maxDepth: Optional[int] + max_depth: Optional[int] """ Maximum depth to traverse to to extract string nodes """ @@ -26,7 +26,7 @@ class StringNode(TypedDict): def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNode]: max_depth = options.get("maxDepth") or 10 - queue: List[Tuple[Any, int, List[Union[str, int]]]] = [(data, 0, list())] + queue: List[Tuple[Any, int, List[Union[str, int]]]] = [(data, 0, [])] result: List[StringNode] = [] while queue: @@ -35,7 +35,7 @@ def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNod continue value, depth, path = task - if isinstance(value, dict) or isinstance(value, defaultdict): + if isinstance(value, (dict, defaultdict)): if depth >= max_depth: continue for key, nested_value in value.items(): @@ -62,10 +62,10 @@ def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]: class ReplacerOptions(TypedDict): """Configuration options for replacing sensitive data.""" - maxDepth: Optional[int] + max_depth: Optional[int] """Maximum depth to traverse to to extract string nodes.""" - deepClone: Optional[bool] + deep_clone: Optional[bool] """Deep clone the data before replacing.""" From bc1ea7ec3fd921bb9a36b835a151e6487ba38ae3 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 17 Jun 2024 22:24:14 +0200 Subject: [PATCH 172/373] Add docs, make sure we serialise the payload before --- python/langsmith/client.py | 31 ++++++++++++---------- python/tests/unit_tests/test_anonymizer.py | 19 ++++++++----- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 2bd7f858e..a213b82b8 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -445,6 +445,7 @@ class Client: "tracing_sample_rate", "_sampled_post_uuids", "tracing_queue", + "_anonymizer", 
"_hide_inputs", "_hide_outputs", "_info", @@ -487,6 +488,9 @@ def __init__( session: requests.Session or None, default=None The session to use for requests. If None, a new session will be created. + anonymizer : Optional[Callable[[dict], dict]] + A function applied for masking serialized run inputs and outputs, + before sending to the API. hide_inputs: Whether to hide run inputs when tracing with this client. If True, hides the entire inputs. If a function, applied to all run inputs when creating runs. @@ -576,23 +580,16 @@ def __init__( self._get_data_type_cached = functools.lru_cache(maxsize=10)( self._get_data_type ) + self._anonymizer = anonymizer self._hide_inputs = ( hide_inputs if hide_inputs is not None - else ( - anonymizer - if anonymizer is not None - else ls_utils.get_env_var("HIDE_INPUTS") == "true" - ) + else ls_utils.get_env_var("HIDE_INPUTS") == "true" ) self._hide_outputs = ( hide_outputs if hide_outputs is not None - else ( - anonymizer - if anonymizer is not None - else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" - ) + else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) def _repr_html_(self) -> str: @@ -1250,17 +1247,23 @@ def _create_run(self, run_create: dict): ) def _hide_run_inputs(self, inputs: dict): - if self._hide_inputs is False: - return inputs if self._hide_inputs is True: return {} + if self._anonymizer: + json_inputs = orjson.loads(_dumps_json(inputs)) + return self._anonymizer(json_inputs) + if self._hide_inputs is False: + return inputs return self._hide_inputs(inputs) def _hide_run_outputs(self, outputs: dict): - if self._hide_outputs is False: - return outputs if self._hide_outputs is True: return {} + if self._anonymizer: + json_outputs = orjson.loads(_dumps_json(outputs)) + return self._anonymizer(json_outputs) + if self._hide_outputs is False: + return outputs return self._hide_outputs(outputs) def batch_ingest_runs( diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index 
6feeef1d1..0e37aabb4 100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -1,7 +1,7 @@ import json import re import uuid -from typing import List, Union +from typing import List, Union, cast from unittest.mock import MagicMock from uuid import uuid4 @@ -108,21 +108,28 @@ def my_func(body: str, from_: MyInput) -> MyOutput: for call in mock_client.session.request.mock_calls if call.args and call.args[1].endswith("runs") ] + patches = [ json.loads(call[2]["data"]) for call in mock_client.session.request.mock_calls - if call.args and call.args[1].endswith("patches") + if call.args + and cast(str, call.args[0]).lower() == "patch" + and "/runs" in call.args[1] ] + expected_inputs = {"from_": {"from_email": "[email address]"}, "body": body_} expected_outputs = { - "user_email": "[email address]", - "user_id": "[uuid]", - "body": body_, + "output": { + "user_email": "[email address]", + "user_id": "[uuid]", + "body": body_, + } } assert len(posts) == 1 posted_data = posts[0] assert posted_data["inputs"] == expected_inputs assert len(patches) == 1 patched_data = patches[0] - assert patched_data["inputs"] == expected_inputs + if "inputs" in patched_data: + assert patched_data["inputs"] == expected_inputs assert patched_data["outputs"] == expected_outputs From 02f7cce2b692423cb87960c9e74cbb953db9b028 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 17 Jun 2024 22:27:02 +0200 Subject: [PATCH 173/373] Code review --- python/langsmith/anonymizer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index c71bd02da..c1e783fea 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -155,6 +155,7 @@ def create_anonymizer( replacer: ReplacerType, options: Optional[ReplacerOptions] = None ) -> Callable[[Any], Any]: """Create an anonymizer function.""" + processor = _get_node_processor(replacer) def anonymizer(data: 
Any) -> Any: nodes = _extract_string_nodes( @@ -162,7 +163,7 @@ def anonymizer(data: Any) -> Any: ) mutate_value = copy.deepcopy(data) if options and options["deepClone"] else data - to_update = _get_node_processor(replacer).mask_nodes(nodes) + to_update = processor.mask_nodes(nodes) for node in to_update: if not node["path"]: mutate_value = node["value"] From 0f8eb30a854cb6afbba7a4960717ba41c32197fd Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 17 Jun 2024 23:24:53 +0200 Subject: [PATCH 174/373] Add test for native message objects --- js/src/tests/anonymizer.test.ts | 64 +++++++++++++++++++++++++++++++ js/src/tests/utils/mock_client.ts | 5 ++- 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/js/src/tests/anonymizer.test.ts b/js/src/tests/anonymizer.test.ts index 962e3f20e..880d4d16c 100644 --- a/js/src/tests/anonymizer.test.ts +++ b/js/src/tests/anonymizer.test.ts @@ -1,5 +1,9 @@ import { StringNodeRule, createAnonymizer } from "../anonymizer/index.js"; import { v4 as uuid } from "uuid"; +import { traceable } from "../traceable.js"; +import { BaseMessage, SystemMessage } from "@langchain/core/messages"; +import { mockClient } from "./utils/mock_client.js"; +import { getAssumedTreeFromCalls } from "./utils/tree.js"; const EMAIL_REGEX = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+.[a-zA-Z]{2,}/g; const UUID_REGEX = @@ -65,3 +69,63 @@ describe("declared", () => { ); }); }); + +describe("client", () => { + test("messages", async () => { + const anonymizer = createAnonymizer([ + { pattern: EMAIL_REGEX, replace: "[email]" }, + { pattern: UUID_REGEX, replace: "[uuid]" }, + ]); + + const { client, callSpy } = mockClient({ anonymizer }); + + const id = uuid(); + const child = traceable( + (value: { messages: BaseMessage[]; values: Record }) => { + return [ + ...value.messages.map((message) => message.content.toString()), + ...Object.entries(value.values).map((lst) => lst.join(": ")), + ].join("\n"); + }, + { name: "child" } + ); + + const evaluate = traceable( 
+ (values: Record) => { + const messages = [new SystemMessage(`UUID: ${id}`)]; + return child({ messages, values }); + }, + { client, name: "evaluate", tracingEnabled: true } + ); + + const result = await evaluate({ email: "hello@example.com" }); + + expect(result).toEqual( + [`UUID: ${id}`, `email: hello@example.com`].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: ["evaluate:0", "child:1"], + data: { + "evaluate:0": { + inputs: { email: "[email]" }, + outputs: { outputs: [`UUID: [uuid]`, `email: [email]`].join("\n") }, + }, + "child:1": { + inputs: { + messages: [ + { + lc: 1, + type: "constructor", + id: ["langchain_core", "messages", "SystemMessage"], + kwargs: { content: "UUID: [uuid]" }, + }, + ], + values: { email: "[email]" }, + }, + outputs: { outputs: [`UUID: [uuid]`, `email: [email]`].join("\n") }, + }, + }, + }); + }); +}); diff --git a/js/src/tests/utils/mock_client.ts b/js/src/tests/utils/mock_client.ts index b46675865..7b985dc86 100644 --- a/js/src/tests/utils/mock_client.ts +++ b/js/src/tests/utils/mock_client.ts @@ -2,8 +2,9 @@ import { jest } from "@jest/globals"; import { Client } from "../../index.js"; -export const mockClient = () => { - const client = new Client({ autoBatchTracing: false }); +type ClientParams = Exclude[0], undefined>; +export const mockClient = (config?: Omit) => { + const client = new Client({ ...config, autoBatchTracing: false }); const callSpy = jest .spyOn((client as any).caller, "call") .mockResolvedValue({ ok: true, text: () => "" }); From dfb0802e981037d5ce417b1c53946b2a90c15079 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 17 Jun 2024 23:27:50 +0200 Subject: [PATCH 175/373] Fix lint --- python/langsmith/anonymizer.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index c1e783fea..97cf2cd55 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py 
@@ -24,7 +24,7 @@ class StringNode(TypedDict): def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNode]: - max_depth = options.get("maxDepth") or 10 + max_depth = options.get("max_depth") or 10 queue: List[Tuple[Any, int, List[Union[str, int]]]] = [(data, 0, [])] result: List[StringNode] = [] @@ -159,9 +159,11 @@ def create_anonymizer( def anonymizer(data: Any) -> Any: nodes = _extract_string_nodes( - data, {"maxDepth": (options.get("maxDepth") if options else None) or 10} + data, {"max_depth": (options.get("max_depth") if options else None) or 10} + ) + mutate_value = ( + copy.deepcopy(data) if options and options["deep_clone"] else data ) - mutate_value = copy.deepcopy(data) if options and options["deepClone"] else data to_update = processor.mask_nodes(nodes) for node in to_update: From ca1571a749617bc06013c1d1cb850d3e426acbc4 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 18 Jun 2024 00:35:16 +0200 Subject: [PATCH 176/373] Fix not running test --- python/tests/unit_tests/test_anonymizer.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index 0e37aabb4..f209e2902 100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -7,7 +7,7 @@ from pydantic import BaseModel -from langsmith import Client, traceable +from langsmith import Client, traceable, tracing_context from langsmith.anonymizer import StringNodeRule, create_anonymizer EMAIL_REGEX = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}") @@ -80,7 +80,11 @@ def test_replacer_declared_in_traceable(): ] anonymizer = create_anonymizer(replacers) mock_client = Client( - session=MagicMock(), auto_batch_tracing=False, anonymizer=anonymizer + session=MagicMock(), + auto_batch_tracing=False, + anonymizer=anonymizer, + api_url="http://localhost:1984", + api_key="123", ) user_email = "my-test@langchain.ai" @@ 
-99,7 +103,8 @@ def my_func(body: str, from_: MyInput) -> MyOutput: return MyOutput(user_email=user_email, user_id=user_id, body=body) body_ = "Hello from Pluto" - res = my_func(body_, from_=MyInput(from_email="my-from-test@langchain.ai")) + with tracing_context(enabled=True): + res = my_func(body_, from_=MyInput(from_email="my-from-test@langchain.ai")) expected = MyOutput(user_email=user_email, user_id=uuid.UUID(user_id), body=body_) assert res == expected # get posts From 433f4b809b0316ce3ed13183587d33dab8923931 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 18 Jun 2024 01:08:17 +0200 Subject: [PATCH 177/373] Use kwargs, remove options arg --- python/langsmith/anonymizer.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 97cf2cd55..78b9dd4aa 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -1,6 +1,5 @@ -import copy # noqa -import re import inspect +import re from abc import abstractmethod from collections import defaultdict from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union @@ -152,18 +151,16 @@ def _get_node_processor(replacer: ReplacerType) -> StringNodeProcessor: def create_anonymizer( - replacer: ReplacerType, options: Optional[ReplacerOptions] = None + replacer: ReplacerType, + *, + max_depth: Optional[int] = None, ) -> Callable[[Any], Any]: """Create an anonymizer function.""" processor = _get_node_processor(replacer) def anonymizer(data: Any) -> Any: - nodes = _extract_string_nodes( - data, {"max_depth": (options.get("max_depth") if options else None) or 10} - ) - mutate_value = ( - copy.deepcopy(data) if options and options["deep_clone"] else data - ) + nodes = _extract_string_nodes(data, {"max_depth": max_depth or 10}) + mutate_value = data to_update = processor.mask_nodes(nodes) for node in to_update: From 99c4a0beb45d1e1302ee5ae27a3cbf7891ce1d22 Mon Sep 17 00:00:00 2001 From: Tat Dat 
Duong Date: Tue, 18 Jun 2024 01:10:16 +0200 Subject: [PATCH 178/373] Add back noqa --- python/langsmith/anonymizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/anonymizer.py b/python/langsmith/anonymizer.py index 78b9dd4aa..77e1136f6 100644 --- a/python/langsmith/anonymizer.py +++ b/python/langsmith/anonymizer.py @@ -1,5 +1,5 @@ +import re # noqa import inspect -import re from abc import abstractmethod from collections import defaultdict from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union From 7303161758d10954242a5d310e54443d7a4a3a3b Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 18 Jun 2024 01:42:32 +0200 Subject: [PATCH 179/373] feat(js): bump to 0.1.32 --- js/package.json | 4 ++-- js/src/index.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index 179febc17..80a0b9d96 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.31", + "version": "0.1.32", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -263,4 +263,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/src/index.ts b/js/src/index.ts index 6223c5a87..851567caf 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.31"; +export const __version__ = "0.1.32"; From 30f20053b071ef0830901e03d979d1797bbb4af3 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 19 Jun 2024 18:45:04 +0200 Subject: [PATCH 180/373] Bump JS to 0.1.33 --- js/package.json | 2 +- js/src/index.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/js/package.json b/js/package.json index 80a0b9d96..ebbba5a72 100644 --- a/js/package.json +++ 
b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.32", + "version": "0.1.33", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index 851567caf..1ecd0341b 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.32"; +export const __version__ = "0.1.33"; From e9c321ea90a1f6b3894b5befdfd860527be04531 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 19 Jun 2024 18:45:23 +0200 Subject: [PATCH 181/373] Bump Python to 0.1.81 --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index afd1d8247..bd4e8b6c9 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.80" +version = "0.1.81" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From d910894f9e59f358a0e1048938ed0bc99e008e69 Mon Sep 17 00:00:00 2001 From: Ankush Gola <9536492+agola11@users.noreply.github.com> Date: Fri, 21 Jun 2024 01:49:25 -0700 Subject: [PATCH 182/373] [Python] fix: add missing stats to TracerSessionResults (#810) Co-authored-by: William FH <13333726+hinthornw@users.noreply.github.com> --- python/langsmith/client.py | 44 +++-- python/langsmith/evaluation/evaluator.py | 2 +- .../langsmith/evaluation/string_evaluator.py | 2 +- python/langsmith/run_helpers.py | 20 ++- python/langsmith/run_trees.py | 12 +- python/langsmith/schemas.py | 12 ++ python/poetry.lock | 164 +++++++++++++----- python/pyproject.toml | 7 +- python/tests/integration_tests/test_client.py | 42 +++-- python/tests/unit_tests/test_anonymizer.py | 1 + python/tests/unit_tests/test_run_trees.py | 2 +- 11 files changed, 221 insertions(+), 87 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index a213b82b8..5d05a2e7e 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -18,6 +18,7 @@ import sys import threading import time +import typing import uuid import warnings import weakref @@ -407,6 +408,24 @@ def _as_uuid(value: ID_TYPE, var: Optional[str] = None) -> uuid.UUID: ) from e +@typing.overload +def _ensure_uuid(value: Optional[Union[str, uuid.UUID]]) -> uuid.UUID: ... + + +@typing.overload +def _ensure_uuid( + value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = True +) -> Optional[uuid.UUID]: ... 
+ + +def _ensure_uuid(value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = False): + if value is None: + if accept_null: + return None + return uuid.uuid4() + return _as_uuid(value) + + @functools.lru_cache(maxsize=1) def _parse_url(url): parsed_url = urllib_parse.urlparse(url) @@ -3138,12 +3157,11 @@ def create_example( if created_at: data["created_at"] = created_at.isoformat() data["id"] = example_id or str(uuid.uuid4()) - example = ls_schemas.ExampleCreate(**data) response = self.request_with_retries( "POST", "/examples", headers={**self._headers, "Content-Type": "application/json"}, - data=example.json(), + data=_dumps_json({k: v for k, v in data.items() if v is not None}), ) ls_utils.raise_for_status_with_text(response) result = response.json() @@ -3275,7 +3293,7 @@ def update_example( Dict[str, Any] The updated example. """ - example = ls_schemas.ExampleUpdate( + example = dict( inputs=inputs, outputs=outputs, dataset_id=dataset_id, @@ -3286,7 +3304,7 @@ def update_example( "PATCH", f"/examples/{_as_uuid(example_id, 'example_id')}", headers={**self._headers, "Content-Type": "application/json"}, - data=example.json(exclude_none=True), + data=_dumps_json({k: v for k, v in example.items() if v is not None}), ) ls_utils.raise_for_status_with_text(response) return response.json() @@ -3386,7 +3404,7 @@ def _select_eval_results( results_ = cast(List[ls_evaluator.EvaluationResult], results["results"]) else: results_ = [ - ls_evaluator.EvaluationResult(**{"key": fn_name, **results}) + ls_evaluator.EvaluationResult(**{"key": fn_name, **results}) # type: ignore[arg-type] ] else: raise TypeError( @@ -3631,8 +3649,8 @@ def create_feedback( ) feedback_source.metadata["__run"] = _run_meta feedback = ls_schemas.FeedbackCreate( - id=feedback_id or uuid.uuid4(), - run_id=run_id, + id=_ensure_uuid(feedback_id), + run_id=_ensure_uuid(run_id), key=key, score=score, value=value, @@ -3642,9 +3660,11 @@ def create_feedback( 
created_at=datetime.datetime.now(datetime.timezone.utc), modified_at=datetime.datetime.now(datetime.timezone.utc), feedback_config=feedback_config, - session_id=project_id, - comparative_experiment_id=comparative_experiment_id, - feedback_group_id=feedback_group_id, + session_id=_ensure_uuid(project_id, accept_null=True), + comparative_experiment_id=_ensure_uuid( + comparative_experiment_id, accept_null=True + ), + feedback_group_id=_ensure_uuid(feedback_group_id, accept_null=True), ) feedback_block = _dumps_json(feedback.dict(exclude_none=True)) self.request_with_retries( @@ -4038,8 +4058,6 @@ def list_annotation_queues( ): yield ls_schemas.AnnotationQueue( **queue, - _host_url=self._host_url, - _tenant_id=self._get_optional_tenant_id(), ) if limit is not None and i + 1 >= limit: break @@ -4078,8 +4096,6 @@ def create_annotation_queue( ls_utils.raise_for_status_with_text(response) return ls_schemas.AnnotationQueue( **response.json(), - _host_url=self._host_url, - _tenant_id=self._get_optional_tenant_id(), ) def read_annotation_queue(self, queue_id: ID_TYPE) -> ls_schemas.AnnotationQueue: diff --git a/python/langsmith/evaluation/evaluator.py b/python/langsmith/evaluation/evaluator.py index ee732a351..47797e646 100644 --- a/python/langsmith/evaluation/evaluator.py +++ b/python/langsmith/evaluation/evaluator.py @@ -22,7 +22,7 @@ try: from pydantic.v1 import BaseModel, Field, ValidationError # type: ignore[import] except ImportError: - from pydantic import BaseModel, Field, ValidationError + from pydantic import BaseModel, Field, ValidationError # type: ignore[assignment] from functools import wraps diff --git a/python/langsmith/evaluation/string_evaluator.py b/python/langsmith/evaluation/string_evaluator.py index 749795604..423ddedba 100644 --- a/python/langsmith/evaluation/string_evaluator.py +++ b/python/langsmith/evaluation/string_evaluator.py @@ -35,4 +35,4 @@ def evaluate_run( run_input = run.inputs[self.input_key] run_output = run.outputs[self.prediction_key] 
grading_results = self.grading_function(run_input, run_output, answer) - return EvaluationResult(key=self.evaluation_name, **grading_results) + return EvaluationResult(**{"key": self.evaluation_name, **grading_results}) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 531358cbc..3d4753f67 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -728,14 +728,16 @@ def trace( else: new_run = run_trees.RunTree( name=name, - id=run_id or uuid.uuid4(), - reference_example_id=reference_example_id, + id=ls_client._ensure_uuid(run_id), + reference_example_id=ls_client._ensure_uuid( + reference_example_id, accept_null=True + ), run_type=run_type, extra=extra_outer, - project_name=project_name_, + project_name=project_name_, # type: ignore[arg-type] inputs=inputs or {}, tags=tags_, - client=client, + client=client, # type: ignore[arg-type] ) new_run.post() _PARENT_RUN_TREE.set(new_run) @@ -1094,7 +1096,7 @@ def _setup_run( ) else: new_run = run_trees.RunTree( - id=id_, + id=ls_client._ensure_uuid(id_), name=name_, serialized={ "name": name, @@ -1103,11 +1105,13 @@ def _setup_run( }, inputs=inputs, run_type=run_type, - reference_example_id=reference_example_id, - project_name=selected_project, + reference_example_id=ls_client._ensure_uuid( + reference_example_id, accept_null=True + ), + project_name=selected_project, # type: ignore[arg-type] extra=extra_inner, tags=tags_, - client=client_, + client=client_, # type: ignore ) try: new_run.post() diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 591472e04..ffe997c67 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -11,14 +11,18 @@ try: from pydantic.v1 import Field, root_validator, validator # type: ignore[import] except ImportError: - from pydantic import Field, root_validator, validator + from pydantic import ( # type: ignore[assignment, no-redef] + Field, + root_validator, + validator, + ) import 
threading import urllib.parse from langsmith import schemas as ls_schemas from langsmith import utils -from langsmith.client import ID_TYPE, RUN_TYPE_T, Client, _dumps_json +from langsmith.client import ID_TYPE, RUN_TYPE_T, Client, _dumps_json, _ensure_uuid logger = logging.getLogger(__name__) @@ -218,7 +222,7 @@ def create_child( serialized_ = serialized or {"name": name} run = RunTree( name=name, - id=run_id or uuid4(), + id=_ensure_uuid(run_id), serialized=serialized_, inputs=inputs or {}, outputs=outputs or {}, @@ -229,7 +233,7 @@ def create_child( end_time=end_time, extra=extra or {}, parent_run=self, - session_name=self.session_name, + project_name=self.session_name, client=self.client, tags=tags, ) diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 758530e03..453aa13de 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -573,6 +573,18 @@ class TracerSessionResult(TracerSession): """Feedback stats for the project.""" run_facets: Optional[List[Dict[str, Any]]] """Facets for the runs in the project.""" + total_cost: Optional[Decimal] + """The total estimated LLM cost associated with the completion tokens.""" + prompt_cost: Optional[Decimal] + """The estimated cost associated with the prompt (input) tokens.""" + completion_cost: Optional[Decimal] + """The estimated cost associated with the completion tokens.""" + first_token_p50: Optional[timedelta] + """The median (50th percentile) time to process the first token.""" + first_token_p99: Optional[timedelta] + """The 99th percentile time to process the first token.""" + error_rate: Optional[float] + """The error rate for the project.""" @runtime_checkable diff --git a/python/poetry.lock b/python/poetry.lock index 2c7bbe8d5..2d896248c 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1,5 +1,19 @@ # This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
+[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "anyio" version = "4.4.0" @@ -686,13 +700,13 @@ files = [ [[package]] name = "openai" -version = "1.34.0" +version = "1.35.3" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.34.0-py3-none-any.whl", hash = "sha256:018623c2f795424044675c6230fa3bfbf98d9e0aab45d8fd116f2efb2cfb6b7e"}, - {file = "openai-1.34.0.tar.gz", hash = "sha256:95c8e2da4acd6958e626186957d656597613587195abd0fb2527566a93e76770"}, + {file = "openai-1.35.3-py3-none-any.whl", hash = "sha256:7b26544cef80f125431c073ffab3811d2421fbb9e30d3bd5c2436aba00b042d5"}, + {file = "openai-1.35.3.tar.gz", hash = "sha256:d6177087f150b381d49499be782d764213fdf638d391b29ca692b84dd675a389"}, ] [package.dependencies] @@ -860,55 +874,113 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pydantic" -version = "1.10.16" -description = "Data validation and settings management using python type hints" +version = "2.7.4" +description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-1.10.16-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1a539ac40551b01a85e899829aa43ca8036707474af8d74b48be288d4d2d2846"}, - {file = "pydantic-1.10.16-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a4fcc7b0b8038dbda2dda642cff024032dfae24a7960cc58e57a39eb1949b9b"}, - 
{file = "pydantic-1.10.16-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4660dd697de1ae2d4305a85161312611f64d5360663a9ba026cd6ad9e3fe14c3"}, - {file = "pydantic-1.10.16-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:900a787c574f903a97d0bf52a43ff3b6cf4fa0119674bcfc0e5fd1056d388ad9"}, - {file = "pydantic-1.10.16-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d30192a63e6d3334c3f0c0506dd6ae9f1dce7b2f8845518915291393a5707a22"}, - {file = "pydantic-1.10.16-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:16cf23ed599ca5ca937e37ba50ab114e6b5c387eb43a6cc533701605ad1be611"}, - {file = "pydantic-1.10.16-cp310-cp310-win_amd64.whl", hash = "sha256:8d23111f41d1e19334edd51438fd57933f3eee7d9d2fa8cc3f5eda515a272055"}, - {file = "pydantic-1.10.16-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef287b8d7fc0e86a8bd1f902c61aff6ba9479c50563242fe88ba39692e98e1e0"}, - {file = "pydantic-1.10.16-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b9ded699bfd3b3912d796ff388b0c607e6d35d41053d37aaf8fd6082c660de9a"}, - {file = "pydantic-1.10.16-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:daeb199814333e4426c5e86d7fb610f4e230289f28cab90eb4de27330bef93cf"}, - {file = "pydantic-1.10.16-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5973843f1fa99ec6c3ac8d1a8698ac9340b35e45cca6c3e5beb5c3bd1ef15de6"}, - {file = "pydantic-1.10.16-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6b8a7788a8528a558828fe4a48783cafdcf2612d13c491594a8161dc721629c"}, - {file = "pydantic-1.10.16-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8abaecf54dacc9d991dda93c3b880d41092a8924cde94eeb811d7d9ab55df7d8"}, - {file = "pydantic-1.10.16-cp311-cp311-win_amd64.whl", hash = "sha256:ddc7b682fbd23f051edc419dc6977e11dd2dbdd0cef9d05f0e15d1387862d230"}, - {file = "pydantic-1.10.16-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:067c2b5539f7839653ad8c3d1fc2f1343338da8677b7b2172abf3cd3fdc8f719"}, - {file = "pydantic-1.10.16-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d1fc943583c046ecad0ff5d6281ee571b64e11b5503d9595febdce54f38b290"}, - {file = "pydantic-1.10.16-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18548b30ccebe71d380b0886cc44ea5d80afbcc155e3518792f13677ad06097d"}, - {file = "pydantic-1.10.16-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4e92292f9580fc5ea517618580fac24e9f6dc5657196e977c194a8e50e14f5a9"}, - {file = "pydantic-1.10.16-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5da8bc4bb4f85b8c97cc7f11141fddbbd29eb25e843672e5807e19cc3d7c1b7f"}, - {file = "pydantic-1.10.16-cp37-cp37m-win_amd64.whl", hash = "sha256:a04ee1ea34172b87707a6ecfcdb120d7656892206b7c4dbdb771a73e90179fcb"}, - {file = "pydantic-1.10.16-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4fa86469fd46e732242c7acb83282d33f83591a7e06f840481327d5bf6d96112"}, - {file = "pydantic-1.10.16-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:89c2783dc261726fe7a5ce1121bce29a2f7eb9b1e704c68df2b117604e3b346f"}, - {file = "pydantic-1.10.16-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78e59fa919fa7a192f423d190d8660c35dd444efa9216662273f36826765424b"}, - {file = "pydantic-1.10.16-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7e82a80068c77f4b074032e031e642530b6d45cb8121fc7c99faa31fb6c6b72"}, - {file = "pydantic-1.10.16-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d82d5956cee27a30e26a5b88d00a6a2a15a4855e13c9baf50175976de0dc282c"}, - {file = "pydantic-1.10.16-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b7b99424cc0970ff08deccb549b5a6ec1040c0b449eab91723e64df2bd8fdca"}, - {file = "pydantic-1.10.16-cp38-cp38-win_amd64.whl", hash = "sha256:d97a35e1ba59442775201657171f601a2879e63517a55862a51f8d67cdfc0017"}, - {file = 
"pydantic-1.10.16-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9d91f6866fd3e303c632207813ef6bc4d86055e21c5e5a0a311983a9ac5f0192"}, - {file = "pydantic-1.10.16-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8d3c71d14c8bd26d2350c081908dbf59d5a6a8f9596d9ef2b09cc1e61c8662b"}, - {file = "pydantic-1.10.16-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b73e6386b439b4881d79244e9fc1e32d1e31e8d784673f5d58a000550c94a6c0"}, - {file = "pydantic-1.10.16-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5f039881fb2ef86f6de6eacce6e71701b47500355738367413ccc1550b2a69cf"}, - {file = "pydantic-1.10.16-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3895ddb26f22bdddee7e49741486aa7b389258c6f6771943e87fc00eabd79134"}, - {file = "pydantic-1.10.16-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:55b945da2756b5cef93d792521ad0d457fdf2f69fd5a2d10a27513f5281717dd"}, - {file = "pydantic-1.10.16-cp39-cp39-win_amd64.whl", hash = "sha256:22dd265c77c3976a34be78409b128cb84629284dfd1b69d2fa1507a36f84dc8b"}, - {file = "pydantic-1.10.16-py3-none-any.whl", hash = "sha256:aa2774ba5412fd1c5cb890d08e8b0a3bb5765898913ba1f61a65a4810f03cf29"}, - {file = "pydantic-1.10.16.tar.gz", hash = "sha256:8bb388f6244809af69ee384900b10b677a69f1980fdc655ea419710cffcb5610"}, + {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, + {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, ] [package.dependencies] -typing-extensions = ">=4.2.0" +annotated-types = ">=0.4.0" +pydantic-core = "2.18.4" +typing-extensions = ">=4.6.1" [package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.18.4" +description = "Core functionality for Pydantic validation and serialization" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, + {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, + {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, + {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, + {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, + {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, 
+ {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, + {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, + {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, + {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, + {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, + {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, + {file = 
"pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, + {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, + {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, + {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, + {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, + {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, + {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = 
"sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, + {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, + {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, + {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, + {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, + {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, + {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, + 
{file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, + {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, + {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, + {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, + {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, + {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, + {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = 
"sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, + {file = 
"pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, + {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pytest" @@ -1640,4 +1712,4 @@ vcr = [] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "b88660bba119846142d1a2e19b7a3c7a077eefe3e54763bdb1c14224ff098c88" +content-hash = "e062da3051244f0d59796d6659149eee4e2f46d9332714d57edd459c80b7d8cd" diff --git a/python/pyproject.toml b/python/pyproject.toml index bd4e8b6c9..64656d057 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -26,7 +26,7 @@ langsmith = "langsmith.cli.main:main" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -pydantic = ">=1,<3" +pydantic = [{version = ">=1,<3", python = "<3.12.4"}, {version = "^2.7.4", python=">=3.12.4"}] requests = "^2" orjson = "^3.9.14" @@ -35,7 +35,6 @@ pytest = "^7.3.1" black = ">=23.3,<25.0" mypy = "^1.9.0" ruff = "^0.3.4" -pydantic = ">=1,<2" types-requests = "^2.31.0.1" pandas-stubs = "^2.0.1.230501" types-pyyaml = "^6.0.12.10" @@ -93,6 +92,10 @@ docstring-code-format = true docstring-code-line-length = 80 
[tool.mypy] +plugins = [ + "pydantic.v1.mypy", + "pydantic.mypy", +] ignore_missing_imports = "True" disallow_untyped_defs = "True" diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 9107cc9f9..c4d59e8c4 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -8,7 +8,7 @@ import sys import time from datetime import timedelta -from typing import Any, Callable, Dict, cast +from typing import Any, Callable, Dict from uuid import uuid4 import pytest @@ -444,6 +444,7 @@ def test_create_chat_example( def test_batch_ingest_runs(langchain_client: Client) -> None: _session = "__test_batch_ingest_runs" trace_id = uuid4() + trace_id_2 = uuid4() run_id_2 = uuid4() current_time = datetime.datetime.now(datetime.timezone.utc).strftime( "%Y%m%dT%H%M%S%fZ" @@ -462,6 +463,16 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: "inputs": {"input1": 1, "input2": 2}, "outputs": {"output1": 3, "output2": 4}, }, + { + "id": str(trace_id_2), + "session_name": _session, + "name": "run 3", + "run_type": "chain", + "dotted_order": f"{current_time}{str(trace_id_2)}", + "trace_id": str(trace_id_2), + "inputs": {"input1": 1, "input2": 2}, + "error": "error", + }, { "id": str(run_id_2), "session_name": _session, @@ -481,7 +492,7 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: f"{later_time}{str(run_id_2)}", "trace_id": str(trace_id), "parent_run_id": str(trace_id), - "outputs": {"output1": 7, "output2": 8}, + "outputs": {"output1": 4, "output2": 5}, }, ] langchain_client.batch_ingest_runs(create=runs_to_create, update=runs_to_update) @@ -491,10 +502,11 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: try: runs = list( langchain_client.list_runs( - project_name=_session, run_ids=[str(trace_id), str(run_id_2)] + project_name=_session, + run_ids=[str(trace_id), str(run_id_2), str(trace_id_2)], ) ) - if len(runs) == 2: + if len(runs) == 
3: break raise LangSmithError("Runs not created yet") except LangSmithError: @@ -502,20 +514,30 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: wait += 1 else: raise ValueError("Runs not created in time") - assert len(runs) == 2 + assert len(runs) == 3 # Write all the assertions here - runs = sorted(runs, key=lambda x: cast(str, x.dotted_order)) - assert len(runs) == 2 + assert len(runs) == 3 # Assert inputs and outputs of run 1 - run1 = runs[0] + run1 = next(run for run in runs if run.id == trace_id) assert run1.inputs == {"input1": 1, "input2": 2} assert run1.outputs == {"output1": 3, "output2": 4} # Assert inputs and outputs of run 2 - run2 = runs[1] + run2 = next(run for run in runs if run.id == run_id_2) assert run2.inputs == {"input1": 5, "input2": 6} - assert run2.outputs == {"output1": 7, "output2": 8} + assert run2.outputs == {"output1": 4, "output2": 5} + + # Assert inputs and outputs of run 3 + run3 = next(run for run in runs if run.id == trace_id_2) + assert run3.inputs == {"input1": 1, "input2": 2} + assert run3.error == "error" + + # read the project + result = langchain_client.read_project(project_name=_session) + assert result.error_rate > 0 + assert result.first_token_p50 is None + assert result.first_token_p99 is None langchain_client.delete_project(project_name=_session) diff --git a/python/tests/unit_tests/test_anonymizer.py b/python/tests/unit_tests/test_anonymizer.py index f209e2902..147f46d1c 100644 --- a/python/tests/unit_tests/test_anonymizer.py +++ b/python/tests/unit_tests/test_anonymizer.py @@ -1,3 +1,4 @@ +# mypy: disable-error-code="annotation-unchecked" import json import re import uuid diff --git a/python/tests/unit_tests/test_run_trees.py b/python/tests/unit_tests/test_run_trees.py index ba25860dc..77618ab5f 100644 --- a/python/tests/unit_tests/test_run_trees.py +++ b/python/tests/unit_tests/test_run_trees.py @@ -15,7 +15,7 @@ def test_run_tree_accepts_tpe() -> None: name="My Chat Bot", inputs={"text": "Summarize 
this morning's meetings."}, client=mock_client, - executor=ThreadPoolExecutor(), + executor=ThreadPoolExecutor(), # type: ignore ) From 864af45a96a419058b9b338656d3af9f3ff3660b Mon Sep 17 00:00:00 2001 From: TMK04 Date: Fri, 14 Jun 2024 16:59:11 +0800 Subject: [PATCH 183/373] fix(js): keep traceable wrappedFunc returnValue props --- js/src/singletons/types.ts | 8 ++++--- js/src/tests/traceable.test.ts | 38 ++++++++++++++++++++++++++++++++++ js/src/traceable.ts | 14 ++++++++++--- 3 files changed, 54 insertions(+), 6 deletions(-) diff --git a/js/src/singletons/types.ts b/js/src/singletons/types.ts index 1ebe0eb19..dd7efabf3 100644 --- a/js/src/singletons/types.ts +++ b/js/src/singletons/types.ts @@ -32,11 +32,10 @@ type UnionToIntersection = (U extends any ? (x: U) => void : never) extends ( ? I : never; // eslint-disable-next-line @typescript-eslint/no-explicit-any - export type TraceableFunction any> = // function overloads are represented as intersections rather than unions // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448 - Func extends { + (Func extends { (...args: infer A1): infer R1; (...args: infer A2): infer R2; (...args: infer A3): infer R3; @@ -70,6 +69,9 @@ export type TraceableFunction any> = (...args: infer A1): infer R1; } ? 
UnionToIntersection> - : never; + : never) & { + // Other properties of Func + [K in keyof Func]: Func[K]; + }; export type RunTreeLike = RunTree; diff --git a/js/src/tests/traceable.test.ts b/js/src/tests/traceable.test.ts index 523a194b3..4c755c31e 100644 --- a/js/src/tests/traceable.test.ts +++ b/js/src/tests/traceable.test.ts @@ -513,6 +513,44 @@ describe("async generators", () => { }, }); }); + + test("iterable with props", async () => { + const { client, callSpy } = mockClient(); + + const iterableTraceable = traceable( + function iterableWithProps() { + return { + *[Symbol.asyncIterator]() { + yield 0; + }, + prop: "value", + }; + }, + { + client, + tracingEnabled: true, + } + ); + + const numbers: number[] = []; + const iterableWithProps = await iterableTraceable(); + for await (const num of iterableWithProps) { + numbers.push(num); + } + + expect(numbers).toEqual([0]); + + expect(iterableWithProps.prop).toBe("value"); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: ["iterableWithProps:0"], + edges: [], + data: { + "iterableWithProps:0": { + outputs: { outputs: [0] }, + }, + }, + }); + }); }); describe("deferred input", () => { diff --git a/js/src/traceable.ts b/js/src/traceable.ts index ee977e58f..1f009c680 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -434,14 +434,13 @@ export function traceable any>( return chunks; } - async function* wrapAsyncGeneratorForTracing( - iterable: AsyncIterable, + async function* wrapAsyncIteratorForTracing( + iterator: AsyncIterator, snapshot: ReturnType | undefined ) { let finished = false; const chunks: unknown[] = []; try { - const iterator = iterable[Symbol.asyncIterator](); while (true) { const { value, done } = await (snapshot ? 
snapshot(() => iterator.next()) @@ -464,6 +463,15 @@ export function traceable any>( await handleEnd(); } } + function wrapAsyncGeneratorForTracing( + iterable: AsyncIterable, + snapshot: ReturnType | undefined + ) { + const iterator = iterable[Symbol.asyncIterator](); + const wrappedIterator = wrapAsyncIteratorForTracing(iterator, snapshot); + iterable[Symbol.asyncIterator] = () => wrappedIterator; + return iterable; + } async function handleEnd() { const onEnd = config?.on_end; From 89d195687c8f9a344a38194fba973658862fe3ea Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Mon, 24 Jun 2024 14:16:02 -0700 Subject: [PATCH 184/373] fix: add LANGSMITH_RUNS_ENDPOINTS to excluded --- python/langsmith/env/_runtime_env.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/langsmith/env/_runtime_env.py b/python/langsmith/env/_runtime_env.py index 7f25b3572..354f0eca1 100644 --- a/python/langsmith/env/_runtime_env.py +++ b/python/langsmith/env/_runtime_env.py @@ -176,6 +176,7 @@ def get_langchain_env_var_metadata() -> dict: "LANGCHAIN_TRACING_V2", "LANGCHAIN_PROJECT", "LANGCHAIN_SESSION", + "LANGSMITH_RUNS_ENDPOINTS", } langchain_metadata = { k: v From 84b4f8fc7df75ed140aa826c5eab54c1ed19b7c7 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Mon, 24 Jun 2024 14:20:28 -0700 Subject: [PATCH 185/373] bump version --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index bd4e8b6c9..2bd513b83 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.81" +version = "0.1.82" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From 6f4f33ac84e152e7e948595f43692e30586b2a51 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 21 Jun 2024 23:41:10 +0100 Subject: [PATCH 186/373] fix(anonymizer): make deep cloning the default --- js/src/anonymizer/index.ts | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts index 60becaf9c..28e3d088f 100644 --- a/js/src/anonymizer/index.ts +++ b/js/src/anonymizer/index.ts @@ -60,19 +60,14 @@ export type ReplacerType = export function createAnonymizer( replacer: ReplacerType, - options?: { - maxDepth?: number; - deepClone?: boolean; - } + options?: { maxDepth?: number } ) { return (data: T): T => { const nodes = extractStringNodes(data, { maxDepth: options?.maxDepth, }); - // by default we opt-in to mutate the value directly - // to improve performance - let mutateValue = options?.deepClone ? deepClone(data) : data; + let mutateValue = deepClone(data); const processor: StringNodeProcessor = Array.isArray(replacer) ? 
(() => { From f579c9fbf8195e0e5002fdc563f388215e7a3b70 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 26 Jun 2024 12:29:18 +0100 Subject: [PATCH 187/373] Fix tests, rely on JSON payload instead --- js/src/anonymizer/index.ts | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts index 28e3d088f..cf5996066 100644 --- a/js/src/anonymizer/index.ts +++ b/js/src/anonymizer/index.ts @@ -36,10 +36,6 @@ function extractStringNodes(data: unknown, options: { maxDepth?: number }) { } function deepClone(data: T): T { - if ("structuredClone" in globalThis) { - return globalThis.structuredClone(data); - } - return JSON.parse(JSON.stringify(data)); } @@ -63,12 +59,11 @@ export function createAnonymizer( options?: { maxDepth?: number } ) { return (data: T): T => { - const nodes = extractStringNodes(data, { + let mutateValue = deepClone(data); + const nodes = extractStringNodes(mutateValue, { maxDepth: options?.maxDepth, }); - let mutateValue = deepClone(data); - const processor: StringNodeProcessor = Array.isArray(replacer) ? 
(() => { const replacers: [regex: RegExp, replace: string][] = replacer.map( From 488ceb71c9e2ad72eccb90e91590eb3a47a750d8 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 26 Jun 2024 12:30:22 +0100 Subject: [PATCH 188/373] Bump to 0.1.34 --- js/package.json | 2 +- js/src/index.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/js/package.json b/js/package.json index ebbba5a72..b1f3dbb95 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.33", + "version": "0.1.34", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index 1ecd0341b..2c38b2949 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.33"; +export const __version__ = "0.1.34"; From d37b3f6556378b077ee3a2e811df743a2d465125 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 26 Jun 2024 12:30:33 +0100 Subject: [PATCH 189/373] Format JS file --- js/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/package.json b/js/package.json index b1f3dbb95..ae1e45929 100644 --- a/js/package.json +++ b/js/package.json @@ -263,4 +263,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} From f6f6655c3ece1de996aabc9cc3d7c9e305066e78 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Thu, 27 Jun 2024 10:30:57 -0700 Subject: [PATCH 190/373] [Python] Add offset arg explicitly --- python/langsmith/client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 5d05a2e7e..00dab2dd9 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3203,6 +3203,8 @@ def 
list_examples( as_of: Optional[Union[datetime.datetime, str]] = None, splits: Optional[Sequence[str]] = None, inline_s3_urls: bool = True, + *, + offset: int = 0, limit: Optional[int] = None, metadata: Optional[dict] = None, **kwargs: Any, @@ -3225,6 +3227,7 @@ def list_examples( Returns examples only from the specified splits. inline_s3_urls (bool, optional): Whether to inline S3 URLs. Defaults to True. + offset (int): The offset to start from. Defaults to 0. limit (int, optional): The maximum number of examples to return. Yields: @@ -3232,6 +3235,7 @@ def list_examples( """ params: Dict[str, Any] = { **kwargs, + "offset": offset, "id": example_ids, "as_of": ( as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of From 4108d8db28dede6014c762b9c6794791cfab57b6 Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Sun, 30 Jun 2024 22:31:24 -0700 Subject: [PATCH 191/373] rfc -- LLMEvaluator --- python/langsmith/evaluation/llm_evaluator.py | 216 ++++++++++++++++++ .../integration_tests/test_llm_evaluator.py | 168 ++++++++++++++ 2 files changed, 384 insertions(+) create mode 100644 python/langsmith/evaluation/llm_evaluator.py create mode 100644 python/tests/integration_tests/test_llm_evaluator.py diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py new file mode 100644 index 000000000..7c707f6a8 --- /dev/null +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -0,0 +1,216 @@ +"""Contains the LLMEvaluator class for building LLM-as-a-judge evaluators.""" + +from typing import Any, Callable, List, Optional, Tuple, Union + +from pydantic import BaseModel + +from langsmith.evaluation import EvaluationResult, EvaluationResults, RunEvaluator +from langsmith.schemas import Example, Run + + +class CategoricalScoreConfig(BaseModel): + """Configuration for a categorical score.""" + + key: str + choices: List[str] + description: str + include_explanation: bool = False + + +class ContinuousScoreConfig(BaseModel): + 
"""Configuration for a continuous score.""" + + key: str + min: float = 0 + max: float = 1 + description: str + include_explanation: bool = False + + +def _create_score_json_schema( + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], +) -> dict: + properties: dict[str, Any] = {} + if isinstance(score_config, CategoricalScoreConfig): + properties["score"] = { + "type": "string", + "enum": score_config.choices, + "description": f"The score for the evaluation, one of " + f"{', '.join(score_config.choices)}.", + } + elif isinstance(score_config, ContinuousScoreConfig): + properties["score"] = { + "type": "number", + "minimum": score_config.min, + "maximum": score_config.max, + "description": f"The score for the evaluation, between " + f"{score_config.min} and {score_config.max}, inclusive.", + } + else: + raise ValueError("Invalid score type. Must be 'categorical' or 'continuous'") + + if score_config.include_explanation: + properties["explanation"] = { + "type": "string", + "description": "The explanation for the score.", + } + + return { + "title": score_config.key, + "description": score_config.description, + "type": "object", + "properties": properties, + "required": ( + ["score", "explanation"] if score_config.include_explanation else ["score"] + ), + } + + +class LLMEvaluator(RunEvaluator): + """A class for building LLM-as-a-judge evaluators.""" + + def __init__( + self, + *, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Example], dict]] = None, + model: Optional[str] = "gpt-3.5-turbo", + model_provider: Optional[str] = "openai", + **kwargs, + ): + """Initialize the LLMEvaluator. + + Args: + prompt_template (Union[str, List[Tuple[str, str]]): The prompt + template to use for the evaluation. If a string is provided, it is + assumed to be a system message. 
+ score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The configuration for the score, either categorical or continuous. + map_variables (Optional[Callable[[Run, Example], dict]], optional): + A function that maps the run and example to the variables in the + prompt. Defaults to None. If None, it is assumed that the prompt + only requires 'input', 'output', and 'expected'. + model (Optional[str], optional): The model to use for the evaluation. + Defaults to "gpt-3.5-turbo". + model_provider (Optional[str], optional): The model provider to use + for the evaluation. Defaults to "openai". + """ + try: + from langchain_core.prompts import ChatPromptTemplate + except ImportError as e: + raise ImportError( + "LLMEvaluator requires langchain-core to be installed. " + "Please install langchain-core by running `pip install langchain-core`." + ) from e + try: + from langchain.chat_models import init_chat_model + except ImportError as e: + raise ImportError( + "LLMEvaluator requires langchain to be installed. " + "Please install langchain by running `pip install langchain`." + ) from e + if isinstance(prompt_template, str): + self.prompt = ChatPromptTemplate.from_messages( + [("system", prompt_template)] + ) + else: + self.prompt = ChatPromptTemplate.from_messages(prompt_template) + + if set(self.prompt.input_variables) - {"input", "output", "expected"}: + if not map_variables: + raise ValueError( + "map_inputs must be provided if the prompt template contains " + "variables other than 'input', 'output', and 'expected'" + ) + self.map_variables = map_variables + + self.score_config = score_config + self.score_schema = _create_score_json_schema(self.score_config) + + try: + model = init_chat_model( + model=model, model_provider=model_provider, **kwargs + ).with_structured_output(self.score_schema) + except ImportError as e: + raise ImportError( + "LLMEvaluator is missing a required langchain integration." 
+ ) from e + except ValueError as e: + raise ValueError( + "Error loading the model. Please check the model, model_provider, " + "and that the appropriate secrets are set." + ) from e + + self.runnable = self.prompt | model + + def evaluate_run( + self, run: Run, example: Optional[Example] = None + ) -> Union[EvaluationResult, EvaluationResults]: + """Evaluate a run.""" + if self.map_variables: + variables = self.map_variables(run, example) + if set(self.prompt.input_variables) - set(variables.keys()): + raise ValueError( + "map_variables must return a dictionary with keys for all of the " + "variables in the prompt. Expected variables: " + f"{self.prompt.input_variables}. Returned variables: " + f"{variables.keys()}" + ) + output = self.runnable.invoke(variables) + else: + variables = {} + if "input" in self.prompt.input_variables: + if len(run.inputs) == 0: + raise ValueError( + "No input keys are present in run.inputs but the prompt " + "requires 'input'." + ) + if len(run.inputs) != 1: + raise ValueError( + "Multiple input keys are present in run.inputs. Please provide " + "a map_variables function." + ) + variables["input"] = list(run.inputs.values())[0] + if "output" in self.prompt.input_variables: + if len(run.outputs) == 0: + raise ValueError( + "No output keys are present in run.outputs but the prompt " + "requires 'output'." + ) + if len(run.outputs) != 1: + raise ValueError( + "Multiple output keys are present in run.outputs. Please " + "provide a map_variables function." + ) + variables["output"] = list(run.outputs.values())[0] + if "expected" in self.prompt.input_variables: + if not example: + raise ValueError( + "No example is provided but the prompt requires 'expected'." + ) + if len(example.outputs) == 0: + raise ValueError( + "No output keys are present in example.outputs but the prompt " + "requires 'expected'." + ) + if len(example.outputs) != 1: + raise ValueError( + "Multiple output keys are present in example.outputs. 
Please " + "provide a map_variables function." + ) + variables["expected"] = list(example.outputs.values())[0] + output = self.runnable.invoke(variables) + + if isinstance(self.score_config, CategoricalScoreConfig): + value = output["score"] + explanation = output.get("explanation", None) + return EvaluationResult( + key=self.score_config.key, value=value, comment=explanation + ) + elif isinstance(self.score_config, ContinuousScoreConfig): + score = output["score"] + explanation = output.get("explanation", None) + return EvaluationResult( + key=self.score_config.key, score=score, comment=explanation + ) diff --git a/python/tests/integration_tests/test_llm_evaluator.py b/python/tests/integration_tests/test_llm_evaluator.py new file mode 100644 index 000000000..9b0ffb8f4 --- /dev/null +++ b/python/tests/integration_tests/test_llm_evaluator.py @@ -0,0 +1,168 @@ +import pytest + +from langsmith import Client, evaluate +from langsmith.evaluation.llm_evaluator import ( + CategoricalScoreConfig, + ContinuousScoreConfig, + LLMEvaluator, +) + + +def test_llm_evaluator_init() -> None: + evaluator = LLMEvaluator( + prompt_template="Is the response vague? Y/N\n{input}", + score_config=CategoricalScoreConfig( + key="vagueness", + choices=["Y", "N"], + description="Whether the response is vague. Y for yes, N for no.", + include_explanation=True, + ), + ) + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "vagueness", + "description": "Whether the response is vague. 
Y for yes, N for no.", + "type": "object", + "properties": { + "score": { + "type": "string", + "enum": ["Y", "N"], + "description": "The score for the evaluation, one of Y, N.", + }, + "explanation": { + "type": "string", + "description": "The explanation for the score.", + }, + }, + "required": ["score", "explanation"], + } + + # Try a continuous score + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "rating", + "description": "The rating of the response, from 0 to 1.", + "type": "object", + "properties": { + "score": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "The score for the evaluation, " + "between 0 and 1, inclusive.", + }, + }, + "required": ["score"], + } + + # Test invalid model + with pytest.raises(ValueError): + LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + model_provider="invalid", + ) + + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {expected}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + assert evaluator is not None + assert set(evaluator.prompt.input_variables) == {"input", "output", "expected"} + + with pytest.raises(ValueError): + # Test invalid input variable without map_variables + LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, 
from 0 to 1.", + include_explanation=False, + ), + ) + + evaluator = LLMEvaluator( + prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + map_variables=lambda run, example: {"hello": "world"}, + ) + assert evaluator is not None + assert set(evaluator.prompt.input_variables) == {"input", "output", "hello"} + + +def test_evaluate() -> None: + client = Client() + client.clone_public_dataset( + "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" + ) + dataset_name = "Evaluate Examples" + + def predict(inputs: dict) -> dict: + return {"answer": "Yes"} + + reference_accuracy = LLMEvaluator( + prompt_template="Is the output accurate with respect to the expected output? " + "Y/N\nOutput: {output}\nExpected: {expected}", + score_config=CategoricalScoreConfig( + key="reference_accuracy", + choices=["Y", "N"], + description="Whether the output is accurate with respect to " + "the expected output.", + include_explanation=False, + ), + ) + + accuracy = LLMEvaluator( + prompt_template=[ + ( + "system", + "Is the output accurate with respect to the context and " + "question? 
Y/N", + ), + ("human", "Context: {context}\nQuestion: {question}\nOutput: {output}"), + ], + score_config=CategoricalScoreConfig( + key="accuracy", + choices=["Y", "N"], + description="Whether the output is accurate with respect to " + "the context and question.", + include_explanation=True, + ), + map_variables=lambda run, example: { + "context": example.inputs["context"], + "question": example.inputs["question"], + "output": run.outputs["answer"], + }, + model_provider="anthropic", + model="claude-3-haiku-20240307", + ) + + results = evaluate( + predict, + data=dataset_name, + evaluators=[reference_accuracy.evaluate_run, accuracy.evaluate_run], + ) + results.wait() From c287232f13822014f18ecb26fb4a12c8c9a2b505 Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Sun, 30 Jun 2024 22:50:06 -0700 Subject: [PATCH 192/373] fix mypy --- python/langsmith/evaluation/llm_evaluator.py | 25 +++++++++++-------- .../integration_tests/test_llm_evaluator.py | 8 +++--- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index 7c707f6a8..68235a003 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -1,6 +1,6 @@ """Contains the LLMEvaluator class for building LLM-as-a-judge evaluators.""" -from typing import Any, Callable, List, Optional, Tuple, Union +from typing import Any, Callable, List, Optional, Tuple, Union, cast from pydantic import BaseModel @@ -74,9 +74,9 @@ def __init__( *, prompt_template: Union[str, List[Tuple[str, str]]], score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], - map_variables: Optional[Callable[[Run, Example], dict]] = None, - model: Optional[str] = "gpt-3.5-turbo", - model_provider: Optional[str] = "openai", + map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None, + model: str = "gpt-3.5-turbo", + model_provider: str = "openai", **kwargs, ): """Initialize the 
LLMEvaluator. @@ -129,7 +129,7 @@ def __init__( self.score_schema = _create_score_json_schema(self.score_config) try: - model = init_chat_model( + chat_model = init_chat_model( model=model, model_provider=model_provider, **kwargs ).with_structured_output(self.score_schema) except ImportError as e: @@ -142,7 +142,7 @@ def __init__( "and that the appropriate secrets are set." ) from e - self.runnable = self.prompt | model + self.runnable = self.prompt | chat_model def evaluate_run( self, run: Run, example: Optional[Example] = None @@ -157,7 +157,6 @@ def evaluate_run( f"{self.prompt.input_variables}. Returned variables: " f"{variables.keys()}" ) - output = self.runnable.invoke(variables) else: variables = {} if "input" in self.prompt.input_variables: @@ -173,6 +172,11 @@ def evaluate_run( ) variables["input"] = list(run.inputs.values())[0] if "output" in self.prompt.input_variables: + if not run.outputs: + raise ValueError( + "No output keys are present in run.outputs but the prompt " + "requires 'output'." + ) if len(run.outputs) == 0: raise ValueError( "No output keys are present in run.outputs but the prompt " @@ -185,9 +189,10 @@ def evaluate_run( ) variables["output"] = list(run.outputs.values())[0] if "expected" in self.prompt.input_variables: - if not example: + if not example or not example.outputs: raise ValueError( - "No example is provided but the prompt requires 'expected'." + "No example or example outputs is provided but the prompt " + "requires 'expected'." ) if len(example.outputs) == 0: raise ValueError( @@ -200,8 +205,8 @@ def evaluate_run( "provide a map_variables function." 
) variables["expected"] = list(example.outputs.values())[0] - output = self.runnable.invoke(variables) + output: dict = cast(dict, self.runnable.invoke(variables)) if isinstance(self.score_config, CategoricalScoreConfig): value = output["score"] explanation = output.get("explanation", None) diff --git a/python/tests/integration_tests/test_llm_evaluator.py b/python/tests/integration_tests/test_llm_evaluator.py index 9b0ffb8f4..2a88f78d4 100644 --- a/python/tests/integration_tests/test_llm_evaluator.py +++ b/python/tests/integration_tests/test_llm_evaluator.py @@ -152,9 +152,9 @@ def predict(inputs: dict) -> dict: include_explanation=True, ), map_variables=lambda run, example: { - "context": example.inputs["context"], - "question": example.inputs["question"], - "output": run.outputs["answer"], + "context": example.inputs.get("context", "") if example else "", + "question": example.inputs.get("question", "") if example else "", + "output": run.outputs.get("output", "") if run.outputs else "", }, model_provider="anthropic", model="claude-3-haiku-20240307", @@ -163,6 +163,6 @@ def predict(inputs: dict) -> dict: results = evaluate( predict, data=dataset_name, - evaluators=[reference_accuracy.evaluate_run, accuracy.evaluate_run], + evaluators=[reference_accuracy, accuracy], ) results.wait() From 490c5a7a45e816eb3425c5e1948ad1b9f09b351d Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Sun, 30 Jun 2024 22:59:30 -0700 Subject: [PATCH 193/373] another fix --- python/langsmith/evaluation/llm_evaluator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index 68235a003..3051fa9aa 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -1,6 +1,6 @@ """Contains the LLMEvaluator class for building LLM-as-a-judge evaluators.""" -from typing import Any, Callable, List, Optional, Tuple, Union, cast +from typing import 
Any, Callable, Dict, List, Optional, Tuple, Union, cast from pydantic import BaseModel @@ -30,7 +30,7 @@ class ContinuousScoreConfig(BaseModel): def _create_score_json_schema( score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], ) -> dict: - properties: dict[str, Any] = {} + properties: Dict[str, Any] = {} if isinstance(score_config, CategoricalScoreConfig): properties["score"] = { "type": "string", From 1809ac44206120b64dec3f5a566d42df4dbc0c48 Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Mon, 1 Jul 2024 16:57:45 -0700 Subject: [PATCH 194/373] add other attributes to listExamples --- js/src/client.ts | 20 +++++++++ js/src/tests/client.int.test.ts | 44 +++++++++++++++++++ python/langsmith/client.py | 4 ++ python/tests/integration_tests/test_client.py | 29 ++++++++++++ 4 files changed, 97 insertions(+) diff --git a/js/src/client.ts b/js/src/client.ts index 15be0d209..98b5de483 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -571,6 +571,7 @@ export class Client { ): AsyncIterable { let offset = Number(queryParams.get("offset")) || 0; const limit = Number(queryParams.get("limit")) || 100; + const limitSet = queryParams.has("limit"); while (true) { queryParams.set("offset", String(offset)); queryParams.set("limit", String(limit)); @@ -594,6 +595,10 @@ export class Client { } yield items; + if (limitSet && items.length === limit) { + break; + } + if (items.length < limit) { break; } @@ -2183,6 +2188,9 @@ export class Client { splits, inlineS3Urls, metadata, + limit, + offset, + filter, }: { datasetId?: string; datasetName?: string; @@ -2191,6 +2199,9 @@ export class Client { splits?: string[]; inlineS3Urls?: boolean; metadata?: KVMap; + limit?: number; + offset?: number; + filter?: string; } = {}): AsyncIterable { let datasetId_; if (datasetId !== undefined && datasetName !== undefined) { @@ -2228,6 +2239,15 @@ export class Client { const serializedMetadata = JSON.stringify(metadata); params.append("metadata", serializedMetadata); } + if (limit 
!== undefined) { + params.append("limit", limit.toString()); + } + if (offset !== undefined) { + params.append("offset", offset.toString()); + } + if (filter !== undefined) { + params.append("filter", filter); + } for await (const examples of this._getPaginated( "/examples", params diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 0b87522e9..55d0fc898 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -510,6 +510,22 @@ test.concurrent( client.listExamples({ datasetId: dataset.id }) ); expect(examplesList.length).toEqual(4); + + const examplesListLimited = await toArray( + client.listExamples({ datasetId: dataset.id, limit: 2 }) + ); + expect(examplesListLimited.length).toEqual(2); + + const examplesListOffset = await toArray( + client.listExamples({ datasetId: dataset.id, offset: 2 }) + ); + expect(examplesListOffset.length).toEqual(2); + + const examplesListLimitedOffset = await toArray( + client.listExamples({ datasetId: dataset.id, limit: 1, offset: 2 }) + ); + expect(examplesListLimitedOffset.length).toEqual(1); + await client.deleteExample(example.id); const examplesList2 = await toArray( client.listExamples({ datasetId: dataset.id }) @@ -583,6 +599,34 @@ test.concurrent( expect(examplesList3[0].metadata?.foo).toEqual("bar"); expect(examplesList3[0].metadata?.baz).toEqual("qux"); + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'exists(metadata, "baz")', + }) + ); + expect(examplesList3.length).toEqual(1); + expect(examplesList3[0].metadata?.foo).toEqual("bar"); + expect(examplesList3[0].metadata?.baz).toEqual("qux"); + + examplesList3 = await toArray( + client.listExamples({ + datasetId: dataset.id, + filter: 'has("metadata", \'{"foo": "bar"}\')', + }) + ); + expect(examplesList3.length).toEqual(1); + expect(examplesList3[0].metadata?.foo).toEqual("bar"); + expect(examplesList3[0].metadata?.baz).toEqual("qux"); + + examplesList3 = await toArray( + 
client.listExamples({ + datasetId: dataset.id, + filter: 'exists(metadata, "bazzz")', + }) + ); + expect(examplesList3.length).toEqual(0); + examplesList3 = await toArray( client.listExamples({ datasetId: dataset.id, diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 00dab2dd9..464f80117 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3207,6 +3207,7 @@ def list_examples( offset: int = 0, limit: Optional[int] = None, metadata: Optional[dict] = None, + filter: Optional[str] = None, **kwargs: Any, ) -> Iterator[ls_schemas.Example]: """Retrieve the example rows of the specified dataset. @@ -3229,6 +3230,8 @@ def list_examples( Defaults to True. offset (int): The offset to start from. Defaults to 0. limit (int, optional): The maximum number of examples to return. + filter (str, optional): A structured filter string to apply to + the examples. Yields: Example: The examples. @@ -3243,6 +3246,7 @@ def list_examples( "splits": splits, "inline_s3_urls": inline_s3_urls, "limit": min(limit, 100) if limit is not None else 100, + "filter": filter, } if metadata is not None: params["metadata"] = _dumps_json(metadata) diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index c4d59e8c4..980d410cf 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -128,6 +128,14 @@ def test_list_examples(langchain_client: Client) -> None: example_list = list(langchain_client.list_examples(dataset_id=dataset.id)) assert len(example_list) == len(examples) + example_list = list( + langchain_client.list_examples(dataset_id=dataset.id, offset=1, limit=2) + ) + assert len(example_list) == 2 + + example_list = list(langchain_client.list_examples(dataset_id=dataset.id, offset=1)) + assert len(example_list) == len(examples) - 1 + example_list = list( langchain_client.list_examples(dataset_id=dataset.id, splits=["train"]) ) @@ -202,6 
+210,27 @@ def test_list_examples(langchain_client: Client) -> None: ) assert len(example_list) == 0 + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='exists(metadata, "baz")' + ) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='has("metadata", \'{"foo": "bar"}\')' + ) + ) + assert len(example_list) == 1 + + example_list = list( + langchain_client.list_examples( + dataset_id=dataset.id, filter='exists(metadata, "bazzz")' + ) + ) + assert len(example_list) == 0 + langchain_client.delete_dataset(dataset_id=dataset.id) From 70ee9fef676f19c5d72af988015faa7e8b92e8a7 Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Mon, 1 Jul 2024 17:40:31 -0700 Subject: [PATCH 195/373] fix pagination logic --- js/src/client.ts | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index 98b5de483..0f44d5124 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -571,7 +571,6 @@ export class Client { ): AsyncIterable { let offset = Number(queryParams.get("offset")) || 0; const limit = Number(queryParams.get("limit")) || 100; - const limitSet = queryParams.has("limit"); while (true) { queryParams.set("offset", String(offset)); queryParams.set("limit", String(limit)); @@ -595,10 +594,6 @@ export class Client { } yield items; - if (limitSet && items.length === limit) { - break; - } - if (items.length < limit) { break; } @@ -2248,11 +2243,18 @@ export class Client { if (filter !== undefined) { params.append("filter", filter); } + let i = 0; for await (const examples of this._getPaginated( "/examples", params )) { - yield* examples; + for (const example of examples) { + yield example; + i++; + } + if (limit !== undefined && i >= limit) { + break; + } } } From fd3c61ec36d48f76701d6ca143008fe278d1f7ac Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Mon, 1 Jul 2024 18:50:57 -0700 Subject: [PATCH 196/373] chore: bump 
JS to 0.1.35, Py to 0.1.83 --- js/package.json | 2 +- js/src/index.ts | 2 +- python/pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index ae1e45929..dcecdeb6a 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.34", + "version": "0.1.35", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index 2c38b2949..d41027b36 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.34"; +export const __version__ = "0.1.35"; diff --git a/python/pyproject.toml b/python/pyproject.toml index 904789afb..274754add 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.82" +version = "0.1.83" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From ce7ebb9ec479e28df1d1bb5370995d0076328350 Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Tue, 2 Jul 2024 00:33:17 -0700 Subject: [PATCH 197/373] add from_model --- python/langsmith/evaluation/llm_evaluator.py | 108 +++++++++++++----- .../integration_tests/test_llm_evaluator.py | 33 +++++- 2 files changed, 109 insertions(+), 32 deletions(-) diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index 3051fa9aa..e3a212415 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -75,7 +75,7 @@ def __init__( prompt_template: Union[str, List[Tuple[str, str]]], score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None, - model: str = "gpt-3.5-turbo", + model_name: str = "gpt-4o", model_provider: str = "openai", **kwargs, ): @@ -89,27 +89,92 @@ def __init__( The configuration for the score, either categorical or continuous. map_variables (Optional[Callable[[Run, Example], dict]], optional): A function that maps the run and example to the variables in the - prompt. Defaults to None. If None, it is assumed that the prompt + prompt. Defaults to None. If None, it is assumed that the prompt only requires 'input', 'output', and 'expected'. - model (Optional[str], optional): The model to use for the evaluation. - Defaults to "gpt-3.5-turbo". + model_name (Optional[str], optional): The model to use for the evaluation. + Defaults to "gpt-4o". model_provider (Optional[str], optional): The model provider to use for the evaluation. Defaults to "openai". """ try: - from langchain_core.prompts import ChatPromptTemplate + from langchain.chat_models import init_chat_model except ImportError as e: raise ImportError( - "LLMEvaluator requires langchain-core to be installed. " - "Please install langchain-core by running `pip install langchain-core`." 
+ "LLMEvaluator requires langchain to be installed. " + "Please install langchain by running `pip install langchain`." ) from e + + chat_model = init_chat_model( + model=model_name, model_provider=model_provider, **kwargs + ) + + self._initialize(prompt_template, score_config, map_variables, chat_model) + + @classmethod + def from_model( + cls, + model: Any, + *, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None, + ): + """Create an LLMEvaluator instance from a BaseChatModel instance. + + Args: + model (BaseChatModel): The chat model instance to use for the evaluation. + prompt_template (Union[str, List[Tuple[str, str]]): The prompt + template to use for the evaluation. If a string is provided, it is + assumed to be a system message. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The configuration for the score, either categorical or continuous. + map_variables (Optional[Callable[[Run, Example]], dict]], optional): + A function that maps the run and example to the variables in the + prompt. Defaults to None. If None, it is assumed that the prompt + only requires 'input', 'output', and 'expected'. + + Returns: + LLMEvaluator: An instance of LLMEvaluator. + """ + instance = cls.__new__(cls) + instance._initialize(prompt_template, score_config, map_variables, model) + return instance + + def _initialize( + self, + prompt_template: Union[str, List[Tuple[str, str]]], + score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig], + map_variables: Optional[Callable[[Run, Optional[Example]], dict]], + chat_model: Any, + ): + """Shared initialization code for __init__ and from_model. + + Args: + prompt_template (Union[str, List[Tuple[str, str]]): The prompt template. + score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): + The score configuration. 
+ map_variables (Optional[Callable[[Run, Example]], dict]]): + Function to map variables. + chat_model (BaseChatModel): The chat model instance. + """ try: - from langchain.chat_models import init_chat_model + from langchain_core.language_models.chat_models import BaseChatModel + from langchain_core.prompts import ChatPromptTemplate except ImportError as e: raise ImportError( - "LLMEvaluator requires langchain to be installed. " - "Please install langchain by running `pip install langchain`." + "LLMEvaluator requires langchain-core to be installed. " + "Please install langchain-core by running `pip install langchain-core`." ) from e + + if not ( + isinstance(chat_model, BaseChatModel) + and hasattr(chat_model, "with_structured_output") + ): + raise ValueError( + "chat_model must be an instance of " + "BaseLanguageModel and support structured output." + ) + if isinstance(prompt_template, str): self.prompt = ChatPromptTemplate.from_messages( [("system", prompt_template)] @@ -128,20 +193,7 @@ def __init__( self.score_config = score_config self.score_schema = _create_score_json_schema(self.score_config) - try: - chat_model = init_chat_model( - model=model, model_provider=model_provider, **kwargs - ).with_structured_output(self.score_schema) - except ImportError as e: - raise ImportError( - "LLMEvaluator is missing a required langchain integration." - ) from e - except ValueError as e: - raise ValueError( - "Error loading the model. Please check the model, model_provider, " - "and that the appropriate secrets are set." 
- ) from e - + chat_model = chat_model.with_structured_output(self.score_schema) self.runnable = self.prompt | chat_model def evaluate_run( @@ -149,14 +201,8 @@ def evaluate_run( ) -> Union[EvaluationResult, EvaluationResults]: """Evaluate a run.""" if self.map_variables: + # These will be validated when we invoke the model variables = self.map_variables(run, example) - if set(self.prompt.input_variables) - set(variables.keys()): - raise ValueError( - "map_variables must return a dictionary with keys for all of the " - "variables in the prompt. Expected variables: " - f"{self.prompt.input_variables}. Returned variables: " - f"{variables.keys()}" - ) else: variables = {} if "input" in self.prompt.input_variables: diff --git a/python/tests/integration_tests/test_llm_evaluator.py b/python/tests/integration_tests/test_llm_evaluator.py index 2a88f78d4..8fed56e1d 100644 --- a/python/tests/integration_tests/test_llm_evaluator.py +++ b/python/tests/integration_tests/test_llm_evaluator.py @@ -113,6 +113,37 @@ def test_llm_evaluator_init() -> None: assert set(evaluator.prompt.input_variables) == {"input", "output", "hello"} +def test_from_model() -> None: + from langchain_openai import ChatOpenAI + + evaluator = LLMEvaluator.from_model( + ChatOpenAI(), + prompt_template="Rate the response from 0 to 1.\n{input}", + score_config=ContinuousScoreConfig( + key="rating", + description="The rating of the response, from 0 to 1.", + include_explanation=False, + ), + ) + assert evaluator is not None + assert evaluator.prompt.input_variables == ["input"] + assert evaluator.score_schema == { + "title": "rating", + "description": "The rating of the response, from 0 to 1.", + "type": "object", + "properties": { + "score": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "The score for the evaluation, " + "between 0 and 1, inclusive.", + }, + }, + "required": ["score"], + } + + def test_evaluate() -> None: client = Client() client.clone_public_dataset( @@ -157,7 
+188,7 @@ def predict(inputs: dict) -> dict: "output": run.outputs.get("output", "") if run.outputs else "", }, model_provider="anthropic", - model="claude-3-haiku-20240307", + model_name="claude-3-haiku-20240307", ) results = evaluate( From b1ec89573b7a99c191391249ac48b0c80bb3a58e Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Tue, 2 Jul 2024 00:51:45 -0700 Subject: [PATCH 198/373] update based on comments --- python/langsmith/evaluation/llm_evaluator.py | 120 ++++++++++-------- .../integration_tests/test_llm_evaluator.py | 13 +- 2 files changed, 80 insertions(+), 53 deletions(-) diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index e3a212415..b6fcecabb 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -200,59 +200,77 @@ def evaluate_run( self, run: Run, example: Optional[Example] = None ) -> Union[EvaluationResult, EvaluationResults]: """Evaluate a run.""" + variables = self._prepare_variables(run, example) + output: dict = cast(dict, self.runnable.invoke(variables)) + return self._parse_output(output) + + async def aevaluate_run( + self, run: Run, example: Optional[Example] = None + ) -> Union[EvaluationResult, EvaluationResults]: + """Asynchronously evaluate a run.""" + variables = self._prepare_variables(run, example) + output: dict = cast(dict, await self.runnable.ainvoke(variables)) + return self._parse_output(output) + + def _prepare_variables(self, run: Run, example: Optional[Example]) -> dict: + """Prepare variables for model invocation.""" if self.map_variables: - # These will be validated when we invoke the model - variables = self.map_variables(run, example) - else: - variables = {} - if "input" in self.prompt.input_variables: - if len(run.inputs) == 0: - raise ValueError( - "No input keys are present in run.inputs but the prompt " - "requires 'input'." 
- ) - if len(run.inputs) != 1: - raise ValueError( - "Multiple input keys are present in run.inputs. Please provide " - "a map_variables function." - ) - variables["input"] = list(run.inputs.values())[0] - if "output" in self.prompt.input_variables: - if not run.outputs: - raise ValueError( - "No output keys are present in run.outputs but the prompt " - "requires 'output'." - ) - if len(run.outputs) == 0: - raise ValueError( - "No output keys are present in run.outputs but the prompt " - "requires 'output'." - ) - if len(run.outputs) != 1: - raise ValueError( - "Multiple output keys are present in run.outputs. Please " - "provide a map_variables function." - ) - variables["output"] = list(run.outputs.values())[0] - if "expected" in self.prompt.input_variables: - if not example or not example.outputs: - raise ValueError( - "No example or example outputs is provided but the prompt " - "requires 'expected'." - ) - if len(example.outputs) == 0: - raise ValueError( - "No output keys are present in example.outputs but the prompt " - "requires 'expected'." - ) - if len(example.outputs) != 1: - raise ValueError( - "Multiple output keys are present in example.outputs. Please " - "provide a map_variables function." - ) - variables["expected"] = list(example.outputs.values())[0] + return self.map_variables(run, example) - output: dict = cast(dict, self.runnable.invoke(variables)) + variables = {} + if "input" in self.prompt.input_variables: + if len(run.inputs) == 0: + raise ValueError( + "No input keys are present in run.inputs but the prompt " + "requires 'input'." + ) + if len(run.inputs) != 1: + raise ValueError( + "Multiple input keys are present in run.inputs. Please provide " + "a map_variables function." + ) + variables["input"] = list(run.inputs.values())[0] + + if "output" in self.prompt.input_variables: + if not run.outputs: + raise ValueError( + "No output keys are present in run.outputs but the prompt " + "requires 'output'." 
+ ) + if len(run.outputs) == 0: + raise ValueError( + "No output keys are present in run.outputs but the prompt " + "requires 'output'." + ) + if len(run.outputs) != 1: + raise ValueError( + "Multiple output keys are present in run.outputs. Please " + "provide a map_variables function." + ) + variables["output"] = list(run.outputs.values())[0] + + if "expected" in self.prompt.input_variables: + if not example or not example.outputs: + raise ValueError( + "No example or example outputs is provided but the prompt " + "requires 'expected'." + ) + if len(example.outputs) == 0: + raise ValueError( + "No output keys are present in example.outputs but the prompt " + "requires 'expected'." + ) + if len(example.outputs) != 1: + raise ValueError( + "Multiple output keys are present in example.outputs. Please " + "provide a map_variables function." + ) + variables["expected"] = list(example.outputs.values())[0] + + return variables + + def _parse_output(self, output: dict) -> Union[EvaluationResult, EvaluationResults]: + """Parse the model output into an evaluation result.""" if isinstance(self.score_config, CategoricalScoreConfig): value = output["score"] explanation = output.get("explanation", None) diff --git a/python/tests/integration_tests/test_llm_evaluator.py b/python/tests/integration_tests/test_llm_evaluator.py index 8fed56e1d..cedb74024 100644 --- a/python/tests/integration_tests/test_llm_evaluator.py +++ b/python/tests/integration_tests/test_llm_evaluator.py @@ -1,6 +1,6 @@ import pytest -from langsmith import Client, evaluate +from langsmith import Client, aevaluate, evaluate from langsmith.evaluation.llm_evaluator import ( CategoricalScoreConfig, ContinuousScoreConfig, @@ -144,7 +144,7 @@ def test_from_model() -> None: } -def test_evaluate() -> None: +async def test_evaluate() -> None: client = Client() client.clone_public_dataset( "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d" @@ -154,6 +154,9 @@ def test_evaluate() -> None: def 
predict(inputs: dict) -> dict: return {"answer": "Yes"} + async def apredict(inputs: dict) -> dict: + return {"answer": "Yes"} + reference_accuracy = LLMEvaluator( prompt_template="Is the output accurate with respect to the expected output? " "Y/N\nOutput: {output}\nExpected: {expected}", @@ -197,3 +200,9 @@ def predict(inputs: dict) -> dict: evaluators=[reference_accuracy, accuracy], ) results.wait() + + await aevaluate( + apredict, + data=dataset_name, + evaluators=[reference_accuracy, accuracy], + ) From 7f741d861f1951aba397c91f73681ad687ed2f40 Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Tue, 2 Jul 2024 14:55:56 -0700 Subject: [PATCH 199/373] fix workflow test --- .github/workflows/python_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 5a45962ae..f351b9308 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -44,7 +44,7 @@ jobs: - name: Install dependencies run: | poetry install --with dev,lint - poetry run pip install -U langchain langchain-core + poetry run pip install -U langchain langchain-core langchain-openai - name: Build ${{ matrix.python-version }} run: poetry build - name: Lint ${{ matrix.python-version }} From a872906b8f0cf38d699093f74313a3d482cbed01 Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Wed, 3 Jul 2024 10:57:44 -0700 Subject: [PATCH 200/373] Inline lodash.set functionality to patch vulnerability --- js/.eslintrc.cjs | 1 + js/package.json | 2 - js/src/anonymizer/index.ts | 2 +- js/src/utils/lodash/LICENSE | 49 ++++++++++++++++++ js/src/utils/lodash/assignValue.ts | 27 ++++++++++ js/src/utils/lodash/baseAssignValue.ts | 23 +++++++++ js/src/utils/lodash/baseSet.ts | 52 +++++++++++++++++++ js/src/utils/lodash/castPath.ts | 19 +++++++ js/src/utils/lodash/eq.ts | 35 +++++++++++++ js/src/utils/lodash/getTag.ts | 19 +++++++ js/src/utils/lodash/isIndex.ts | 30 +++++++++++ js/src/utils/lodash/isKey.ts | 36 
++++++++++++++ js/src/utils/lodash/isObject.ts | 31 ++++++++++++ js/src/utils/lodash/isSymbol.ts | 28 +++++++++++ js/src/utils/lodash/memoizeCapped.ts | 69 ++++++++++++++++++++++++++ js/src/utils/lodash/set.ts | 39 +++++++++++++++ js/src/utils/lodash/stringToPath.ts | 49 ++++++++++++++++++ js/src/utils/lodash/toKey.ts | 23 +++++++++ js/yarn.lock | 17 ------- 19 files changed, 531 insertions(+), 20 deletions(-) create mode 100644 js/src/utils/lodash/LICENSE create mode 100644 js/src/utils/lodash/assignValue.ts create mode 100644 js/src/utils/lodash/baseAssignValue.ts create mode 100644 js/src/utils/lodash/baseSet.ts create mode 100644 js/src/utils/lodash/castPath.ts create mode 100644 js/src/utils/lodash/eq.ts create mode 100644 js/src/utils/lodash/getTag.ts create mode 100644 js/src/utils/lodash/isIndex.ts create mode 100644 js/src/utils/lodash/isKey.ts create mode 100644 js/src/utils/lodash/isObject.ts create mode 100644 js/src/utils/lodash/isSymbol.ts create mode 100644 js/src/utils/lodash/memoizeCapped.ts create mode 100644 js/src/utils/lodash/set.ts create mode 100644 js/src/utils/lodash/stringToPath.ts create mode 100644 js/src/utils/lodash/toKey.ts diff --git a/js/.eslintrc.cjs b/js/.eslintrc.cjs index a870c9f5a..da4c3ecb4 100644 --- a/js/.eslintrc.cjs +++ b/js/.eslintrc.cjs @@ -14,6 +14,7 @@ module.exports = { ignorePatterns: [ ".eslintrc.cjs", "scripts", + "src/utils/lodash/*", "node_modules", "dist", "dist-cjs", diff --git a/js/package.json b/js/package.json index dcecdeb6a..978c3a78d 100644 --- a/js/package.json +++ b/js/package.json @@ -95,7 +95,6 @@ "dependencies": { "@types/uuid": "^9.0.1", "commander": "^10.0.1", - "lodash.set": "^4.3.2", "p-queue": "^6.6.2", "p-retry": "4", "uuid": "^9.0.0" @@ -109,7 +108,6 @@ "@langchain/langgraph": "^0.0.19", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", - "@types/lodash.set": "^4.3.9", "@typescript-eslint/eslint-plugin": "^5.59.8", "@typescript-eslint/parser": "^5.59.8", "babel-jest": "^29.5.0", 
diff --git a/js/src/anonymizer/index.ts b/js/src/anonymizer/index.ts index cf5996066..dc360a3c4 100644 --- a/js/src/anonymizer/index.ts +++ b/js/src/anonymizer/index.ts @@ -1,4 +1,4 @@ -import set from "lodash.set"; +import set from "../utils/lodash/set.js"; export interface StringNode { value: string; diff --git a/js/src/utils/lodash/LICENSE b/js/src/utils/lodash/LICENSE new file mode 100644 index 000000000..5b807415b --- /dev/null +++ b/js/src/utils/lodash/LICENSE @@ -0,0 +1,49 @@ +The MIT License + +Copyright JS Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. \ No newline at end of file diff --git a/js/src/utils/lodash/assignValue.ts b/js/src/utils/lodash/assignValue.ts new file mode 100644 index 000000000..f02ed4991 --- /dev/null +++ b/js/src/utils/lodash/assignValue.ts @@ -0,0 +1,27 @@ +import baseAssignValue from "./baseAssignValue.js"; +import eq from "./eq.js"; + +/** Used to check objects for own properties. */ +const hasOwnProperty = Object.prototype.hasOwnProperty; + +/** + * Assigns `value` to `key` of `object` if the existing value is not equivalent. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. + * @param {*} value The value to assign. + */ +function assignValue(object: Record, key: string, value: any) { + const objValue = object[key]; + + if (!(hasOwnProperty.call(object, key) && eq(objValue, value))) { + if (value !== 0 || 1 / value === 1 / objValue) { + baseAssignValue(object, key, value); + } + } else if (value === undefined && !(key in object)) { + baseAssignValue(object, key, value); + } +} + +export default assignValue; diff --git a/js/src/utils/lodash/baseAssignValue.ts b/js/src/utils/lodash/baseAssignValue.ts new file mode 100644 index 000000000..5d1d70d16 --- /dev/null +++ b/js/src/utils/lodash/baseAssignValue.ts @@ -0,0 +1,23 @@ +/** + * The base implementation of `assignValue` and `assignMergeValue` without + * value checks. + * + * @private + * @param {Object} object The object to modify. + * @param {string} key The key of the property to assign. 
+ * @param {*} value The value to assign. + */ +function baseAssignValue(object: Record, key: string, value: any) { + if (key === "__proto__") { + Object.defineProperty(object, key, { + configurable: true, + enumerable: true, + value: value, + writable: true, + }); + } else { + object[key] = value; + } +} + +export default baseAssignValue; diff --git a/js/src/utils/lodash/baseSet.ts b/js/src/utils/lodash/baseSet.ts new file mode 100644 index 000000000..5db4ddf76 --- /dev/null +++ b/js/src/utils/lodash/baseSet.ts @@ -0,0 +1,52 @@ +// @ts-nocheck + +import assignValue from "./assignValue.js"; +import castPath from "./castPath.js"; +import isIndex from "./isIndex.js"; +import isObject from "./isObject.js"; +import toKey from "./toKey.js"; + +/** + * The base implementation of `set`. + * + * @private + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @param {Function} [customizer] The function to customize path creation. + * @returns {Object} Returns `object`. + */ +function baseSet(object, path, value, customizer) { + if (!isObject(object)) { + return object; + } + path = castPath(path, object); + + const length = path.length; + const lastIndex = length - 1; + + let index = -1; + let nested = object; + + while (nested != null && ++index < length) { + const key = toKey(path[index]); + let newValue = value; + + if (index !== lastIndex) { + const objValue = nested[key]; + newValue = customizer ? customizer(objValue, key, nested) : undefined; + if (newValue === undefined) { + newValue = isObject(objValue) + ? objValue + : isIndex(path[index + 1]) + ? 
[] + : {}; + } + } + assignValue(nested, key, newValue); + nested = nested[key]; + } + return object; +} + +export default baseSet; diff --git a/js/src/utils/lodash/castPath.ts b/js/src/utils/lodash/castPath.ts new file mode 100644 index 000000000..4ae161c6f --- /dev/null +++ b/js/src/utils/lodash/castPath.ts @@ -0,0 +1,19 @@ +import isKey from "./isKey.js"; +import stringToPath from "./stringToPath.js"; + +/** + * Casts `value` to a path array if it's not one. + * + * @private + * @param {*} value The value to inspect. + * @param {Object} [object] The object to query keys on. + * @returns {Array} Returns the cast property path array. + */ +function castPath(value: any, object: Record) { + if (Array.isArray(value)) { + return value; + } + return isKey(value, object) ? [value] : stringToPath(value); +} + +export default castPath; diff --git a/js/src/utils/lodash/eq.ts b/js/src/utils/lodash/eq.ts new file mode 100644 index 000000000..11ece1229 --- /dev/null +++ b/js/src/utils/lodash/eq.ts @@ -0,0 +1,35 @@ +/** + * Performs a + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * comparison between two values to determine if they are equivalent. + * + * @since 4.0.0 + * @category Lang + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @returns {boolean} Returns `true` if the values are equivalent, else `false`. 
+ * @example + * + * const object = { 'a': 1 } + * const other = { 'a': 1 } + * + * eq(object, object) + * // => true + * + * eq(object, other) + * // => false + * + * eq('a', 'a') + * // => true + * + * eq('a', Object('a')) + * // => false + * + * eq(NaN, NaN) + * // => true + */ +function eq(value: any, other: any) { + return value === other || (value !== value && other !== other); +} + +export default eq; diff --git a/js/src/utils/lodash/getTag.ts b/js/src/utils/lodash/getTag.ts new file mode 100644 index 000000000..c616a26e0 --- /dev/null +++ b/js/src/utils/lodash/getTag.ts @@ -0,0 +1,19 @@ +// @ts-nocheck + +const toString = Object.prototype.toString; + +/** + * Gets the `toStringTag` of `value`. + * + * @private + * @param {*} value The value to query. + * @returns {string} Returns the `toStringTag`. + */ +function getTag(value) { + if (value == null) { + return value === undefined ? "[object Undefined]" : "[object Null]"; + } + return toString.call(value); +} + +export default getTag; diff --git a/js/src/utils/lodash/isIndex.ts b/js/src/utils/lodash/isIndex.ts new file mode 100644 index 000000000..eb956ca70 --- /dev/null +++ b/js/src/utils/lodash/isIndex.ts @@ -0,0 +1,30 @@ +// @ts-nocheck + +/** Used as references for various `Number` constants. */ +const MAX_SAFE_INTEGER = 9007199254740991; + +/** Used to detect unsigned integer values. */ +const reIsUint = /^(?:0|[1-9]\d*)$/; + +/** + * Checks if `value` is a valid array-like index. + * + * @private + * @param {*} value The value to check. + * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index. + * @returns {boolean} Returns `true` if `value` is a valid index, else `false`. + */ +function isIndex(value, length) { + const type = typeof value; + length = length == null ? 
MAX_SAFE_INTEGER : length; + + return ( + !!length && + (type === "number" || (type !== "symbol" && reIsUint.test(value))) && + value > -1 && + value % 1 === 0 && + value < length + ); +} + +export default isIndex; diff --git a/js/src/utils/lodash/isKey.ts b/js/src/utils/lodash/isKey.ts new file mode 100644 index 000000000..5c46772b9 --- /dev/null +++ b/js/src/utils/lodash/isKey.ts @@ -0,0 +1,36 @@ +// @ts-nocheck +import isSymbol from "./isSymbol.js"; + +/** Used to match property names within property paths. */ +const reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/; +const reIsPlainProp = /^\w*$/; + +/** + * Checks if `value` is a property name and not a property path. + * + * @private + * @param {*} value The value to check. + * @param {Object} [object] The object to query keys on. + * @returns {boolean} Returns `true` if `value` is a property name, else `false`. + */ +function isKey(value, object) { + if (Array.isArray(value)) { + return false; + } + const type = typeof value; + if ( + type === "number" || + type === "boolean" || + value == null || + isSymbol(value) + ) { + return true; + } + return ( + reIsPlainProp.test(value) || + !reIsDeepProp.test(value) || + (object != null && value in Object(object)) + ); +} + +export default isKey; diff --git a/js/src/utils/lodash/isObject.ts b/js/src/utils/lodash/isObject.ts new file mode 100644 index 000000000..56c8930f8 --- /dev/null +++ b/js/src/utils/lodash/isObject.ts @@ -0,0 +1,31 @@ +// @ts-nocheck + +/** + * Checks if `value` is the + * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) + * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) + * + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an object, else `false`. 
+ * @example + * + * isObject({}) + * // => true + * + * isObject([1, 2, 3]) + * // => true + * + * isObject(Function) + * // => true + * + * isObject(null) + * // => false + */ +function isObject(value) { + const type = typeof value; + return value != null && (type === "object" || type === "function"); +} + +export default isObject; diff --git a/js/src/utils/lodash/isSymbol.ts b/js/src/utils/lodash/isSymbol.ts new file mode 100644 index 000000000..94e65a60f --- /dev/null +++ b/js/src/utils/lodash/isSymbol.ts @@ -0,0 +1,28 @@ +// @ts-nocheck + +import getTag from "./getTag.js"; + +/** + * Checks if `value` is classified as a `Symbol` primitive or object. + * + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. + * @example + * + * isSymbol(Symbol.iterator) + * // => true + * + * isSymbol('abc') + * // => false + */ +function isSymbol(value) { + const type = typeof value; + return ( + type === "symbol" || + (type === "object" && value != null && getTag(value) === "[object Symbol]") + ); +} + +export default isSymbol; diff --git a/js/src/utils/lodash/memoizeCapped.ts b/js/src/utils/lodash/memoizeCapped.ts new file mode 100644 index 000000000..c4696ddd3 --- /dev/null +++ b/js/src/utils/lodash/memoizeCapped.ts @@ -0,0 +1,69 @@ +// @ts-nocheck + +/** + * Creates a function that memoizes the result of `func`. If `resolver` is + * provided, it determines the cache key for storing the result based on the + * arguments provided to the memoized function. By default, the first argument + * provided to the memoized function is used as the map cache key. The `func` + * is invoked with the `this` binding of the memoized function. + * + * **Note:** The cache is exposed as the `cache` property on the memoized + * function. 
Its creation may be customized by replacing the `memoize.Cache` + * constructor with one whose instances implement the + * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object) + * method interface of `clear`, `delete`, `get`, `has`, and `set`. + * + * @since 0.1.0 + * @category Function + * @param {Function} func The function to have its output memoized. + * @param {Function} [resolver] The function to resolve the cache key. + * @returns {Function} Returns the new memoized function. + * @example + * + * const object = { 'a': 1, 'b': 2 } + * const other = { 'c': 3, 'd': 4 } + * + * const values = memoize(values) + * values(object) + * // => [1, 2] + * + * values(other) + * // => [3, 4] + * + * object.a = 2 + * values(object) + * // => [1, 2] + * + * // Modify the result cache. + * values.cache.set(object, ['a', 'b']) + * values(object) + * // => ['a', 'b'] + * + * // Replace `memoize.Cache`. + * memoize.Cache = WeakMap + */ +function memoize(func, resolver) { + if ( + typeof func !== "function" || + (resolver != null && typeof resolver !== "function") + ) { + throw new TypeError("Expected a function"); + } + const memoized = function (...args) { + const key = resolver ? resolver.apply(this, args) : args[0]; + const cache = memoized.cache; + + if (cache.has(key)) { + return cache.get(key); + } + const result = func.apply(this, args); + memoized.cache = cache.set(key, result) || cache; + return result; + }; + memoized.cache = new (memoize.Cache || Map)(); + return memoized; +} + +memoize.Cache = Map; + +export default memoize; diff --git a/js/src/utils/lodash/set.ts b/js/src/utils/lodash/set.ts new file mode 100644 index 000000000..01f277ce4 --- /dev/null +++ b/js/src/utils/lodash/set.ts @@ -0,0 +1,39 @@ +// @ts-nocheck + +import baseSet from "./baseSet.js"; + +/** + * Sets the value at `path` of `object`. If a portion of `path` doesn't exist, + * it's created. 
Arrays are created for missing index properties while objects + * are created for all other missing properties. Use `setWith` to customize + * `path` creation. + * + * **Note:** This method mutates `object`. + * + * Inlined to just use set functionality and patch vulnerabilities + * on existing isolated "lodash.set" package. + * + * @since 3.7.0 + * @category Object + * @param {Object} object The object to modify. + * @param {Array|string} path The path of the property to set. + * @param {*} value The value to set. + * @returns {Object} Returns `object`. + * @see has, hasIn, get, unset + * @example + * + * const object = { 'a': [{ 'b': { 'c': 3 } }] } + * + * set(object, 'a[0].b.c', 4) + * console.log(object.a[0].b.c) + * // => 4 + * + * set(object, ['x', '0', 'y', 'z'], 5) + * console.log(object.x[0].y.z) + * // => 5 + */ +function set(object, path, value) { + return object == null ? object : baseSet(object, path, value); +} + +export default set; diff --git a/js/src/utils/lodash/stringToPath.ts b/js/src/utils/lodash/stringToPath.ts new file mode 100644 index 000000000..d4e99ab9f --- /dev/null +++ b/js/src/utils/lodash/stringToPath.ts @@ -0,0 +1,49 @@ +// @ts-nocheck + +import memoizeCapped from "./memoizeCapped.js"; + +const charCodeOfDot = ".".charCodeAt(0); +const reEscapeChar = /\\(\\)?/g; +const rePropName = RegExp( + // Match anything that isn't a dot or bracket. + "[^.[\\]]+" + + "|" + + // Or match property names within brackets. + "\\[(?:" + + // Match a non-string expression. + "([^\"'][^[]*)" + + "|" + + // Or match strings (supports escaping characters). + "([\"'])((?:(?!\\2)[^\\\\]|\\\\.)*?)\\2" + + ")\\]" + + "|" + + // Or match "" as the space between consecutive dots or empty brackets. + "(?=(?:\\.|\\[\\])(?:\\.|\\[\\]|$))", + "g" +); + +/** + * Converts `string` to a property path array. + * + * @private + * @param {string} string The string to convert. + * @returns {Array} Returns the property path array. 
+ */ +const stringToPath = memoizeCapped((string: string) => { + const result = []; + if (string.charCodeAt(0) === charCodeOfDot) { + result.push(""); + } + string.replace(rePropName, (match, expression, quote, subString) => { + let key = match; + if (quote) { + key = subString.replace(reEscapeChar, "$1"); + } else if (expression) { + key = expression.trim(); + } + result.push(key); + }); + return result; +}); + +export default stringToPath; diff --git a/js/src/utils/lodash/toKey.ts b/js/src/utils/lodash/toKey.ts new file mode 100644 index 000000000..98b327455 --- /dev/null +++ b/js/src/utils/lodash/toKey.ts @@ -0,0 +1,23 @@ +// @ts-nocheck + +import isSymbol from "./isSymbol.js"; + +/** Used as references for various `Number` constants. */ +const INFINITY = 1 / 0; + +/** + * Converts `value` to a string key if it's not a string or symbol. + * + * @private + * @param {*} value The value to inspect. + * @returns {string|symbol} Returns the key. + */ +function toKey(value) { + if (typeof value === "string" || isSymbol(value)) { + return value; + } + const result = `${value}`; + return result === "0" && 1 / value === -INFINITY ? 
"-0" : result; +} + +export default toKey; diff --git a/js/yarn.lock b/js/yarn.lock index be071906d..8e4cee5e8 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -1487,18 +1487,6 @@ resolved "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz" integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== -"@types/lodash.set@^4.3.9": - version "4.3.9" - resolved "https://registry.yarnpkg.com/@types/lodash.set/-/lodash.set-4.3.9.tgz#55d95bce407b42c6655f29b2d0811fd428e698f0" - integrity sha512-KOxyNkZpbaggVmqbpr82N2tDVTx05/3/j0f50Es1prxrWB0XYf9p3QNxqcbWb7P1Q9wlvsUSlCFnwlPCIJ46PQ== - dependencies: - "@types/lodash" "*" - -"@types/lodash@*": - version "4.17.4" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.17.4.tgz#0303b64958ee070059e3a7184048a55159fe20b7" - integrity sha512-wYCP26ZLxaT3R39kiN2+HcJ4kTd3U1waI/cY7ivWYqFP6pW3ZNpvi6Wd6PHZx7T/t8z0vlkXMg3QYLa7DZ/IJQ== - "@types/node-fetch@^2.6.4": version "2.6.11" resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.11.tgz#9b39b78665dae0e82a08f02f4967d62c66f95d24" @@ -3585,11 +3573,6 @@ lodash.merge@^4.6.2: resolved "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz" integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== -lodash.set@^4.3.2: - version "4.3.2" - resolved "https://registry.yarnpkg.com/lodash.set/-/lodash.set-4.3.2.tgz#d8757b1da807dde24816b0d6a84bea1a76230b23" - integrity sha512-4hNPN5jlm/N/HLMCO43v8BXKq9Z7QdAGc/VGrRD61w8gN9g/6jF9A4L1pbUgBLCffi0w9VsXfTOij5x8iTyFvg== - lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz" From 2664d89b852ad8d9142f36ef9440270a86b0e57e Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Wed, 3 Jul 2024 11:07:26 -0700 Subject: [PATCH 201/373] Bump verison --- js/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/package.json b/js/package.json index 
978c3a78d..eee94a13a 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.35", + "version": "0.1.36", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ From 3b8a0330a1cf25000c865ee5f51642e08a650610 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 3 Jul 2024 20:19:32 +0200 Subject: [PATCH 202/373] Bump index.ts as well --- js/src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/index.ts b/js/src/index.ts index d41027b36..575faa25a 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.35"; +export const __version__ = "0.1.36"; From 9b1fcc4518e24e8fa9b2b716d6fe8bffac334d3c Mon Sep 17 00:00:00 2001 From: Yue Wang <150297347+yue-fh@users.noreply.github.com> Date: Sun, 7 Jul 2024 20:27:59 -0400 Subject: [PATCH 203/373] support overridding name in langsmith_extra (#826) currently there is no way to dynamically override the `name` argument that could be passed into `traceable`. 
This allows the run name to be overridden by `langsmith_extra` --- python/langsmith/run_helpers.py | 3 ++- python/tests/unit_tests/test_run_helpers.py | 9 +++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 3d4753f67..089d19b09 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -174,6 +174,7 @@ def is_async(func: Callable) -> bool: class LangSmithExtra(TypedDict, total=False): """Any additional info to be injected into the run dynamically.""" + name: Optional[str] reference_example_id: Optional[ls_client.ID_TYPE] run_extra: Optional[Dict] parent: Optional[Union[run_trees.RunTree, str, Mapping]] @@ -1006,13 +1007,13 @@ def _setup_run( ) -> _TraceableContainer: """Create a new run or create_child() if run is passed in kwargs.""" extra_outer = container_input.get("extra_outer") or {} - name = container_input.get("name") metadata = container_input.get("metadata") tags = container_input.get("tags") client = container_input.get("client") run_type = container_input.get("run_type") or "chain" outer_project = _PROJECT_NAME.get() langsmith_extra = langsmith_extra or LangSmithExtra() + name = langsmith_extra.get("name") or container_input.get("name") client_ = langsmith_extra.get("client", client) parent_run_ = _get_parent_run( {**langsmith_extra, "client": client_}, kwargs.get("config") diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 4ea0d564e..f731fb892 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -726,7 +726,12 @@ def _get_run(r: RunTree) -> None: with tracing_context(enabled=True): chunks = my_answer( - "some_query", langsmith_extra={"on_end": _get_run, "client": mock_client_} + "some_query", + langsmith_extra={ + "name": "test_overridding_name", + "on_end": _get_run, + "client": mock_client_, + }, ) all_chunks = [] for 
chunk in chunks: @@ -741,7 +746,7 @@ def _get_run(r: RunTree) -> None: ] assert run is not None run = cast(RunTree, run) - assert run.name == "expand_and_answer_questions" + assert run.name == "test_overridding_name" child_runs = run.child_runs assert child_runs and len(child_runs) == 5 names = [run.name for run in child_runs] From 200b11b1d3f44084bf663e57112858910d8a30d5 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Sun, 7 Jul 2024 17:48:23 -0700 Subject: [PATCH 204/373] [Python] Respect enabled in context manager (#836) Previously, the trace() context manager would trace no matter what. Now, respect the context var that's set if you use tracing_context() Additionally, respect the enabled arg in @traceable even if project_name is manually defined. Additionally, add more tests for async traceable invocations + use asyncio debug mode to log issues --- python/Makefile | 2 +- python/langsmith/client.py | 6 +- python/langsmith/run_helpers.py | 35 +- python/langsmith/run_trees.py | 2 + python/poetry.lock | 356 +++++++++--------- python/pyproject.toml | 10 +- python/tests/integration_tests/test_client.py | 8 - python/tests/integration_tests/test_runs.py | 75 ++-- python/tests/unit_tests/test_run_helpers.py | 126 +++++++ 9 files changed, 381 insertions(+), 239 deletions(-) diff --git a/python/Makefile b/python/Makefile index c8646ed56..d06830bf9 100644 --- a/python/Makefile +++ b/python/Makefile @@ -1,7 +1,7 @@ .PHONY: tests lint format build publish doctest integration_tests integration_tests_fast evals tests: - poetry run python -m pytest --disable-socket --allow-unix-socket -n auto --durations=10 tests/unit_tests + PYTHONDEVMODE=1 PYTHONASYNCIODEBUG=1 poetry run python -m pytest --disable-socket --allow-unix-socket -n auto --durations=10 tests/unit_tests tests_watch: poetry run ptw --now . 
-- -vv -x tests/unit_tests diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 464f80117..bd39b0e5a 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -266,7 +266,9 @@ def _dumps_json_single( ensure_ascii=True, ).encode("utf-8") try: - result = orjson.dumps(orjson.loads(result.decode("utf-8", errors="lossy"))) + result = orjson.dumps( + orjson.loads(result.decode("utf-8", errors="surrogateescape")) + ) except orjson.JSONDecodeError: result = _elide_surrogates(result) return result @@ -1238,7 +1240,6 @@ def create_run( if not self._filter_for_sampling([run_create]): return run_create = self._run_transform(run_create, copy=True) - self._insert_runtime_env([run_create]) if revision_id is not None: run_create["extra"]["metadata"]["revision_id"] = revision_id if ( @@ -1250,6 +1251,7 @@ def create_run( return self.tracing_queue.put( TracingQueueItem(run_create["dotted_order"], "create", run_create) ) + self._insert_runtime_env([run_create]) self._create_run(run_create) def _create_run(self, run_create: dict): diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 089d19b09..ec4dbac97 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -700,6 +700,7 @@ def trace( DeprecationWarning, ) old_ctx = get_tracing_context() + is_disabled = old_ctx.get("enabled", True) is False outer_tags = _TAGS.get() outer_metadata = _METADATA.get() outer_project = _PROJECT_NAME.get() or utils.get_tracer_project() @@ -707,17 +708,16 @@ def trace( {"parent": parent, "run_tree": kwargs.get("run_tree"), "client": client} ) - # Merge and set context variables + # Merge context variables tags_ = sorted(set((tags or []) + (outer_tags or []))) - _TAGS.set(tags_) metadata = {**(metadata or {}), **(outer_metadata or {}), "ls_method": "trace"} - _METADATA.set(metadata) extra_outer = extra or {} extra_outer["metadata"] = metadata project_name_ = project_name or outer_project - if parent_run_ is 
not None: + # If it's disabled, we break the tree + if parent_run_ is not None and not is_disabled: new_run = parent_run_.create_child( name=name, run_id=run_id, @@ -740,9 +740,12 @@ def trace( tags=tags_, client=client, # type: ignore[arg-type] ) - new_run.post() - _PARENT_RUN_TREE.set(new_run) - _PROJECT_NAME.set(project_name_) + if not is_disabled: + new_run.post() + _TAGS.set(tags_) + _METADATA.set(metadata) + _PARENT_RUN_TREE.set(new_run) + _PROJECT_NAME.set(project_name_) try: yield new_run @@ -753,12 +756,14 @@ def trace( tb = utils._format_exc() tb = f"{e.__class__.__name__}: {e}\n\n{tb}" new_run.end(error=tb) - new_run.patch() + if not is_disabled: + new_run.patch() raise e finally: # Reset the old context _set_tracing_context(old_ctx) - new_run.patch() + if not is_disabled: + new_run.patch() def as_runnable(traceable_fn: Callable) -> Runnable: @@ -933,11 +938,6 @@ def _container_end( error_ = f"{repr(error)}\n\n{stacktrace}" run_tree.end(outputs=outputs_, error=error_) run_tree.patch() - if error: - try: - LOGGER.info(f"See trace: {run_tree.get_url()}") - except Exception: - pass on_end = container.get("on_end") if on_end is not None and callable(on_end): try: @@ -1027,12 +1027,7 @@ def _setup_run( ) reference_example_id = langsmith_extra.get("reference_example_id") id_ = langsmith_extra.get("run_id") - if ( - not project_cv - and not reference_example_id - and not parent_run_ - and not utils.tracing_is_enabled() - ): + if not parent_run_ and not utils.tracing_is_enabled(): utils.log_once( logging.DEBUG, "LangSmith tracing is enabled, returning original function." 
) diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index ffe997c67..69fb501be 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -98,6 +98,8 @@ def infer_defaults(cls, values: dict) -> dict: values["events"] = [] if values.get("tags") is None: values["tags"] = [] + if values.get("outputs") is None: + values["outputs"] = {} return values @root_validator(pre=False) diff --git a/python/poetry.lock b/python/poetry.lock index 2d896248c..b41d40be6 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -238,63 +238,63 @@ files = [ [[package]] name = "coverage" -version = "7.5.3" +version = "7.5.4" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"}, - {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"}, - {file = "coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"}, - {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"}, - {file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"}, - {file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", 
hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"}, - {file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"}, - {file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"}, - {file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"}, - {file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"}, - {file = "coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"}, - {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"}, - {file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"}, - {file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"}, - {file = "coverage-7.5.3-cp311-cp311-win32.whl", hash = "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"}, - {file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"}, - {file = 
"coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"}, - {file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"}, - {file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"}, - {file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"}, - {file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"}, - {file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"}, - {file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"}, - {file = "coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"}, - {file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"}, - {file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"}, - {file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"}, - {file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"}, - {file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"}, - {file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"}, - {file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"}, - {file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"}, - {file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"}, - {file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"}, - {file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"}, - {file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"}, + {file = "coverage-7.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cfb5a4f556bb51aba274588200a46e4dd6b505fb1a5f8c5ae408222eb416f99"}, + {file = "coverage-7.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2174e7c23e0a454ffe12267a10732c273243b4f2d50d07544a91198f05c48f47"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2214ee920787d85db1b6a0bd9da5f8503ccc8fcd5814d90796c2f2493a2f4d2e"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1137f46adb28e3813dec8c01fefadcb8c614f33576f672962e323b5128d9a68d"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b385d49609f8e9efc885790a5a0e89f2e3ae042cdf12958b6034cc442de428d3"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:b4a474f799456e0eb46d78ab07303286a84a3140e9700b9e154cfebc8f527016"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5cd64adedf3be66f8ccee418473c2916492d53cbafbfcff851cbec5a8454b136"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e564c2cf45d2f44a9da56f4e3a26b2236504a496eb4cb0ca7221cd4cc7a9aca9"}, + {file = "coverage-7.5.4-cp310-cp310-win32.whl", hash = "sha256:7076b4b3a5f6d2b5d7f1185fde25b1e54eb66e647a1dfef0e2c2bfaf9b4c88c8"}, + {file = "coverage-7.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:018a12985185038a5b2bcafab04ab833a9a0f2c59995b3cec07e10074c78635f"}, + {file = "coverage-7.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db14f552ac38f10758ad14dd7b983dbab424e731588d300c7db25b6f89e335b5"}, + {file = "coverage-7.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3257fdd8e574805f27bb5342b77bc65578e98cbc004a92232106344053f319ba"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a6612c99081d8d6134005b1354191e103ec9705d7ba2754e848211ac8cacc6b"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d45d3cbd94159c468b9b8c5a556e3f6b81a8d1af2a92b77320e887c3e7a5d080"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed550e7442f278af76d9d65af48069f1fb84c9f745ae249c1a183c1e9d1b025c"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a892be37ca35eb5019ec85402c3371b0f7cda5ab5056023a7f13da0961e60da"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8192794d120167e2a64721d88dbd688584675e86e15d0569599257566dec9bf0"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:820bc841faa502e727a48311948e0461132a9c8baa42f6b2b84a29ced24cc078"}, + {file = 
"coverage-7.5.4-cp311-cp311-win32.whl", hash = "sha256:6aae5cce399a0f065da65c7bb1e8abd5c7a3043da9dceb429ebe1b289bc07806"}, + {file = "coverage-7.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:d2e344d6adc8ef81c5a233d3a57b3c7d5181f40e79e05e1c143da143ccb6377d"}, + {file = "coverage-7.5.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:54317c2b806354cbb2dc7ac27e2b93f97096912cc16b18289c5d4e44fc663233"}, + {file = "coverage-7.5.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:042183de01f8b6d531e10c197f7f0315a61e8d805ab29c5f7b51a01d62782747"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6bb74ed465d5fb204b2ec41d79bcd28afccf817de721e8a807d5141c3426638"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3d45ff86efb129c599a3b287ae2e44c1e281ae0f9a9bad0edc202179bcc3a2e"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5013ed890dc917cef2c9f765c4c6a8ae9df983cd60dbb635df8ed9f4ebc9f555"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1014fbf665fef86cdfd6cb5b7371496ce35e4d2a00cda501cf9f5b9e6fced69f"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3684bc2ff328f935981847082ba4fdc950d58906a40eafa93510d1b54c08a66c"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:581ea96f92bf71a5ec0974001f900db495488434a6928a2ca7f01eee20c23805"}, + {file = "coverage-7.5.4-cp312-cp312-win32.whl", hash = "sha256:73ca8fbc5bc622e54627314c1a6f1dfdd8db69788f3443e752c215f29fa87a0b"}, + {file = "coverage-7.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:cef4649ec906ea7ea5e9e796e68b987f83fa9a718514fe147f538cfeda76d7a7"}, + {file = "coverage-7.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdd31315fc20868c194130de9ee6bfd99755cc9565edff98ecc12585b90be882"}, + {file = 
"coverage-7.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:02ff6e898197cc1e9fa375581382b72498eb2e6d5fc0b53f03e496cfee3fac6d"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05c16cf4b4c2fc880cb12ba4c9b526e9e5d5bb1d81313d4d732a5b9fe2b9d53"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5986ee7ea0795a4095ac4d113cbb3448601efca7f158ec7f7087a6c705304e4"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df54843b88901fdc2f598ac06737f03d71168fd1175728054c8f5a2739ac3e4"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab73b35e8d109bffbda9a3e91c64e29fe26e03e49addf5b43d85fc426dde11f9"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:aea072a941b033813f5e4814541fc265a5c12ed9720daef11ca516aeacd3bd7f"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:16852febd96acd953b0d55fc842ce2dac1710f26729b31c80b940b9afcd9896f"}, + {file = "coverage-7.5.4-cp38-cp38-win32.whl", hash = "sha256:8f894208794b164e6bd4bba61fc98bf6b06be4d390cf2daacfa6eca0a6d2bb4f"}, + {file = "coverage-7.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:e2afe743289273209c992075a5a4913e8d007d569a406ffed0bd080ea02b0633"}, + {file = "coverage-7.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b95c3a8cb0463ba9f77383d0fa8c9194cf91f64445a63fc26fb2327e1e1eb088"}, + {file = "coverage-7.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7564cc09dd91b5a6001754a5b3c6ecc4aba6323baf33a12bd751036c998be4"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44da56a2589b684813f86d07597fdf8a9c6ce77f58976727329272f5a01f99f7"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e16f3d6b491c48c5ae726308e6ab1e18ee830b4cdd6913f2d7f77354b33f91c8"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbc5958cb471e5a5af41b0ddaea96a37e74ed289535e8deca404811f6cb0bc3d"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a04e990a2a41740b02d6182b498ee9796cf60eefe40cf859b016650147908029"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ddbd2f9713a79e8e7242d7c51f1929611e991d855f414ca9996c20e44a895f7c"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b1ccf5e728ccf83acd313c89f07c22d70d6c375a9c6f339233dcf792094bcbf7"}, + {file = "coverage-7.5.4-cp39-cp39-win32.whl", hash = "sha256:56b4eafa21c6c175b3ede004ca12c653a88b6f922494b023aeb1e836df953ace"}, + {file = "coverage-7.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:65e528e2e921ba8fd67d9055e6b9f9e34b21ebd6768ae1c1723f4ea6ace1234d"}, + {file = "coverage-7.5.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:79b356f3dd5b26f3ad23b35c75dbdaf1f9e2450b6bcefc6d0825ea0aa3f86ca5"}, + {file = "coverage-7.5.4.tar.gz", hash = "sha256:a44963520b069e12789d0faea4e9fdb1e410cdc4aab89d94f7f55cbb7fef0353"}, ] [package.dependencies] @@ -588,38 +588,38 @@ files = [ [[package]] name = "mypy" -version = "1.10.0" +version = "1.10.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, - {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, - {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, - {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, - {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, - {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, - {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, - {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, - {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, - {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, - {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, - {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, - {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, - {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, - {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, - {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, - {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, - {file = 
"mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, - {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, - {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, - {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, - {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, - {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, - {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, - {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, - {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, - {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = 
"mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, ] [package.dependencies] @@ -700,13 +700,13 @@ files = [ [[package]] name = "openai" -version = "1.35.3" +version = "1.35.7" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.35.3-py3-none-any.whl", hash = "sha256:7b26544cef80f125431c073ffab3811d2421fbb9e30d3bd5c2436aba00b042d5"}, - {file = "openai-1.35.3.tar.gz", hash = "sha256:d6177087f150b381d49499be782d764213fdf638d391b29ca692b84dd675a389"}, + {file = "openai-1.35.7-py3-none-any.whl", hash = "sha256:3d1e0b0aac9b0db69a972d36dc7efa7563f8e8d65550b27a48f2a0c2ec207e80"}, + {file = "openai-1.35.7.tar.gz", hash = 
"sha256:009bfa1504c9c7ef64d87be55936d142325656bbc6d98c68b669d6472e4beb09"}, ] [package.dependencies] @@ -874,109 +874,121 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pydantic" -version = "2.7.4" +version = "2.8.0" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, - {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, + {file = "pydantic-2.8.0-py3-none-any.whl", hash = "sha256:ead4f3a1e92386a734ca1411cb25d94147cf8778ed5be6b56749047676d6364e"}, + {file = "pydantic-2.8.0.tar.gz", hash = "sha256:d970ffb9d030b710795878940bd0489842c638e7252fc4a19c3ae2f7da4d6141"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.4" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.0" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] [package.extras] email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.4" +version = "2.20.0" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, - {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, - {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, - {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, - {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, - {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, - {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, - {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, - {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, - {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, - {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, - {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, - {file = 
"pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, - {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, - {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, - {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, - {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, - {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, - {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, - {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, - {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, - {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, - {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, - {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, - 
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, - {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, - {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, - {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, - {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, - {file = 
"pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, - {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, - {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, - {file 
= "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, - {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, + {file = "pydantic_core-2.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e9dcd7fb34f7bfb239b5fa420033642fff0ad676b765559c3737b91f664d4fa9"}, + {file = "pydantic_core-2.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:649a764d9b0da29816889424697b2a3746963ad36d3e0968784ceed6e40c6355"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7701df088d0b05f3460f7ba15aec81ac8b0fb5690367dfd072a6c38cf5b7fdb5"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab760f17c3e792225cdaef31ca23c0aea45c14ce80d8eff62503f86a5ab76bff"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1ad5b4d73cde784cf64580166568074f5ccd2548d765e690546cff3d80937d"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b81ec2efc04fc1dbf400647d4357d64fb25543bae38d2d19787d69360aad21c9"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4a9732a5cad764ba37f3aa873dccb41b584f69c347a57323eda0930deec8e10"}, + {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dc85b9e10cc21d9c1055f15684f76fa4facadddcb6cd63abab702eb93c98943"}, + {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:21d9f7e24f63fdc7118e6cc49defaab8c1d27570782f7e5256169d77498cf7c7"}, + {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b315685832ab9287e6124b5d74fc12dda31e6421d7f6b08525791452844bc2d"}, + {file = "pydantic_core-2.20.0-cp310-none-win32.whl", hash = 
"sha256:c3dc8ec8b87c7ad534c75b8855168a08a7036fdb9deeeed5705ba9410721c84d"}, + {file = "pydantic_core-2.20.0-cp310-none-win_amd64.whl", hash = "sha256:85770b4b37bb36ef93a6122601795231225641003e0318d23c6233c59b424279"}, + {file = "pydantic_core-2.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:58e251bb5a5998f7226dc90b0b753eeffa720bd66664eba51927c2a7a2d5f32c"}, + {file = "pydantic_core-2.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78d584caac52c24240ef9ecd75de64c760bbd0e20dbf6973631815e3ef16ef8b"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5084ec9721f82bef5ff7c4d1ee65e1626783abb585f8c0993833490b63fe1792"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d0f52684868db7c218437d260e14d37948b094493f2646f22d3dda7229bbe3f"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1def125d59a87fe451212a72ab9ed34c118ff771e5473fef4f2f95d8ede26d75"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b34480fd6778ab356abf1e9086a4ced95002a1e195e8d2fd182b0def9d944d11"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42669d319db366cb567c3b444f43caa7ffb779bf9530692c6f244fc635a41eb"}, + {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53b06aea7a48919a254b32107647be9128c066aaa6ee6d5d08222325f25ef175"}, + {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1f038156b696a1c39d763b2080aeefa87ddb4162c10aa9fabfefffc3dd8180fa"}, + {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3f0f3a4a23717280a5ee3ac4fb1f81d6fde604c9ec5100f7f6f987716bb8c137"}, + {file = "pydantic_core-2.20.0-cp311-none-win32.whl", hash = 
"sha256:316fe7c3fec017affd916a0c83d6f1ec697cbbbdf1124769fa73328e7907cc2e"}, + {file = "pydantic_core-2.20.0-cp311-none-win_amd64.whl", hash = "sha256:2d06a7fa437f93782e3f32d739c3ec189f82fca74336c08255f9e20cea1ed378"}, + {file = "pydantic_core-2.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d6f8c49657f3eb7720ed4c9b26624063da14937fc94d1812f1e04a2204db3e17"}, + {file = "pydantic_core-2.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad1bd2f377f56fec11d5cfd0977c30061cd19f4fa199bf138b200ec0d5e27eeb"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed741183719a5271f97d93bbcc45ed64619fa38068aaa6e90027d1d17e30dc8d"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d82e5ed3a05f2dcb89c6ead2fd0dbff7ac09bc02c1b4028ece2d3a3854d049ce"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2ba34a099576234671f2e4274e5bc6813b22e28778c216d680eabd0db3f7dad"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:879ae6bb08a063b3e1b7ac8c860096d8fd6b48dd9b2690b7f2738b8c835e744b"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0eefc7633a04c0694340aad91fbfd1986fe1a1e0c63a22793ba40a18fcbdc8"}, + {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73deadd6fd8a23e2f40b412b3ac617a112143c8989a4fe265050fd91ba5c0608"}, + {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:35681445dc85446fb105943d81ae7569aa7e89de80d1ca4ac3229e05c311bdb1"}, + {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0f6dd3612a3b9f91f2e63924ea18a4476656c6d01843ca20a4c09e00422195af"}, + {file = "pydantic_core-2.20.0-cp312-none-win32.whl", hash = 
"sha256:7e37b6bb6e90c2b8412b06373c6978d9d81e7199a40e24a6ef480e8acdeaf918"}, + {file = "pydantic_core-2.20.0-cp312-none-win_amd64.whl", hash = "sha256:7d4df13d1c55e84351fab51383520b84f490740a9f1fec905362aa64590b7a5d"}, + {file = "pydantic_core-2.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d43e7ab3b65e4dc35a7612cfff7b0fd62dce5bc11a7cd198310b57f39847fd6c"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6a24d7b5893392f2b8e3b7a0031ae3b14c6c1942a4615f0d8794fdeeefb08b"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2f13c3e955a087c3ec86f97661d9f72a76e221281b2262956af381224cfc243"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72432fd6e868c8d0a6849869e004b8bcae233a3c56383954c228316694920b38"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d70a8ff2d4953afb4cbe6211f17268ad29c0b47e73d3372f40e7775904bc28fc"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e49524917b8d3c2f42cd0d2df61178e08e50f5f029f9af1f402b3ee64574392"}, + {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4f0f71653b1c1bad0350bc0b4cc057ab87b438ff18fa6392533811ebd01439c"}, + {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:16197e6f4fdecb9892ed2436e507e44f0a1aa2cff3b9306d1c879ea2f9200997"}, + {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:763602504bf640b3ded3bba3f8ed8a1cc2fc6a87b8d55c1c5689f428c49c947e"}, + {file = "pydantic_core-2.20.0-cp313-none-win32.whl", hash = "sha256:a3f243f318bd9523277fa123b3163f4c005a3e8619d4b867064de02f287a564d"}, + {file = "pydantic_core-2.20.0-cp313-none-win_amd64.whl", hash = 
"sha256:03aceaf6a5adaad3bec2233edc5a7905026553916615888e53154807e404545c"}, + {file = "pydantic_core-2.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d6f2d8b8da1f03f577243b07bbdd3412eee3d37d1f2fd71d1513cbc76a8c1239"}, + {file = "pydantic_core-2.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a272785a226869416c6b3c1b7e450506152d3844207331f02f27173562c917e0"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efbb412d55a4ffe73963fed95c09ccb83647ec63b711c4b3752be10a56f0090b"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e4f46189d8740561b43655263a41aac75ff0388febcb2c9ec4f1b60a0ec12f3"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3df115f4a3c8c5e4d5acf067d399c6466d7e604fc9ee9acbe6f0c88a0c3cf"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a340d2bdebe819d08f605e9705ed551c3feb97e4fd71822d7147c1e4bdbb9508"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:616b9c2f882393d422ba11b40e72382fe975e806ad693095e9a3b67c59ea6150"}, + {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25c46bb2ff6084859bbcfdf4f1a63004b98e88b6d04053e8bf324e115398e9e7"}, + {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23425eccef8f2c342f78d3a238c824623836c6c874d93c726673dbf7e56c78c0"}, + {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:52527e8f223ba29608d999d65b204676398009725007c9336651c2ec2d93cffc"}, + {file = "pydantic_core-2.20.0-cp38-none-win32.whl", hash = "sha256:1c3c5b7f70dd19a6845292b0775295ea81c61540f68671ae06bfe4421b3222c2"}, + {file = "pydantic_core-2.20.0-cp38-none-win_amd64.whl", hash = "sha256:8093473d7b9e908af1cef30025609afc8f5fd2a16ff07f97440fd911421e4432"}, + 
{file = "pydantic_core-2.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ee7785938e407418795e4399b2bf5b5f3cf6cf728077a7f26973220d58d885cf"}, + {file = "pydantic_core-2.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e75794883d635071cf6b4ed2a5d7a1e50672ab7a051454c76446ef1ebcdcc91"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:344e352c96e53b4f56b53d24728217c69399b8129c16789f70236083c6ceb2ac"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:978d4123ad1e605daf1ba5e01d4f235bcf7b6e340ef07e7122e8e9cfe3eb61ab"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c05eaf6c863781eb834ab41f5963604ab92855822a2062897958089d1335dad"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc7e43b4a528ffca8c9151b6a2ca34482c2fdc05e6aa24a84b7f475c896fc51d"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658287a29351166510ebbe0a75c373600cc4367a3d9337b964dada8d38bcc0f4"}, + {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1dacf660d6de692fe351e8c806e7efccf09ee5184865893afbe8e59be4920b4a"}, + {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e147fc6e27b9a487320d78515c5f29798b539179f7777018cedf51b7749e4f4"}, + {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c867230d715a3dd1d962c8d9bef0d3168994ed663e21bf748b6e3a529a129aab"}, + {file = "pydantic_core-2.20.0-cp39-none-win32.whl", hash = "sha256:22b813baf0dbf612752d8143a2dbf8e33ccb850656b7850e009bad2e101fc377"}, + {file = "pydantic_core-2.20.0-cp39-none-win_amd64.whl", hash = "sha256:3a7235b46c1bbe201f09b6f0f5e6c36b16bad3d0532a10493742f91fbdc8035f"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:cafde15a6f7feaec2f570646e2ffc5b73412295d29134a29067e70740ec6ee20"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2aec8eeea0b08fd6bc2213d8e86811a07491849fd3d79955b62d83e32fa2ad5f"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840200827984f1c4e114008abc2f5ede362d6e11ed0b5931681884dd41852ff1"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ea1d8b7df522e5ced34993c423c3bf3735c53df8b2a15688a2f03a7d678800"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5b8376a867047bf08910573deb95d3c8dfb976eb014ee24f3b5a61ccc5bee1b"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d08264b4460326cefacc179fc1411304d5af388a79910832835e6f641512358b"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a3639011c2e8a9628466f616ed7fb413f30032b891898e10895a0a8b5857d6c"}, + {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:05e83ce2f7eba29e627dd8066aa6c4c0269b2d4f889c0eba157233a353053cea"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:603a843fea76a595c8f661cd4da4d2281dff1e38c4a836a928eac1a2f8fe88e4"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac76f30d5d3454f4c28826d891fe74d25121a346c69523c9810ebba43f3b1cec"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e3b1d4b1b3f6082849f9b28427ef147a5b46a6132a3dbaf9ca1baa40c88609"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2761f71faed820e25ec62eacba670d1b5c2709bb131a19fcdbfbb09884593e5a"}, + {file = 
"pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0586cddbf4380e24569b8a05f234e7305717cc8323f50114dfb2051fcbce2a3"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b8c46a8cf53e849eea7090f331ae2202cd0f1ceb090b00f5902c423bd1e11805"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b4a085bd04af7245e140d1b95619fe8abb445a3d7fdf219b3f80c940853268ef"}, + {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:116b326ac82c8b315e7348390f6d30bcfe6e688a7d3f1de50ff7bcc2042a23c2"}, + {file = "pydantic_core-2.20.0.tar.gz", hash = "sha256:366be8e64e0cb63d87cf79b4e1765c0703dd6313c729b22e7b9e378db6b96877"}, ] [package.dependencies] @@ -1349,13 +1361,13 @@ types-urllib3 = "*" [[package]] name = "types-requests" -version = "2.32.0.20240602" +version = "2.32.0.20240622" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240602.tar.gz", hash = "sha256:3f98d7bbd0dd94ebd10ff43a7fbe20c3b8528acace6d8efafef0b6a184793f06"}, - {file = "types_requests-2.32.0.20240602-py3-none-any.whl", hash = "sha256:ed3946063ea9fbc6b5fc0c44fa279188bae42d582cb63760be6cb4b9d06c3de8"}, + {file = "types-requests-2.32.0.20240622.tar.gz", hash = "sha256:ed5e8a412fcc39159d6319385c009d642845f250c63902718f605cd90faade31"}, + {file = "types_requests-2.32.0.20240622-py3-none-any.whl", hash = "sha256:97bac6b54b5bd4cf91d407e62f0932a74821bc2211f22116d9ee1dd643826caf"}, ] [package.dependencies] diff --git a/python/pyproject.toml b/python/pyproject.toml index 274754add..97a359f4c 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -26,7 +26,10 @@ langsmith = "langsmith.cli.main:main" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -pydantic = [{version = ">=1,<3", python = "<3.12.4"}, {version = "^2.7.4", python=">=3.12.4"}] +pydantic = [ + { version = ">=1,<3", python = 
"<3.12.4" }, + { version = "^2.7.4", python = ">=3.12.4" }, +] requests = "^2" orjson = "^3.9.14" @@ -92,10 +95,7 @@ docstring-code-format = true docstring-code-line-length = 80 [tool.mypy] -plugins = [ - "pydantic.v1.mypy", - "pydantic.mypy", -] +plugins = ["pydantic.v1.mypy", "pydantic.mypy"] ignore_missing_imports = "True" disallow_untyped_defs = "True" diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 980d410cf..f706407ab 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -562,14 +562,6 @@ def test_batch_ingest_runs(langchain_client: Client) -> None: assert run3.inputs == {"input1": 1, "input2": 2} assert run3.error == "error" - # read the project - result = langchain_client.read_project(project_name=_session) - assert result.error_rate > 0 - assert result.first_token_p50 is None - assert result.first_token_p99 is None - - langchain_client.delete_project(project_name=_session) - @freeze_time("2023-01-01") def test_get_info() -> None: diff --git a/python/tests/integration_tests/test_runs.py b/python/tests/integration_tests/test_runs.py index 165a0cf6f..ddbce85ac 100644 --- a/python/tests/integration_tests/test_runs.py +++ b/python/tests/integration_tests/test_runs.py @@ -1,5 +1,6 @@ import asyncio import time +import uuid from collections import defaultdict from concurrent.futures import ThreadPoolExecutor from typing import AsyncGenerator, Generator, Optional @@ -24,11 +25,14 @@ def poll_runs_until_count( max_retries: int = 10, sleep_time: int = 2, require_success: bool = True, + filter_: Optional[str] = None, ): retries = 0 while retries < max_retries: try: - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list( + langchain_client.list_runs(project_name=project_name, filter=filter_) + ) if len(runs) == count: if not require_success or all( [run.status == "success" for run in runs] @@ -45,8 +49,7 @@ def 
test_nested_runs( langchain_client: Client, ): project_name = "__My Tracer Project - test_nested_runs" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") def my_run(text: str): @@ -61,10 +64,20 @@ def my_llm_run(text: str): def my_chain_run(text: str): return my_run(text) - my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + my_chain_run( + "foo", + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) for _ in range(15): try: - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list( + langchain_client.list_runs( + project_name=project_name, + filter=f"and(eq(metadata_key,'test_run'),eq(metadata_value,'{run_meta}'))", + ) + ) assert len(runs) == 3 break except (ls_utils.LangSmithError, AssertionError): @@ -81,10 +94,6 @@ def my_chain_run(text: str): assert runs_dict["my_llm_run"].parent_run_id == runs_dict["my_run"].id assert runs_dict["my_llm_run"].run_type == "llm" assert runs_dict["my_llm_run"].inputs == {"text": "foo"} - try: - langchain_client.delete_project(project_name=project_name) - except Exception: - pass async def test_list_runs_multi_project(langchain_client: Client): @@ -92,28 +101,32 @@ async def test_list_runs_multi_project(langchain_client: Client): "__My Tracer Project - test_list_runs_multi_project", "__My Tracer Project - test_list_runs_multi_project2", ] - try: - for project_name in project_names: - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) - - @traceable(run_type="chain") - async def my_run(text: str): - return "Completed: " + text - - for project_name in project_names: - await my_run("foo", langsmith_extra=dict(project_name=project_name)) - poll_runs_until_count(langchain_client, project_names[0], 1) - poll_runs_until_count(langchain_client, project_names[1], 1) - runs = 
list(langchain_client.list_runs(project_name=project_names)) - assert len(runs) == 2 - assert all([run.outputs["output"] == "Completed: foo" for run in runs]) # type: ignore - assert runs[0].session_id != runs[1].session_id - - finally: - for project_name in project_names: - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + + @traceable(run_type="chain") + async def my_run(text: str): + return "Completed: " + text + + run_meta = uuid.uuid4().hex + for project_name in project_names: + await my_run( + "foo", + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + + poll_runs_until_count(langchain_client, project_names[0], 1, filter_=filter_) + poll_runs_until_count(langchain_client, project_names[1], 1, filter_=filter_) + runs = list( + langchain_client.list_runs( + project_name=project_names, + filter=filter_, + ) + ) + assert len(runs) == 2 + assert all([run.outputs["output"] == "Completed: foo" for run in runs]) # type: ignore + assert runs[0].session_id != runs[1].session_id async def test_nested_async_runs(langchain_client: Client): diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index f731fb892..d0998986d 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -13,6 +13,7 @@ import langsmith from langsmith import Client +from langsmith import schemas as ls_schemas from langsmith.run_helpers import ( _get_inputs, as_runnable, @@ -1121,3 +1122,128 @@ def parent(inputs: dict) -> dict: assert parent_patch["id"] == parent_uid assert parent_patch["outputs"] == expected_at_stage["parent_output"] assert parent_patch["inputs"] == expected_at_stage["parent_input"] + + +def test_trace_respects_tracing_context(): + mock_client = _get_mock_client() + with tracing_context(enabled=False): 
+ with trace(name="foo", inputs={"a": 1}, client=mock_client): + pass + + mock_calls = _get_calls(mock_client) + assert not mock_calls + + +def test_trace_nested_enable_disable(): + # Test that you can disable then re-enable tracing + # and the trace connects as expected + mock_client = _get_mock_client() + with tracing_context(enabled=True): + with trace(name="foo", inputs={"a": 1}, client=mock_client) as run: + with tracing_context(enabled=False): + with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2: + with tracing_context(enabled=True): + with trace( + name="baz", inputs={"c": 3}, client=mock_client + ) as run3: + run3.end(outputs={"c": 3}) + run2.end(outputs={"b": 2}) + run.end(outputs={"a": 1}) + + # Now we need to ensure that there are 2 runs created (2 posts and 2 patches), + # run -> run3 + # with run2 being invisible + mock_calls = _get_calls(mock_client, verbs={"POST", "PATCH"}) + datas = [json.loads(mock_post.kwargs["data"]) for mock_post in mock_calls] + assert "post" in datas[0] + posted = datas[0]["post"] + assert len(posted) == 2 + assert posted[0]["name"] == "foo" + assert posted[1]["name"] == "baz" + dotted_parts = posted[1]["dotted_order"].split(".") + assert len(dotted_parts) == 2 + parent_dotted = posted[0]["dotted_order"] + assert parent_dotted == dotted_parts[0] + + +def test_tracing_disabled_project_name_set(): + mock_client = _get_mock_client() + + @traceable + def foo(a: int) -> int: + return a + + with tracing_context(enabled=False): + with trace( + name="foo", inputs={"a": 1}, client=mock_client, project_name="my_project" + ): + pass + foo(1, langsmith_extra={"client": mock_client, "project_name": "my_project"}) + + mock_calls = _get_calls(mock_client) + assert not mock_calls + + +@pytest.mark.parametrize("auto_batch_tracing", [True, False]) +async def test_traceable_async_exception(auto_batch_tracing: bool): + mock_client = _get_mock_client( + auto_batch_tracing=auto_batch_tracing, + info=ls_schemas.LangSmithInfo( + 
batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + + @traceable + async def my_function(a: int) -> int: + raise ValueError("foo") + + with tracing_context(enabled=True): + with pytest.raises(ValueError, match="foo"): + await my_function(1, langsmith_extra={"client": mock_client}) + + # Get ALL the call args for the mock_client + num_calls = 1 if auto_batch_tracing else 2 + mock_calls = _get_calls( + mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls + ) + assert len(mock_calls) == num_calls + + +@pytest.mark.parametrize("auto_batch_tracing", [True, False]) +async def test_traceable_async_gen_exception(auto_batch_tracing: bool): + mock_client = _get_mock_client( + auto_batch_tracing=auto_batch_tracing, + info=ls_schemas.LangSmithInfo( + batch_ingest_config=ls_schemas.BatchIngestConfig( + size_limit_bytes=None, # Note this field is not used here + size_limit=100, + scale_up_nthreads_limit=16, + scale_up_qsize_trigger=1000, + scale_down_nempty_trigger=4, + ) + ), + ) + + @traceable + async def my_function(a: int) -> AsyncGenerator[int, None]: + for i in range(5): + yield i + raise ValueError("foo") + + with tracing_context(enabled=True): + with pytest.raises(ValueError, match="foo"): + async for _ in my_function(1, langsmith_extra={"client": mock_client}): + pass + + # Get ALL the call args for the mock_client + num_calls = 1 if auto_batch_tracing else 2 + mock_calls = _get_calls( + mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls + ) + assert len(mock_calls) == num_calls From e4357500b971c7d1f334849b31c5bbf4279cba39 Mon Sep 17 00:00:00 2001 From: David Duong Date: Mon, 8 Jul 2024 03:20:29 +0200 Subject: [PATCH 205/373] feat(ci): autopublish JS package (#841) --- .github/workflows/release_js.yml | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 
deletions(-) diff --git a/.github/workflows/release_js.yml b/.github/workflows/release_js.yml index 5f2ac7294..4f1aee583 100644 --- a/.github/workflows/release_js.yml +++ b/.github/workflows/release_js.yml @@ -1,6 +1,11 @@ name: JS Release on: + push: + branches: + - main + paths: + - "js/package.json" workflow_dispatch: jobs: @@ -11,33 +16,34 @@ jobs: permissions: contents: write id-token: write + defaults: + run: + working-directory: "js" steps: - uses: actions/checkout@v3 # JS Build - - name: Use Node.js ${{ matrix.node-version }} + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: ${{ matrix.node-version }} + node-version: 20.x cache: "yarn" cache-dependency-path: "js/yarn.lock" - name: Install dependencies - run: cd js && yarn install --immutable + run: yarn install --immutable - name: Build - run: cd js && yarn run build + run: yarn run build - name: Check version - run: cd js && yarn run check-version + run: yarn run check-version - name: Check NPM version id: check_npm_version run: | - cd js if yarn run check-npm-version; then - echo "::set-output name=should_publish::true" + echo "should_publish=true" >> $GITHUB_OUTPUT else - echo "::set-output name=should_publish::false" + echo "should_publish=false" >> $GITHUB_OUTPUT fi - name: Publish package to NPM if: steps.check_npm_version.outputs.should_publish == 'true' run: | - cd js echo "//registry.npmjs.org/:_authToken=${{ secrets.NPM_TOKEN }}" > .npmrc yarn publish --non-interactive From 6ac13f3c0659aeae34fda3a4417b4595fc549b7a Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Mon, 8 Jul 2024 02:38:04 +0100 Subject: [PATCH 206/373] py: Fix traceable decorator blocking asyncio event loop (#849) - currently happens in two situations - for errored runs, when fetching the langsmith run url for a debug print statement (which most people won't even want in production, but thats a separate issue) - for all runs, when batch tracing is turned off --------- Co-authored-by: William Fu-Hinthorn 
<13333726+hinthornw@users.noreply.github.com> --- python/langsmith/run_helpers.py | 36 ++++++++++--- python/pyproject.toml | 2 +- python/tests/integration_tests/test_runs.py | 56 ++++++++++++++------- python/tests/unit_tests/test_run_helpers.py | 12 ++++- 4 files changed, 78 insertions(+), 28 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index ec4dbac97..88b8a7158 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -432,7 +432,8 @@ async def async_wrapper( **kwargs: Any, ) -> Any: """Async version of wrapper function.""" - run_container = _setup_run( + run_container = await _aio_to_thread( + _setup_run, func, container_input=container_input, langsmith_extra=langsmith_extra, @@ -458,16 +459,20 @@ async def async_wrapper( ): function_result = await fr_coro except BaseException as e: - _container_end(run_container, error=e) + # shield from cancellation, given we're catching all exceptions + await asyncio.shield( + _aio_to_thread(_container_end, run_container, error=e) + ) raise e - _container_end(run_container, outputs=function_result) + await _aio_to_thread(_container_end, run_container, outputs=function_result) return function_result @functools.wraps(func) async def async_generator_wrapper( *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any ) -> AsyncGenerator: - run_container = _setup_run( + run_container = await _aio_to_thread( + _setup_run, func, container_input=container_input, langsmith_extra=langsmith_extra, @@ -526,7 +531,9 @@ async def async_generator_wrapper( except StopAsyncIteration: pass except BaseException as e: - _container_end(run_container, error=e) + await asyncio.shield( + _aio_to_thread(_container_end, run_container, error=e) + ) raise e if results: if reduce_fn: @@ -539,7 +546,7 @@ async def async_generator_wrapper( function_result = results else: function_result = None - _container_end(run_container, outputs=function_result) + await 
_aio_to_thread(_container_end, run_container, outputs=function_result) @functools.wraps(func) def wrapper( @@ -1159,3 +1166,20 @@ def _get_inputs_safe( except BaseException as e: LOGGER.debug(f"Failed to get inputs for {signature}: {e}") return {"args": args, "kwargs": kwargs} + + +# Ported from Python 3.9+ to support Python 3.8 +async def _aio_to_thread(func, /, *args, **kwargs): + """Asynchronously run function *func* in a separate thread. + + Any *args and **kwargs supplied for this function are directly passed + to *func*. Also, the current :class:`contextvars.Context` is propagated, + allowing context variables from the main thread to be accessed in the + separate thread. + + Return a coroutine that can be awaited to get the eventual result of *func*. + """ + loop = asyncio.get_running_loop() + ctx = contextvars.copy_context() + func_call = functools.partial(ctx.run, func, *args, **kwargs) + return await loop.run_in_executor(None, func_call) diff --git a/python/pyproject.toml b/python/pyproject.toml index 97a359f4c..925fcf475 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.83" +version = "0.1.84" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/integration_tests/test_runs.py b/python/tests/integration_tests/test_runs.py index ddbce85ac..405571dee 100644 --- a/python/tests/integration_tests/test_runs.py +++ b/python/tests/integration_tests/test_runs.py @@ -132,8 +132,6 @@ async def my_run(text: str): async def test_nested_async_runs(langchain_client: Client): """Test nested runs with a mix of async and sync functions.""" project_name = "__My Tracer Project - test_nested_async_runs" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) executor = ThreadPoolExecutor(max_workers=1) @traceable(run_type="chain") @@ -156,10 +154,15 @@ def my_sync_tool(text: str, *, my_arg: int = 10): async def my_chain_run(text: str): return await my_run(text) - await my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + meta = uuid.uuid4().hex + await my_chain_run( + "foo", + langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}), + ) executor.shutdown(wait=True) - poll_runs_until_count(langchain_client, project_name, 4) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 4, filter_=_filter) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) assert len(runs) == 4 runs_dict = {run.name: run for run in runs} assert runs_dict["my_chain_run"].parent_run_id is None @@ -175,14 +178,11 @@ async def my_chain_run(text: str): "text": "foo", "my_arg": 20, } - langchain_client.delete_project(project_name=project_name) async def test_nested_async_runs_with_threadpool(langchain_client: Client): """Test nested runs with a mix of async and sync functions.""" project_name = "__My Tracer Project - test_nested_async_runs_with_threadpol" - if langchain_client.has_project(project_name): - 
langchain_client.delete_project(project_name=project_name) @traceable(run_type="llm") async def async_llm(text: str): @@ -204,7 +204,12 @@ def my_run(text: str, *, run_tree: Optional[RunTree] = None): thread_pool = ThreadPoolExecutor(max_workers=1) for i in range(3): thread_pool.submit( - my_tool_run, f"Child Tool {i}", langsmith_extra={"run_tree": run_tree} + my_tool_run, + f"Child Tool {i}", + langsmith_extra={ + "run_tree": run_tree, + "metadata": getattr(run_tree, "metadata", {}), + }, ) thread_pool.shutdown(wait=True) return llm_run_result @@ -216,16 +221,27 @@ async def my_chain_run(text: str, run_tree: RunTree): thread_pool = ThreadPoolExecutor(max_workers=3) for i in range(2): thread_pool.submit( - my_run, f"Child {i}", langsmith_extra=dict(run_tree=run_tree) + my_run, + f"Child {i}", + langsmith_extra=dict(run_tree=run_tree, metadata=run_tree.metadata), ) thread_pool.shutdown(wait=True) return text - await my_chain_run("foo", langsmith_extra=dict(project_name=project_name)) + meta = uuid.uuid4().hex + await my_chain_run( + "foo", + langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}), + ) executor.shutdown(wait=True) - poll_runs_until_count(langchain_client, project_name, 17) - runs = list(langchain_client.list_runs(project_name=project_name)) - trace_runs = list(langchain_client.list_runs(trace_id=runs[0].trace_id)) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 17, filter_=filter_) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) + trace_runs = list( + langchain_client.list_runs( + trace_id=runs[0].trace_id, project_name=project_name, filter=filter_ + ) + ) assert len(trace_runs) == 17 assert len(runs) == 17 assert sum([run.run_type == "llm" for run in runs]) == 8 @@ -257,14 +273,15 @@ async def my_chain_run(text: str, run_tree: RunTree): async def test_context_manager(langchain_client: Client) -> None: 
project_name = "__My Tracer Project - test_context_manager" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) @traceable(run_type="llm") async def my_llm(prompt: str) -> str: return f"LLM {prompt}" - with trace("my_context", "chain", project_name=project_name) as run_tree: + meta = uuid.uuid4().hex + with trace( + "my_context", "chain", project_name=project_name, metadata={"test_run": meta} + ) as run_tree: await my_llm("foo") with trace("my_context2", "chain", run_tree=run_tree) as run_tree2: runs = [my_llm("baz"), my_llm("qux")] @@ -273,8 +290,9 @@ async def my_llm(prompt: str) -> str: await my_llm("corge") await asyncio.gather(*runs) run_tree.end(outputs={"End val": "my_context2"}) - poll_runs_until_count(langchain_client, project_name, 8) - runs_ = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))' + poll_runs_until_count(langchain_client, project_name, 8, filter_=_filter) + runs_ = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) assert len(runs_) == 8 diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index d0998986d..ee2029145 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -264,9 +264,17 @@ async def my_iterator_fn(a, b, d, **kwargs): assert call.args[1].startswith("https://api.smith.langchain.com") body = json.loads(call.kwargs["data"]) assert body["post"] - assert body["post"][0]["outputs"]["output"] == expected - # Assert the inputs are filtered as expected assert body["post"][0]["inputs"] == {"a": "FOOOOOO", "b": 2, "d": 3} + outputs_ = body["post"][0]["outputs"] + if "output" in outputs_: + assert outputs_["output"] == expected + # Assert the inputs are filtered as expected + else: + # It was put in the second batch + assert len(mock_calls) == 2 + body_2 = 
json.loads(mock_calls[1].kwargs["data"]) + assert body_2["patch"] + assert body_2["patch"][0]["outputs"]["output"] == expected @patch("langsmith.run_trees.Client", autospec=True) From 0162d8b12985edd8c5846db8b4f03463072276e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 10:21:47 -0700 Subject: [PATCH 207/373] chore(deps): bump certifi from 2024.6.2 to 2024.7.4 in /python (#854) Bumps [certifi](https://github.com/certifi/python-certifi) from 2024.6.2 to 2024.7.4.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=certifi&package-manager=pip&previous-version=2024.6.2&new-version=2024.7.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/langchain-ai/langsmith-sdk/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- python/poetry.lock | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/python/poetry.lock b/python/poetry.lock index b41d40be6..d347f4525 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "annotated-types" @@ -103,13 +103,13 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2024.6.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"}, - {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, ] [[package]] @@ -1157,6 +1157,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1164,8 +1165,16 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1182,6 +1191,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1189,6 +1199,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, From c61aae0467e3a166809f065aeae44541a7ce8e1c Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 9 Jul 2024 17:46:03 -0700 Subject: [PATCH 208/373] feat: move hub sdk functionality to langsmith sdk --- python/langsmith/client.py | 175 +++++++++++++++++++++++++++++++++++ python/langsmith/schemas.py | 38 ++++++++ python/langsmith/utils.py | 25 +++++ python/tests/hub/__init__.py | 0 4 files changed, 238 insertions(+) create mode 100644 python/tests/hub/__init__.py diff --git a/python/langsmith/client.py b/python/langsmith/client.py index bd39b0e5a..e67ec8e21 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4561,6 +4561,181 @@ def _evaluate_strings( ) + def get_settings(self): + res = requests.get( + f"{self.api_url}/settings", + headers=self._headers, + ) + res.raise_for_status() + return res.json() + + + def list_prompts(self, limit: int = 
100, offset: int = 0) -> ls_schemas.ListPromptsResponse: + res = requests.get( + f"{self.api_url}/repos?limit={limit}&offset={offset}", + headers=self._headers, + ) + res.raise_for_status() + res_dict = res.json() + return ls_schemas.ListPromptsResponse(**res_dict) + + + def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: + res = requests.get( + f"{self.api_url}/repos/{prompt_identifier}", + headers=self._headers, + ) + res.raise_for_status() + prompt = res.json()['repo'] + return ls_schemas.Prompt(**prompt) + + + def create_prompt( + self, prompt_name: str, *, description: str = "", is_public: bool = True + ): + json = { + "repo_handle": prompt_name, + "is_public": is_public, + "description": description, + } + res = requests.post( + f"{self.api_url}/repos/", + headers=self._headers, + json=json, + ) + res.raise_for_status() + return res.json() + + + def list_commits(self, prompt_name: str, limit: int = 100, offset: int = 0): + res = requests.get( + f"{self.api_url}/commits/{prompt_name}/?limit={limit}&offset={offset}", + headers=self._headers, + ) + res.raise_for_status() + return res.json() + + + def _get_latest_commit_hash(self, prompt_identifier: str) -> Optional[str]: + commits_resp = self.list_commits(prompt_identifier) + commits = commits_resp["commits"] + if len(commits) == 0: + return None + return commits[0]["commit_hash"] + + + def pull_prompt( + self, + prompt_identifier: str, + ) -> ls_schemas.PromptManifest: + """Pull a prompt from the LangSmith API. + + Args: + prompt_identifier: The identifier of the prompt (str, ex. 
"prompt_name", "owner/prompt_name", "owner/prompt_name:commit_hash") + + Yields: + Prompt + The prompt + """ + LS_VERSION_WITH_OPTIMIZATION="0.5.23" + use_optimization = ls_utils.is_version_greater_or_equal( + current_version=self.info.version, target_version=LS_VERSION_WITH_OPTIMIZATION + ) + + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(prompt_identifier) + + if not use_optimization: + if commit_hash is None or commit_hash == "latest": + commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") + if commit_hash is None: + raise ValueError("No commits found") + + res = requests.get( + f"{self.api_url}/commits/{owner}/{prompt_name}/{commit_hash}", + headers=self._headers, + ) + res.raise_for_status() + result = res.json() + return ls_schemas.PromptManifest(**{"owner": owner, "repo": prompt_name, **result}) + + def push_prompt( + self, + prompt_identifier: str, + manifest_json: Any, + *, + parent_commit_hash: Optional[str] = "latest", + is_public: bool = False, + description: str = "", + ) -> str: + """Push a prompt to the LangSmith API. + + Args: + prompt_name: The name of the prompt + manifest_json: The JSON string of the prompt manifest + parent_commit_hash: The commit hash of the parent commit + is_public: Whether the new prompt is public + description: The description of the new prompt + """ + from langchain_core.load.dump import dumps + manifest_json = dumps(manifest_json) + settings = self.get_settings() + if is_public: + if not settings["tenant_handle"]: + raise ValueError( + """ + Cannot create public prompt without first creating a LangChain Hub handle. + + You can add a handle by creating a public prompt at: + https://smith.langchain.com/prompts + + This is a workspace-level handle and will be associated with all of your workspace's public prompts in the LangChain Hub. 
+ """ + ) + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + prompt_full_name = f"{owner}/{prompt_name}" + try: + # check if the prompt exists + _ = self.get_prompt(prompt_full_name) + except requests.exceptions.HTTPError as e: + if e.response.status_code != 404: + raise e + # create prompt if it doesn't exist + # make sure I am owner if owner is specified + if ( + settings["tenant_handle"] + and owner != "-" + and settings["tenant_handle"] != owner + ): + raise ValueError( + f"Tenant {settings['tenant_handle']} is not the owner of repo {prompt_identifier}" + ) + self.create_prompt( + prompt_name, + is_public=is_public, + description=description, + ) + + manifest_dict = json.loads(manifest_json) + if parent_commit_hash == "latest": + parent_commit_hash = self._get_latest_commit_hash(prompt_full_name) + print('dict to submit', manifest_dict) + request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} + res = requests.post( + f"{self.api_url}/commits/{prompt_full_name}", + headers=self._headers, + json=request_dict, + ) + res.raise_for_status() + res = res.json() + commit_hash = res["commit"]["commit_hash"] + short_hash = commit_hash[:8] + url = ( + self._host_url + + f"/prompts/{prompt_name}/{short_hash}?organizationId={settings['id']}" + ) + return url + + def _tracing_thread_drain_queue( tracing_queue: Queue, limit: int = 100, block: bool = True ) -> List[TracingQueueItem]: diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 453aa13de..d8ea947bd 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -744,3 +744,41 @@ def metadata(self) -> dict[str, Any]: if self.extra is None or "metadata" not in self.extra: return {} return self.extra["metadata"] + + +class PromptManifest(BaseModel): + owner: str + repo: str + commit_hash: str + manifest: Dict[str, Any] + examples: List[dict] + + +class Prompt(BaseModel): + repo_handle: str + description: str | None + readme: str | 
None + id: str + tenant_id: str + created_at: datetime + updated_at: datetime + is_public: bool + is_archived: bool + tags: List[str] + original_repo_id: str | None + upstream_repo_id: str | None + owner: str + full_name: str + num_likes: int + num_downloads: int + num_views: int + liked_by_auth_user: bool + last_commit_hash: str | None + num_commits: int + original_repo_full_name: str | None + upstream_repo_full_name: str | None + + +class ListPromptsResponse(BaseModel): + repos: List[Prompt] + total: int diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 2c0152e0f..b8ad4f6fc 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -561,3 +561,28 @@ def deepish_copy(val: T) -> T: # what we can _LOGGER.debug("Failed to deepcopy input: %s", repr(e)) return _middle_copy(val, memo) + + +def is_version_greater_or_equal(current_version, target_version): + from packaging import version + + current = version.parse(current_version) + target = version.parse(target_version) + return current >= target + + +def parse_prompt_identifier(identifier: str) -> Tuple[str, str, str]: + """ + Parses a string in the format of `owner/repo:commit` and returns a tuple of + (owner, repo, commit). + """ + owner_prompt = identifier + commit = "latest" + if ":" in identifier: + owner_prompt, commit = identifier.split(":", 1) + + if "/" not in owner_prompt: + return "-", owner_prompt, commit + + owner, prompt = owner_prompt.split("/", 1) + return owner, prompt, commit diff --git a/python/tests/hub/__init__.py b/python/tests/hub/__init__.py new file mode 100644 index 000000000..e69de29bb From 86e3701a101b42036a4674e681433e7a330112e3 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 10 Jul 2024 09:42:29 -0700 Subject: [PATCH 209/373] Permit null run id in feedback create (#862) To fix aggregate feedback. 
--- python/langsmith/client.py | 4 +++- python/langsmith/run_trees.py | 3 +++ python/pyproject.toml | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index bd39b0e5a..2b7647cf6 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3660,7 +3660,9 @@ def create_feedback( feedback_source.metadata["__run"] = _run_meta feedback = ls_schemas.FeedbackCreate( id=_ensure_uuid(feedback_id), - run_id=_ensure_uuid(run_id), + # If run_id is None, this is interpreted as session-level + # feedback. + run_id=_ensure_uuid(run_id, accept_null=True), key=key, score=score, value=value, diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index 69fb501be..c2df73964 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -352,6 +352,9 @@ def from_runnable_config( kwargs["outputs"] = run.outputs kwargs["start_time"] = run.start_time kwargs["end_time"] = run.end_time + extra_ = kwargs.setdefault("extra", {}) + metadata_ = extra_.setdefault("metadata", {}) + metadata_.update(run.metadata) elif hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map: dotted_order = tracer.order_map[cb.parent_run_id][1] else: diff --git a/python/pyproject.toml b/python/pyproject.toml index 925fcf475..a2b1a1183 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.84" +version = "0.1.85" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" From 95eee541633698f94123bbad18cdfbfb7874a247 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Wed, 10 Jul 2024 13:00:46 -0700 Subject: [PATCH 210/373] updates --- python/langsmith/client.py | 91 ++++++++++++++++++++++---------------- 1 file changed, 54 insertions(+), 37 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index e67ec8e21..6d092f335 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4590,9 +4590,21 @@ def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: return ls_schemas.Prompt(**prompt) + def current_tenant_is_owner(self, owner: str) -> bool: + settings = self.get_settings() + if owner != "-" and settings["tenant_handle"] != owner: + return False + return True + def create_prompt( - self, prompt_name: str, *, description: str = "", is_public: bool = True + self, owner: str, prompt_name: str, *, description: str = "", is_public: bool = True ): + if not self.current_tenant_is_owner(owner): + settings = self.get_settings() + raise ValueError( + f"Cannot create prompt for another tenant. Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}" + ) + json = { "repo_handle": prompt_name, "is_public": is_public, @@ -4622,7 +4634,17 @@ def _get_latest_commit_hash(self, prompt_identifier: str) -> Optional[str]: if len(commits) == 0: return None return commits[0]["commit_hash"] + + def prompt_exists(self, prompt_name: str) -> bool: + try: + # check if the prompt exists + self.get_prompt(prompt_name) + return True + except requests.exceptions.HTTPError as e: + if e.response.status_code == 404: + return False + raise e def pull_prompt( self, @@ -4670,47 +4692,42 @@ def push_prompt( """Push a prompt to the LangSmith API. 
Args: - prompt_name: The name of the prompt + prompt_identifier: The name of the prompt in the format "prompt_name" or "owner/prompt_name" manifest_json: The JSON string of the prompt manifest - parent_commit_hash: The commit hash of the parent commit - is_public: Whether the new prompt is public - description: The description of the new prompt + parent_commit_hash: The commit hash of the parent commit, default is "latest" + is_public: Whether the new prompt is public, default is False + description: The description of the new prompt, default is an empty string """ from langchain_core.load.dump import dumps + manifest_json = dumps(manifest_json) settings = self.get_settings() - if is_public: - if not settings["tenant_handle"]: - raise ValueError( - """ - Cannot create public prompt without first creating a LangChain Hub handle. - - You can add a handle by creating a public prompt at: - https://smith.langchain.com/prompts - - This is a workspace-level handle and will be associated with all of your workspace's public prompts in the LangChain Hub. - """ - ) + + if is_public and not settings.get("tenant_handle"): + raise ValueError( + """ + Cannot create a public prompt without first creating a LangChain Hub handle. + + You can add a handle by creating a public prompt at: + https://smith.langchain.com/prompts + + This is a workspace-level handle and will be associated with all of your workspace's public prompts in the LangChain Hub. 
+ """ + ) + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) prompt_full_name = f"{owner}/{prompt_name}" - try: - # check if the prompt exists - _ = self.get_prompt(prompt_full_name) - except requests.exceptions.HTTPError as e: - if e.response.status_code != 404: - raise e - # create prompt if it doesn't exist - # make sure I am owner if owner is specified - if ( - settings["tenant_handle"] - and owner != "-" - and settings["tenant_handle"] != owner - ): - raise ValueError( - f"Tenant {settings['tenant_handle']} is not the owner of repo {prompt_identifier}" - ) + + if not self.current_tenant_is_owner(owner): + settings = self.get_settings() + raise ValueError( + f"Cannot create prompt for another tenant. Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}" + ) + + if not self.prompt_exists(prompt_full_name): self.create_prompt( - prompt_name, + owner=owner, + prompt_name=prompt_name, is_public=is_public, description=description, ) @@ -4718,16 +4735,16 @@ def push_prompt( manifest_dict = json.loads(manifest_json) if parent_commit_hash == "latest": parent_commit_hash = self._get_latest_commit_hash(prompt_full_name) - print('dict to submit', manifest_dict) request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} res = requests.post( f"{self.api_url}/commits/{prompt_full_name}", headers=self._headers, json=request_dict, ) + if res.status_code == 409: + raise ValueError("Conflict: The prompt has not been updated since the last commit") res.raise_for_status() - res = res.json() - commit_hash = res["commit"]["commit_hash"] + commit_hash = res.json()["commit"]["commit_hash"] short_hash = commit_hash[:8] url = ( self._host_url From 6030feed42f93e5a82446859f68a1ea58e94fce9 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 13:56:42 -0700 Subject: [PATCH 211/373] update --- python/Makefile | 3 + python/langsmith/client.py | 196 ++++++++-------------- python/tests/{hub => 
prompts}/__init__.py | 0 python/tests/prompts/test_prompts.py | 39 +++++ 4 files changed, 112 insertions(+), 126 deletions(-) rename python/tests/{hub => prompts}/__init__.py (100%) create mode 100644 python/tests/prompts/test_prompts.py diff --git a/python/Makefile b/python/Makefile index d06830bf9..a8bab2a27 100644 --- a/python/Makefile +++ b/python/Makefile @@ -18,6 +18,9 @@ doctest: evals: poetry run python -m pytest tests/evaluation +prompts: + poetry run python -m pytest tests/prompts + lint: poetry run ruff check . poetry run mypy . diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 6d092f335..0bf8033e1 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4562,83 +4562,38 @@ def _evaluate_strings( def get_settings(self): - res = requests.get( - f"{self.api_url}/settings", - headers=self._headers, - ) - res.raise_for_status() - return res.json() - - - def list_prompts(self, limit: int = 100, offset: int = 0) -> ls_schemas.ListPromptsResponse: - res = requests.get( - f"{self.api_url}/repos?limit={limit}&offset={offset}", - headers=self._headers, - ) - res.raise_for_status() - res_dict = res.json() - return ls_schemas.ListPromptsResponse(**res_dict) - - - def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: - res = requests.get( - f"{self.api_url}/repos/{prompt_identifier}", - headers=self._headers, - ) - res.raise_for_status() - prompt = res.json()['repo'] - return ls_schemas.Prompt(**prompt) + """ + Get the settings for the current tenant. + Returns: + dict: The settings for the current tenant. 
+ """ + response = self.request_with_retries("GET", "/settings") + return response.json() + def current_tenant_is_owner(self, owner: str) -> bool: settings = self.get_settings() - if owner != "-" and settings["tenant_handle"] != owner: - return False - return True - - def create_prompt( - self, owner: str, prompt_name: str, *, description: str = "", is_public: bool = True - ): - if not self.current_tenant_is_owner(owner): - settings = self.get_settings() - raise ValueError( - f"Cannot create prompt for another tenant. Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}" - ) + return owner == "-" or settings["tenant_handle"] == owner - json = { - "repo_handle": prompt_name, - "is_public": is_public, - "description": description, - } - res = requests.post( - f"{self.api_url}/repos/", - headers=self._headers, - json=json, - ) - res.raise_for_status() - return res.json() + def list_prompts(self, limit: int = 100, offset: int = 0) -> ls_schemas.ListPromptsResponse: + params = {"limit": limit, "offset": offset} + res_dict = self.request_with_retries("GET", "/repos", params=params) + return ls_schemas.ListPromptsResponse(**res_dict) - def list_commits(self, prompt_name: str, limit: int = 100, offset: int = 0): - res = requests.get( - f"{self.api_url}/commits/{prompt_name}/?limit={limit}&offset={offset}", - headers=self._headers, - ) - res.raise_for_status() - return res.json() + def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: + response = self.request_with_retries("GET", f"/repos/{prompt_identifier}") + if response.status_code == 200: + res = response.json() + return ls_schemas.Prompt(**res['repo']) + else: + response.raise_for_status() - def _get_latest_commit_hash(self, prompt_identifier: str) -> Optional[str]: - commits_resp = self.list_commits(prompt_identifier) - commits = commits_resp["commits"] - if len(commits) == 0: - return None - return commits[0]["commit_hash"] - def prompt_exists(self, prompt_name: str) -> 
bool: try: - # check if the prompt exists self.get_prompt(prompt_name) return True except requests.exceptions.HTTPError as e: @@ -4646,10 +4601,15 @@ def prompt_exists(self, prompt_name: str) -> bool: return False raise e - def pull_prompt( - self, - prompt_identifier: str, - ) -> ls_schemas.PromptManifest: + + def _get_latest_commit_hash(self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0) -> Optional[str]: + response = self.request_with_retries("GET", f"/commits/{prompt_owner_and_name}/", params={"limit": limit, "offset": offset}) + commits_resp = response.json() + commits = commits_resp["commits"] + return commits[0]["commit_hash"] if commits else None + + + def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManifest: """Pull a prompt from the LangSmith API. Args: @@ -4659,45 +4619,37 @@ def pull_prompt( Prompt The prompt """ - LS_VERSION_WITH_OPTIMIZATION="0.5.23" + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(prompt_identifier) + use_optimization = ls_utils.is_version_greater_or_equal( - current_version=self.info.version, target_version=LS_VERSION_WITH_OPTIMIZATION + current_version=self.info.version, target_version="0.5.23" ) - owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(prompt_identifier) + if not use_optimization and (commit_hash is None or commit_hash == "latest"): + commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") + if commit_hash is None: + raise ValueError("No commits found") - if not use_optimization: - if commit_hash is None or commit_hash == "latest": - commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") - if commit_hash is None: - raise ValueError("No commits found") + response = self.request_with_retries("GET", f"/commits/{owner}/{prompt_name}/{commit_hash}") + res = response.json() + return ls_schemas.PromptManifest(**{"owner": owner, "repo": prompt_name, **res}) - res = requests.get( - 
f"{self.api_url}/commits/{owner}/{prompt_name}/{commit_hash}", - headers=self._headers, - ) - res.raise_for_status() - result = res.json() - return ls_schemas.PromptManifest(**{"owner": owner, "repo": prompt_name, **result}) - - def push_prompt( - self, - prompt_identifier: str, - manifest_json: Any, - *, - parent_commit_hash: Optional[str] = "latest", - is_public: bool = False, - description: str = "", - ) -> str: - """Push a prompt to the LangSmith API. - Args: - prompt_identifier: The name of the prompt in the format "prompt_name" or "owner/prompt_name" - manifest_json: The JSON string of the prompt manifest - parent_commit_hash: The commit hash of the parent commit, default is "latest" - is_public: Whether the new prompt is public, default is False - description: The description of the new prompt, default is an empty string - """ + def pull_prompt(self, prompt_identifier: str) -> ls_schemas.PromptManifest: + from langchain_core.load.load import loads + from langchain_core.prompts import BasePromptTemplate + response = self.pull_prompt_manifest(prompt_identifier) + obj = loads(json.dumps(response.manifest)) + if isinstance(obj, BasePromptTemplate): + if obj.metadata is None: + obj.metadata = {} + obj.metadata["lc_hub_owner"] = response.owner + obj.metadata["lc_hub_repo"] = response.repo + obj.metadata["lc_hub_commit_hash"] = response.commit_hash + return obj + + + def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: str = ""): from langchain_core.load.dump import dumps manifest_json = dumps(manifest_json) @@ -4719,38 +4671,30 @@ def push_prompt( prompt_full_name = f"{owner}/{prompt_name}" if not self.current_tenant_is_owner(owner): - settings = self.get_settings() - raise ValueError( - f"Cannot create prompt for another tenant. 
Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}" - ) + raise ValueError(f"Cannot create prompt for another tenant. Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}") if not self.prompt_exists(prompt_full_name): - self.create_prompt( - owner=owner, - prompt_name=prompt_name, - is_public=is_public, - description=description, - ) + self.request_with_retries("POST", "/repos/", json = { + "repo_handle": prompt_name, + "is_public": is_public, + "description": description, + }) manifest_dict = json.loads(manifest_json) if parent_commit_hash == "latest": parent_commit_hash = self._get_latest_commit_hash(prompt_full_name) + request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} - res = requests.post( - f"{self.api_url}/commits/{prompt_full_name}", - headers=self._headers, - json=request_dict, - ) - if res.status_code == 409: - raise ValueError("Conflict: The prompt has not been updated since the last commit") - res.raise_for_status() - commit_hash = res.json()["commit"]["commit_hash"] + response = self.request_with_retries("POST", f"/commits/{prompt_full_name}", json=request_dict) + res = response.json() + + commit_hash = res["commit"]["commit_hash"] short_hash = commit_hash[:8] - url = ( - self._host_url - + f"/prompts/{prompt_name}/{short_hash}?organizationId={settings['id']}" - ) - return url + return f"{self._host_url}/prompts/{prompt_name}/{short_hash}?organizationId={settings['id']}" + + + def push_prompt(self, prompt_identifier: str, obj: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: str = "") -> str: + return self.push_prompt_manifest(prompt_identifier, obj, parent_commit_hash, is_public, description) def _tracing_thread_drain_queue( diff --git a/python/tests/hub/__init__.py b/python/tests/prompts/__init__.py similarity index 100% rename from python/tests/hub/__init__.py rename to python/tests/prompts/__init__.py diff --git 
a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py new file mode 100644 index 000000000..c018eadd0 --- /dev/null +++ b/python/tests/prompts/test_prompts.py @@ -0,0 +1,39 @@ +import asyncio +from typing import Sequence + +import pytest + +from langsmith import Client +from langsmith.schemas import Prompt + +from langchain_core.prompts import ChatPromptTemplate + +@pytest.fixture +def basic_fstring_prompt(): + return ChatPromptTemplate.from_messages( + [ + ("system", "You are a helpful asssistant."), + ("human", "{question}"), + ] + ) + +def test_push_prompt( + basic_fstring_prompt, +): + prompt_name = "basic_fstring_prompt" + langsmith_client = Client() + url = langsmith_client.push_prompt_manifest( + prompt_name, + basic_fstring_prompt + ) + assert prompt_name in url + + res = langsmith_client.push_prompt_manifest( + prompt_name, + basic_fstring_prompt + ) + assert res.status_code == 409 + + prompt = langsmith_client.pull_prompt_manifest(prompt_identifier=prompt_name) + assert prompt.repo == prompt_name + From f4b21a436cf87ef3619a889e1915f0e8bbb2d840 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 14:33:36 -0700 Subject: [PATCH 212/373] tests --- python/langsmith/client.py | 22 +++++-- python/tests/prompts/test_prompts.py | 95 ++++++++++++++++++++-------- 2 files changed, 86 insertions(+), 31 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 0bf8033e1..41660cd48 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4579,12 +4579,14 @@ def current_tenant_is_owner(self, owner: str) -> bool: def list_prompts(self, limit: int = 100, offset: int = 0) -> ls_schemas.ListPromptsResponse: params = {"limit": limit, "offset": offset} - res_dict = self.request_with_retries("GET", "/repos", params=params) - return ls_schemas.ListPromptsResponse(**res_dict) + response = self.request_with_retries("GET", "/repos", params=params) + res = response.json() + return 
ls_schemas.ListPromptsResponse(**res) def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: - response = self.request_with_retries("GET", f"/repos/{prompt_identifier}") + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + response = self.request_with_retries("GET", f"/repos/{owner}/{prompt_name}", to_ignore=[ls_utils.LangSmithError]) if response.status_code == 200: res = response.json() return ls_schemas.Prompt(**res['repo']) @@ -4592,9 +4594,9 @@ def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: response.raise_for_status() - def prompt_exists(self, prompt_name: str) -> bool: + def prompt_exists(self, prompt_identifier: str) -> bool: try: - self.get_prompt(prompt_name) + self.get_prompt(prompt_identifier) return True except requests.exceptions.HTTPError as e: if e.response.status_code == 404: @@ -4607,6 +4609,14 @@ def _get_latest_commit_hash(self, prompt_owner_and_name: str, limit: int = 1, of commits_resp = response.json() commits = commits_resp["commits"] return commits[0]["commit_hash"] if commits else None + + + def delete_prompt(self, prompt_identifier: str) -> bool: + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + if not self.current_tenant_is_owner(owner): + raise ValueError(f"Cannot delete prompt for another tenant. 
Current tenant: {self.get_settings()['tenant_handle']}, Requested tenant: {owner}") + response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") + return response.status_code == 204 def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManifest: @@ -4635,7 +4645,7 @@ def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManif return ls_schemas.PromptManifest(**{"owner": owner, "repo": prompt_name, **res}) - def pull_prompt(self, prompt_identifier: str) -> ls_schemas.PromptManifest: + def pull_prompt(self, prompt_identifier: str) -> Any: from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate response = self.pull_prompt_manifest(prompt_identifier) diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index c018eadd0..2b8f69f0f 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -1,39 +1,84 @@ -import asyncio -from typing import Sequence - import pytest +from uuid import uuid4 +from langsmith.client import Client +from langsmith.schemas import Prompt, ListPromptsResponse +from langchain_core.prompts import ChatPromptTemplate -from langsmith import Client -from langsmith.schemas import Prompt +@pytest.fixture +def langsmith_client() -> Client: + return Client() -from langchain_core.prompts import ChatPromptTemplate +@pytest.fixture +def prompt_template_1() -> ChatPromptTemplate: + return ChatPromptTemplate.from_template("tell me a joke about {topic}") @pytest.fixture -def basic_fstring_prompt(): +def prompt_template_2() -> ChatPromptTemplate: return ChatPromptTemplate.from_messages( [ - ("system", "You are a helpful asssistant."), + ("system", "You are a helpful assistant."), ("human", "{question}"), ] ) -def test_push_prompt( - basic_fstring_prompt, -): - prompt_name = "basic_fstring_prompt" - langsmith_client = Client() - url = langsmith_client.push_prompt_manifest( - prompt_name, - 
basic_fstring_prompt - ) - assert prompt_name in url +def test_list_prompts(langsmith_client: Client): + # Test listing prompts + response = langsmith_client.list_prompts(limit=10, offset=0) + assert isinstance(response, ListPromptsResponse) + assert len(response.repos) <= 10 - res = langsmith_client.push_prompt_manifest( - prompt_name, - basic_fstring_prompt - ) - assert res.status_code == 409 +def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + # First, create a prompt to test with + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + # Now test getting the prompt + prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, Prompt) + assert prompt.repo_handle == prompt_name + + # Clean up + langsmith_client.delete_prompt(prompt_name) + assert not langsmith_client.prompt_exists(prompt_name) + +def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): + # Test with a non-existent prompt + non_existent_prompt = f"non_existent_{uuid4().hex[:8]}" + assert not langsmith_client.prompt_exists(non_existent_prompt) + + # Create a prompt and test again + existent_prompt = f"existent_{uuid4().hex[:8]}" + langsmith_client.push_prompt(existent_prompt, prompt_template_2) + assert langsmith_client.prompt_exists(existent_prompt) + + # Clean up + langsmith_client.delete_prompt(existent_prompt) + assert not langsmith_client.prompt_exists(existent_prompt) + +def test_push_and_pull_prompt(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + + # Test pushing a prompt + push_result = langsmith_client.push_prompt(prompt_name, prompt_template_2) + assert isinstance(push_result, str) # Should return a URL + + # Test pulling the prompt + langsmith_client.pull_prompt(prompt_name) + + # Clean up + langsmith_client.delete_prompt(prompt_name) + +def 
test_push_prompt_manifest(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): + prompt_name = f"test_prompt_manifest_{uuid4().hex[:8]}" + + # Test pushing a prompt manifest + result = langsmith_client.push_prompt_manifest(prompt_name, prompt_template_2) + assert isinstance(result, str) # Should return a URL - prompt = langsmith_client.pull_prompt_manifest(prompt_identifier=prompt_name) - assert prompt.repo == prompt_name + # Verify the pushed manifest + pulled_prompt_manifest = langsmith_client.pull_prompt_manifest(prompt_name) + latest_commit_hash = langsmith_client._get_latest_commit_hash(f"-/{prompt_name}") + assert pulled_prompt_manifest.commit_hash == latest_commit_hash + # Clean up + langsmith_client.delete_prompt(prompt_name) From b65bcf19b0ccef0c61907b1d6b0e55db2142fb90 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 14:53:21 -0700 Subject: [PATCH 213/373] docstrings --- python/langsmith/client.py | 167 +++++++++++++++++++++++++++++-------- 1 file changed, 130 insertions(+), 37 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 41660cd48..16123b634 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4561,7 +4561,8 @@ def _evaluate_strings( ) - def get_settings(self): +class Client: + def get_settings(self) -> dict: """ Get the settings for the current tenant. @@ -4571,47 +4572,103 @@ def get_settings(self): response = self.request_with_retries("GET", "/settings") return response.json() - + def current_tenant_is_owner(self, owner: str) -> bool: + """ + Check if the current tenant is the owner of the prompt. + + Args: + owner (str): The owner to check against. + + Returns: + bool: True if the current tenant is the owner, False otherwise. + """ settings = self.get_settings() return owner == "-" or settings["tenant_handle"] == owner def list_prompts(self, limit: int = 100, offset: int = 0) -> ls_schemas.ListPromptsResponse: + """ + List prompts with pagination. 
+ + Args: + limit (int): The maximum number of prompts to return. Defaults to 100. + offset (int): The number of prompts to skip. Defaults to 0. + + Returns: + ls_schemas.ListPromptsResponse: A response object containing the list of prompts. + """ params = {"limit": limit, "offset": offset} response = self.request_with_retries("GET", "/repos", params=params) - res = response.json() - return ls_schemas.ListPromptsResponse(**res) - + return ls_schemas.ListPromptsResponse(**response.json()) def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: + """ + Get a specific prompt by its identifier. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + ls_schemas.Prompt: The prompt object. + + Raises: + requests.exceptions.HTTPError: If the prompt is not found or another error occurs. + """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries("GET", f"/repos/{owner}/{prompt_name}", to_ignore=[ls_utils.LangSmithError]) if response.status_code == 200: - res = response.json() - return ls_schemas.Prompt(**res['repo']) - else: - response.raise_for_status() + return ls_schemas.Prompt(**response.json()['repo']) + response.raise_for_status() def prompt_exists(self, prompt_identifier: str) -> bool: + """ + Check if a prompt exists. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + bool: True if the prompt exists, False otherwise. + """ try: self.get_prompt(prompt_identifier) return True except requests.exceptions.HTTPError as e: - if e.response.status_code == 404: - return False - raise e + return e.response.status_code != 404 def _get_latest_commit_hash(self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0) -> Optional[str]: + """ + Get the latest commit hash for a prompt. + + Args: + prompt_owner_and_name (str): The owner and name of the prompt. + limit (int): The maximum number of commits to fetch. Defaults to 1. 
+ offset (int): The number of commits to skip. Defaults to 0. + + Returns: + Optional[str]: The latest commit hash, or None if no commits are found. + """ response = self.request_with_retries("GET", f"/commits/{prompt_owner_and_name}/", params={"limit": limit, "offset": offset}) - commits_resp = response.json() - commits = commits_resp["commits"] + commits = response.json()["commits"] return commits[0]["commit_hash"] if commits else None - + def delete_prompt(self, prompt_identifier: str) -> bool: + """ + Delete a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt to delete. + + Returns: + bool: True if the prompt was successfully deleted, False otherwise. + + Raises: + ValueError: If the current tenant is not the owner of the prompt. + """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) if not self.current_tenant_is_owner(owner): raise ValueError(f"Cannot delete prompt for another tenant. Current tenant: {self.get_settings()['tenant_handle']}, Requested tenant: {owner}") @@ -4620,20 +4677,20 @@ def delete_prompt(self, prompt_identifier: str) -> bool: def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManifest: - """Pull a prompt from the LangSmith API. + """ + Pull a prompt manifest from the LangSmith API. Args: - prompt_identifier: The identifier of the prompt (str, ex. "prompt_name", "owner/prompt_name", "owner/prompt_name:commit_hash") + prompt_identifier (str): The identifier of the prompt. - Yields: - Prompt - The prompt + Returns: + ls_schemas.PromptManifest: The prompt manifest. + + Raises: + ValueError: If no commits are found for the prompt. 
""" owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(prompt_identifier) - - use_optimization = ls_utils.is_version_greater_or_equal( - current_version=self.info.version, target_version="0.5.23" - ) + use_optimization = ls_utils.is_version_greater_or_equal(self.info.version, "0.5.23") if not use_optimization and (commit_hash is None or commit_hash == "latest"): commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") @@ -4646,6 +4703,15 @@ def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManif def pull_prompt(self, prompt_identifier: str) -> Any: + """ + Pull a prompt and return it as a LangChain object. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + Any: The prompt object. + """ from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate response = self.pull_prompt_manifest(prompt_identifier) @@ -4653,13 +4719,31 @@ def pull_prompt(self, prompt_identifier: str) -> Any: if isinstance(obj, BasePromptTemplate): if obj.metadata is None: obj.metadata = {} - obj.metadata["lc_hub_owner"] = response.owner - obj.metadata["lc_hub_repo"] = response.repo - obj.metadata["lc_hub_commit_hash"] = response.commit_hash + obj.metadata.update({ + "lc_hub_owner": response.owner, + "lc_hub_repo": response.repo, + "lc_hub_commit_hash": response.commit_hash + }) return obj - def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: str = ""): + def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: str = "") -> str: + """ + Push a prompt manifest to the LangSmith API. + + Args: + prompt_identifier (str): The identifier of the prompt. + manifest_json (Any): The manifest to push. + parent_commit_hash (Optional[str]): The parent commit hash. Defaults to "latest". 
+ is_public (bool): Whether the prompt should be public. Defaults to False. + description (str): A description of the prompt. Defaults to an empty string. + + Returns: + str: The URL of the pushed prompt. + + Raises: + ValueError: If a public prompt is attempted without a tenant handle or if the current tenant is not the owner. + """ from langchain_core.load.dump import dumps manifest_json = dumps(manifest_json) @@ -4667,14 +4751,8 @@ def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, paren if is_public and not settings.get("tenant_handle"): raise ValueError( - """ - Cannot create a public prompt without first creating a LangChain Hub handle. - - You can add a handle by creating a public prompt at: - https://smith.langchain.com/prompts - - This is a workspace-level handle and will be associated with all of your workspace's public prompts in the LangChain Hub. - """ + "Cannot create a public prompt without first creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at: https://smith.langchain.com/prompts" ) owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) @@ -4684,7 +4762,7 @@ def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, paren raise ValueError(f"Cannot create prompt for another tenant. Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}") if not self.prompt_exists(prompt_full_name): - self.request_with_retries("POST", "/repos/", json = { + self.request_with_retries("POST", "/repos/", json={ "repo_handle": prompt_name, "is_public": is_public, "description": description, @@ -4704,6 +4782,21 @@ def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, paren def push_prompt(self, prompt_identifier: str, obj: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: str = "") -> str: + """ + Push a prompt object to the LangSmith API. 
+ + This method is a wrapper around push_prompt_manifest. + + Args: + prompt_identifier (str): The identifier of the prompt. The format is "name" or "-/name" or "workspace_handle/name". + obj (Any): The prompt object to push. + parent_commit_hash (Optional[str]): The parent commit hash. Defaults to "latest". + is_public (bool): Whether the prompt should be public. Defaults to False. + description (str): A description of the prompt. Defaults to an empty string. + + Returns: + str: The URL of the pushed prompt. + """ return self.push_prompt_manifest(prompt_identifier, obj, parent_commit_hash, is_public, description) From 9ed7046b389e816835616ecfd1fd23157deff61f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 14:54:07 -0700 Subject: [PATCH 214/373] mistake --- python/langsmith/client.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 16123b634..1834ef5df 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4561,7 +4561,6 @@ def _evaluate_strings( ) -class Client: def get_settings(self) -> dict: """ Get the settings for the current tenant. 
From 9ee2f6eddace98f48ce68439e8158e934b89eaa0 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 14:54:56 -0700 Subject: [PATCH 215/373] unnecessary file --- python/tests/prompts/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 python/tests/prompts/__init__.py diff --git a/python/tests/prompts/__init__.py b/python/tests/prompts/__init__.py deleted file mode 100644 index e69de29bb..000000000 From b739f4c83ab4774694e21ff3645220540c5523c4 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 15:24:50 -0700 Subject: [PATCH 216/373] whitespace --- python/tests/prompts/test_prompts.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index 2b8f69f0f..4554314f4 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -4,14 +4,17 @@ from langsmith.schemas import Prompt, ListPromptsResponse from langchain_core.prompts import ChatPromptTemplate + @pytest.fixture def langsmith_client() -> Client: return Client() + @pytest.fixture def prompt_template_1() -> ChatPromptTemplate: return ChatPromptTemplate.from_template("tell me a joke about {topic}") + @pytest.fixture def prompt_template_2() -> ChatPromptTemplate: return ChatPromptTemplate.from_messages( @@ -21,12 +24,14 @@ def prompt_template_2() -> ChatPromptTemplate: ] ) + def test_list_prompts(langsmith_client: Client): # Test listing prompts response = langsmith_client.list_prompts(limit=10, offset=0) assert isinstance(response, ListPromptsResponse) assert len(response.repos) <= 10 + def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): # First, create a prompt to test with prompt_name = f"test_prompt_{uuid4().hex[:8]}" @@ -41,6 +46,7 @@ def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTempl langsmith_client.delete_prompt(prompt_name) assert not 
langsmith_client.prompt_exists(prompt_name) + def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): # Test with a non-existent prompt non_existent_prompt = f"non_existent_{uuid4().hex[:8]}" @@ -55,6 +61,7 @@ def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTe langsmith_client.delete_prompt(existent_prompt) assert not langsmith_client.prompt_exists(existent_prompt) + def test_push_and_pull_prompt(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" @@ -68,6 +75,7 @@ def test_push_and_pull_prompt(langsmith_client: Client, prompt_template_2: ChatP # Clean up langsmith_client.delete_prompt(prompt_name) + def test_push_prompt_manifest(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): prompt_name = f"test_prompt_manifest_{uuid4().hex[:8]}" From ba7ac7b8af4815cdb875cd0d9357aa1601c3fed0 Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Thu, 11 Jul 2024 15:54:50 -0700 Subject: [PATCH 217/373] update to human/system message --- python/langsmith/evaluation/llm_evaluator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index b6fcecabb..ab2a6c1ce 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -84,7 +84,7 @@ def __init__( Args: prompt_template (Union[str, List[Tuple[str, str]]): The prompt template to use for the evaluation. If a string is provided, it is - assumed to be a system message. + assumed to be a human / user message. score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]): The configuration for the score, either categorical or continuous. 
map_variables (Optional[Callable[[Run, Example], dict]], optional): @@ -177,7 +177,7 @@ def _initialize( if isinstance(prompt_template, str): self.prompt = ChatPromptTemplate.from_messages( - [("system", prompt_template)] + [("human", prompt_template)] ) else: self.prompt = ChatPromptTemplate.from_messages(prompt_template) From dc003c6e2dbd706a6863244b0b044e5873308d42 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Thu, 11 Jul 2024 16:36:46 -0700 Subject: [PATCH 218/373] Fix up contextvar propagation --- python/langsmith/__init__.py | 4 + python/langsmith/_expect.py | 9 +-- python/langsmith/_internal/_aiter.py | 15 +++- python/langsmith/_testing.py | 3 +- python/langsmith/beta/_evals.py | 5 +- python/langsmith/evaluation/_arunner.py | 56 +++++++------ python/langsmith/evaluation/_runner.py | 69 +++++++++------- python/langsmith/evaluation/llm_evaluator.py | 4 +- python/langsmith/run_helpers.py | 4 +- python/langsmith/utils.py | 80 +++++++++++++++++++ python/tests/evaluation/test_evaluation.py | 1 - .../integration_tests/test_llm_evaluator.py | 3 +- 12 files changed, 181 insertions(+), 72 deletions(-) diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index 23f8901b4..c0a8d9054 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -87,6 +87,10 @@ def __getattr__(name: str) -> Any: from langsmith._testing import unit return unit + elif name == "ContextThreadPoolExecutor": + from langsmith.utils import ContextThreadPoolExecutor + + return ContextThreadPoolExecutor raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/python/langsmith/_expect.py b/python/langsmith/_expect.py index fe459e409..967390597 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -46,7 +46,6 @@ def test_output_semantically_close(): from __future__ import annotations import atexit -import concurrent.futures import inspect from typing import 
( TYPE_CHECKING, @@ -91,15 +90,13 @@ def __init__( client: Optional[ls_client.Client], key: str, value: Any, - _executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, + _executor: Optional[ls_utils.ContextThreadPoolExecutor] = None, run_id: Optional[str] = None, ): self._client = client self.key = key self.value = value - self._executor = _executor or concurrent.futures.ThreadPoolExecutor( - max_workers=3 - ) + self._executor = _executor or ls_utils.ContextThreadPoolExecutor(max_workers=3) rt = rh.get_current_run_tree() self._run_id = rt.trace_id if rt else run_id @@ -255,7 +252,7 @@ class _Expect: def __init__(self, *, client: Optional[ls_client.Client] = None): self._client = client - self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) + self.executor = ls_utils.ContextThreadPoolExecutor(max_workers=3) atexit.register(self.executor.shutdown, wait=True) def embedding_distance( diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py index aeb9d857a..c5cef0467 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -6,6 +6,8 @@ """ import asyncio +import contextvars +import functools import inspect from collections import deque from typing import ( @@ -277,8 +279,13 @@ async def process_item(item): async def process_generator(): tasks = [] + accepts_context = asyncio_accepts_context() async for item in generator: - task = asyncio.create_task(process_item(item)) + if accepts_context: + context = contextvars.copy_context() + task = asyncio.create_task(process_item(item), context=context) + else: + task = asyncio.create_task(process_item(item)) tasks.append(task) if n is not None and len(tasks) >= n: done, pending = await asyncio.wait( @@ -300,3 +307,9 @@ def accepts_context(callable: Callable[..., Any]) -> bool: return inspect.signature(callable).parameters.get("context") is not None except ValueError: return False + + +@functools.lru_cache(maxsize=1) +def 
asyncio_accepts_context(): + """Check if the current asyncio event loop accepts a context argument.""" + return accepts_context(asyncio.create_task) diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index 42cec872b..3d5ac9c3b 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -1,7 +1,6 @@ from __future__ import annotations import atexit -import concurrent.futures import datetime import functools import inspect @@ -392,7 +391,7 @@ def __init__( self._experiment = experiment self._dataset = dataset self._version: Optional[datetime.datetime] = None - self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + self._executor = ls_utils.ContextThreadPoolExecutor(max_workers=1) atexit.register(_end_tests, self) @property diff --git a/python/langsmith/beta/_evals.py b/python/langsmith/beta/_evals.py index f41bc8785..03b099fff 100644 --- a/python/langsmith/beta/_evals.py +++ b/python/langsmith/beta/_evals.py @@ -4,7 +4,6 @@ """ import collections -import concurrent.futures import datetime import itertools import uuid @@ -218,6 +217,8 @@ def compute_test_metrics( Returns: None: This function does not return any value. 
""" + from langsmith import ContextThreadPoolExecutor + evaluators_: List[ls_eval.RunEvaluator] = [] for func in evaluators: if isinstance(func, ls_eval.RunEvaluator): @@ -230,7 +231,7 @@ def compute_test_metrics( ) client = client or Client() traces = _load_nested_traces(project_name, client) - with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrency) as executor: + with ContextThreadPoolExecutor(max_workers=max_concurrency) as executor: results = executor.map( client.evaluate_run, *zip(*_outer_product(traces, evaluators_)) ) diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index b5e1cc2ed..718f8933e 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -628,7 +628,12 @@ async def _arun_evaluators( **{"experiment": self.experiment_name}, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + } ): run = current_results["run"] example = current_results["example"] @@ -682,11 +687,11 @@ async def _aapply_summary_evaluators( **current_context, "project_name": "evaluators", "metadata": metadata, + "enabled": True, } ): for evaluator in summary_evaluators: try: - # TODO: Support async evaluators summary_eval_result = evaluator(runs, examples) flattened_results = self.client._select_eval_results( summary_eval_result, @@ -813,30 +818,31 @@ def _get_run(r: run_trees.RunTree) -> None: nonlocal run run = r - try: - await fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + await fn( + example.inputs, + 
langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) def _ensure_async_traceable( diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index f5cc1ae4c..43f01dc36 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -685,7 +685,9 @@ def evaluate_and_submit_feedback( return result tqdm = _load_tqdm() - with cf.ThreadPoolExecutor(max_workers=max_concurrency or 1) as executor: + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency or 1 + ) as executor: futures = [] for example_id, runs_list in tqdm(runs_dict.items()): results[example_id] = { @@ -1191,7 +1193,7 @@ def _predict( ) else: - with cf.ThreadPoolExecutor(max_concurrency) as executor: + with ls_utils.ContextThreadPoolExecutor(max_concurrency) as executor: futures = [ executor.submit( _forward, @@ -1223,7 +1225,12 @@ def _run_evaluators( }, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + } ): run = current_results["run"] example = current_results["example"] @@ -1264,10 +1271,13 @@ def _score( (e.g. 
from a previous prediction step) """ if max_concurrency == 0: + context = copy_context() for current_results in self.get_results(): - yield self._run_evaluators(evaluators, current_results) + yield context.run(self._run_evaluators, evaluators, current_results) else: - with cf.ThreadPoolExecutor(max_workers=max_concurrency) as executor: + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency + ) as executor: futures = [] for current_results in self.get_results(): futures.append( @@ -1289,7 +1299,7 @@ def _apply_summary_evaluators( runs.append(run) examples.append(example) aggregate_feedback = [] - with cf.ThreadPoolExecutor() as executor: + with ls_utils.ContextThreadPoolExecutor() as executor: project_id = self._get_experiment().id current_context = rh.get_tracing_context() metadata = { @@ -1431,30 +1441,31 @@ def _get_run(r: run_trees.RunTree) -> None: nonlocal run run = r - try: - fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + fn( + example.inputs, + langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) def _resolve_data( diff --git 
a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index ab2a6c1ce..1b2d39cfc 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -176,9 +176,7 @@ def _initialize( ) if isinstance(prompt_template, str): - self.prompt = ChatPromptTemplate.from_messages( - [("human", prompt_template)] - ) + self.prompt = ChatPromptTemplate.from_messages([("human", prompt_template)]) else: self.prompt = ChatPromptTemplate.from_messages(prompt_template) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 88b8a7158..7c09b437b 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -442,7 +442,7 @@ async def async_wrapper( ) try: - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if func_accepts_parent_run: kwargs["run_tree"] = run_container["new_run"] if not func_accepts_config: @@ -490,7 +490,7 @@ async def async_generator_wrapper( kwargs.pop("config", None) async_gen_result = func(*args, **kwargs) # Can't iterate through if it's a coroutine - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if inspect.iscoroutine(async_gen_result): if accepts_context: async_gen_result = await asyncio.create_task( diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 2c0152e0f..e7de08f03 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -1,6 +1,7 @@ """Generic utility functions.""" import contextlib +import contextvars import copy import enum import functools @@ -11,11 +12,14 @@ import sys import threading import traceback +from concurrent.futures import Future, ThreadPoolExecutor from typing import ( Any, Callable, Dict, Generator, + Iterable, + Iterator, List, Mapping, Optional, @@ -23,9 +27,11 @@ Tuple, TypeVar, Union, + cast, ) import requests 
+from typing_extensions import ParamSpec from urllib3.util import Retry from langsmith import schemas as ls_schemas @@ -561,3 +567,77 @@ def deepish_copy(val: T) -> T: # what we can _LOGGER.debug("Failed to deepcopy input: %s", repr(e)) return _middle_copy(val, memo) + + +P = ParamSpec("P") + + +class ContextThreadPoolExecutor(ThreadPoolExecutor): + """ThreadPoolExecutor that copies the context to the child thread.""" + + def submit( # type: ignore[override] + self, + func: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, + ) -> Future[T]: + """Submit a function to the executor. + + Args: + func (Callable[..., T]): The function to submit. + *args (Any): The positional arguments to the function. + **kwargs (Any): The keyword arguments to the function. + + Returns: + Future[T]: The future for the function. + """ + return super().submit( + cast( + Callable[..., T], + functools.partial( + contextvars.copy_context().run, func, *args, **kwargs + ), + ) + ) + + def map( + self, + fn: Callable[..., T], + *iterables: Iterable[Any], + timeout: Optional[float] = None, + chunksize: int = 1, + ) -> Iterator[T]: + """Return an iterator equivalent to stdlib map. + + Each function will receive it's own copy of the context from the parent thread. + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. 
+ """ + contexts = [contextvars.copy_context() for _ in range(len(iterables[0]))] # type: ignore[arg-type] + + def _wrapped_fn(*args: Any) -> T: + return contexts.pop().run(fn, *args) + + return super().map( + _wrapped_fn, + *iterables, + timeout=timeout, + chunksize=chunksize, + ) diff --git a/python/tests/evaluation/test_evaluation.py b/python/tests/evaluation/test_evaluation.py index ecb371806..e05f9e920 100644 --- a/python/tests/evaluation/test_evaluation.py +++ b/python/tests/evaluation/test_evaluation.py @@ -41,7 +41,6 @@ def predict(inputs: dict) -> dict: }, num_repetitions=3, ) - results.wait() assert len(results) == 30 examples = client.list_examples(dataset_name=dataset_name) for example in examples: diff --git a/python/tests/integration_tests/test_llm_evaluator.py b/python/tests/integration_tests/test_llm_evaluator.py index cedb74024..28b742096 100644 --- a/python/tests/integration_tests/test_llm_evaluator.py +++ b/python/tests/integration_tests/test_llm_evaluator.py @@ -193,11 +193,11 @@ async def apredict(inputs: dict) -> dict: model_provider="anthropic", model_name="claude-3-haiku-20240307", ) - results = evaluate( predict, data=dataset_name, evaluators=[reference_accuracy, accuracy], + experiment_prefix=__name__ + "::test_evaluate.evaluate", ) results.wait() @@ -205,4 +205,5 @@ async def apredict(inputs: dict) -> dict: apredict, data=dataset_name, evaluators=[reference_accuracy, accuracy], + experiment_prefix=__name__ + "::test_evaluate.aevaluate", ) From bf521dc4b0c54d5dc8800e2f3b9fed042b70e1aa Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 19:24:15 -0700 Subject: [PATCH 219/373] more functionality --- python/langsmith/client.py | 260 ++++++++++++++++++-------- python/langsmith/schemas.py | 46 ++++- python/langsmith/utils.py | 43 +++-- python/langsmith/wrappers/_openai.py | 12 +- python/tests/prompts/test_prompts.py | 131 ++++++++++--- python/tests/unit_tests/test_utils.py | 61 ++++-- 6 files changed, 420 insertions(+), 133 
deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 1834ef5df..86a13b107 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4560,10 +4560,8 @@ def _evaluate_strings( **kwargs, ) - def get_settings(self) -> dict: - """ - Get the settings for the current tenant. + """Get the settings for the current tenant. Returns: dict: The settings for the current tenant. @@ -4571,10 +4569,8 @@ def get_settings(self) -> dict: response = self.request_with_retries("GET", "/settings") return response.json() - def current_tenant_is_owner(self, owner: str) -> bool: - """ - Check if the current tenant is the owner of the prompt. + """Check if the current workspace has the same handle as owner. Args: owner (str): The owner to check against. @@ -4585,79 +4581,164 @@ def current_tenant_is_owner(self, owner: str) -> bool: settings = self.get_settings() return owner == "-" or settings["tenant_handle"] == owner + def prompt_exists(self, prompt_identifier: str) -> bool: + """Check if a prompt exists. - def list_prompts(self, limit: int = 100, offset: int = 0) -> ls_schemas.ListPromptsResponse: + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + bool: True if the prompt exists, False otherwise. """ - List prompts with pagination. + try: + self.get_prompt(prompt_identifier) + return True + except requests.exceptions.HTTPError as e: + return e.response.status_code != 404 + + def _get_latest_commit_hash( + self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0 + ) -> Optional[str]: + """Get the latest commit hash for a prompt. Args: - limit (int): The maximum number of prompts to return. Defaults to 100. - offset (int): The number of prompts to skip. Defaults to 0. + prompt_owner_and_name (str): The owner and name of the prompt. + limit (int): The maximum number of commits to fetch. Defaults to 1. + offset (int): The number of commits to skip. Defaults to 0. 
Returns: - ls_schemas.ListPromptsResponse: A response object containing the list of prompts. + Optional[str]: The latest commit hash, or None if no commits are found. """ - params = {"limit": limit, "offset": offset} - response = self.request_with_retries("GET", "/repos", params=params) - return ls_schemas.ListPromptsResponse(**response.json()) + response = self.request_with_retries( + "GET", + f"/commits/{prompt_owner_and_name}/", + params={"limit": limit, "offset": offset}, + ) + commits = response.json()["commits"] + return commits[0]["commit_hash"] if commits else None - def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: - """ - Get a specific prompt by its identifier. + def _like_or_unlike_prompt( + self, prompt_identifier: str, like: bool + ) -> Dict[str, int]: + """Like or unlike a prompt. Args: prompt_identifier (str): The identifier of the prompt. + like (bool): True to like the prompt, False to unlike it. Returns: - ls_schemas.Prompt: The prompt object. + A dictionary with the key 'likes' and the count of likes as the value. Raises: requests.exceptions.HTTPError: If the prompt is not found or another error occurs. """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) - response = self.request_with_retries("GET", f"/repos/{owner}/{prompt_name}", to_ignore=[ls_utils.LangSmithError]) - if response.status_code == 200: - return ls_schemas.Prompt(**response.json()['repo']) + response = self.request_with_retries( + "POST", f"/likes/{owner}/{prompt_name}", json={"like": like} + ) response.raise_for_status() + return response.json + def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Check if a prompt exists. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + A dictionary with the key 'likes' and the count of likes as the value. - def prompt_exists(self, prompt_identifier: str) -> bool: """ - Check if a prompt exists. 
+ return self._like_or_unlike_prompt(prompt_identifier, like=True) + + def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Unlike a prompt. Args: prompt_identifier (str): The identifier of the prompt. Returns: - bool: True if the prompt exists, False otherwise. + A dictionary with the key 'likes' and the count of likes as the value. + """ - try: - self.get_prompt(prompt_identifier) - return True - except requests.exceptions.HTTPError as e: - return e.response.status_code != 404 + return self._like_or_unlike_prompt(prompt_identifier, like=False) + def list_prompts( + self, limit: int = 100, offset: int = 0 + ) -> ls_schemas.ListPromptsResponse: + """List prompts with pagination. + + Args: + limit (int): The maximum number of prompts to return. Defaults to 100. + offset (int): The number of prompts to skip. Defaults to 0. - def _get_latest_commit_hash(self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0) -> Optional[str]: + Returns: + ls_schemas.ListPromptsResponse: A response object containing the list of prompts. """ - Get the latest commit hash for a prompt. + params = {"limit": limit, "offset": offset} + response = self.request_with_retries("GET", "/repos", params=params) + return ls_schemas.ListPromptsResponse(**response.json()) + + def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: + """Get a specific prompt by its identifier. Args: - prompt_owner_and_name (str): The owner and name of the prompt. - limit (int): The maximum number of commits to fetch. Defaults to 1. - offset (int): The number of commits to skip. Defaults to 0. + prompt_identifier (str): The identifier of the prompt. The identifier should be in the format "prompt_name" or "owner/prompt_name". Returns: - Optional[str]: The latest commit hash, or None if no commits are found. + ls_schemas.Prompt: The prompt object. + + Raises: + requests.exceptions.HTTPError: If the prompt is not found or another error occurs. 
""" - response = self.request_with_retries("GET", f"/commits/{prompt_owner_and_name}/", params={"limit": limit, "offset": offset}) - commits = response.json()["commits"] - return commits[0]["commit_hash"] if commits else None + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + response = self.request_with_retries( + "GET", f"/repos/{owner}/{prompt_name}", to_ignore=[ls_utils.LangSmithError] + ) + if response.status_code == 200: + return ls_schemas.Prompt(**response.json()["repo"]) + response.raise_for_status() + def update_prompt( + self, + prompt_identifier: str, + *, + description: Optional[str] = None, + is_public: Optional[bool] = None, + tags: Optional[List[str]] = None, + ) -> Dict[str, Any]: + """Update a prompt's metadata. - def delete_prompt(self, prompt_identifier: str) -> bool: + Args: + prompt_identifier (str): The identifier of the prompt to update. + description (Optional[str]): New description for the prompt. + is_public (Optional[bool]): New public status for the prompt. + tags (Optional[List[str]]): New list of tags for the prompt. + + Returns: + Dict[str, Any]: The updated prompt data as returned by the server. + + Raises: + ValueError: If the prompt_identifier is empty. + HTTPError: If the server request fails. """ - Delete a prompt. + json: Dict[str, Union[str, bool, List[str]]] = {} + if description is not None: + json["description"] = description + if is_public is not None: + json["is_public"] = is_public + if tags is not None: + json["tags"] = tags + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + response = self.request_with_retries( + "PATCH", f"/repos/{owner}/{prompt_name}", json=json + ) + response.raise_for_status() + return response.json() + + def delete_prompt(self, prompt_identifier: str) -> bool: + """Delete a prompt. Args: prompt_identifier (str): The identifier of the prompt to delete. 
@@ -4670,14 +4751,14 @@ def delete_prompt(self, prompt_identifier: str) -> bool: """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) if not self.current_tenant_is_owner(owner): - raise ValueError(f"Cannot delete prompt for another tenant. Current tenant: {self.get_settings()['tenant_handle']}, Requested tenant: {owner}") + raise ValueError( + f"Cannot delete prompt for another tenant. Current tenant: {self.get_settings()['tenant_handle']}, Requested tenant: {owner}" + ) response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") return response.status_code == 204 - def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManifest: - """ - Pull a prompt manifest from the LangSmith API. + """Pull a prompt manifest from the LangSmith API. Args: prompt_identifier (str): The identifier of the prompt. @@ -4688,22 +4769,27 @@ def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManif Raises: ValueError: If no commits are found for the prompt. 
""" - owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(prompt_identifier) - use_optimization = ls_utils.is_version_greater_or_equal(self.info.version, "0.5.23") + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier( + prompt_identifier + ) + use_optimization = ls_utils.is_version_greater_or_equal( + self.info.version, "0.5.23" + ) if not use_optimization and (commit_hash is None or commit_hash == "latest"): commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") if commit_hash is None: raise ValueError("No commits found") - response = self.request_with_retries("GET", f"/commits/{owner}/{prompt_name}/{commit_hash}") - res = response.json() - return ls_schemas.PromptManifest(**{"owner": owner, "repo": prompt_name, **res}) - + response = self.request_with_retries( + "GET", f"/commits/{owner}/{prompt_name}/{commit_hash}" + ) + return ls_schemas.PromptManifest( + **{"owner": owner, "repo": prompt_name, **response.json()} + ) def pull_prompt(self, prompt_identifier: str) -> Any: - """ - Pull a prompt and return it as a LangChain object. + """Pull a prompt and return it as a LangChain object. Args: prompt_identifier (str): The identifier of the prompt. 
@@ -4713,22 +4799,30 @@ def pull_prompt(self, prompt_identifier: str) -> Any: """ from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate + response = self.pull_prompt_manifest(prompt_identifier) obj = loads(json.dumps(response.manifest)) if isinstance(obj, BasePromptTemplate): if obj.metadata is None: obj.metadata = {} - obj.metadata.update({ - "lc_hub_owner": response.owner, - "lc_hub_repo": response.repo, - "lc_hub_commit_hash": response.commit_hash - }) + obj.metadata.update( + { + "lc_hub_owner": response.owner, + "lc_hub_repo": response.repo, + "lc_hub_commit_hash": response.commit_hash, + } + ) return obj - - def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: str = "") -> str: - """ - Push a prompt manifest to the LangSmith API. + def push_prompt_manifest( + self, + prompt_identifier: str, + manifest_json: Any, + parent_commit_hash: Optional[str] = "latest", + is_public: bool = False, + description: str = "", + ) -> str: + """Push a prompt manifest to the LangSmith API. Args: prompt_identifier (str): The identifier of the prompt. @@ -4758,31 +4852,43 @@ def push_prompt_manifest(self, prompt_identifier: str, manifest_json: Any, paren prompt_full_name = f"{owner}/{prompt_name}" if not self.current_tenant_is_owner(owner): - raise ValueError(f"Cannot create prompt for another tenant. Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}") + raise ValueError( + f"Cannot create prompt for another tenant. 
Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}" + ) if not self.prompt_exists(prompt_full_name): - self.request_with_retries("POST", "/repos/", json={ - "repo_handle": prompt_name, - "is_public": is_public, - "description": description, - }) + self.request_with_retries( + "POST", + "/repos/", + json={ + "repo_handle": prompt_name, + "is_public": is_public, + "description": description, + }, + ) manifest_dict = json.loads(manifest_json) if parent_commit_hash == "latest": parent_commit_hash = self._get_latest_commit_hash(prompt_full_name) - + request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} - response = self.request_with_retries("POST", f"/commits/{prompt_full_name}", json=request_dict) - res = response.json() + response = self.request_with_retries( + "POST", f"/commits/{prompt_full_name}", json=request_dict + ) - commit_hash = res["commit"]["commit_hash"] + commit_hash = response.json()["commit"]["commit_hash"] short_hash = commit_hash[:8] return f"{self._host_url}/prompts/{prompt_name}/{short_hash}?organizationId={settings['id']}" - - def push_prompt(self, prompt_identifier: str, obj: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: str = "") -> str: - """ - Push a prompt object to the LangSmith API. + def push_prompt( + self, + prompt_identifier: str, + obj: Any, + parent_commit_hash: Optional[str] = "latest", + is_public: bool = False, + description: str = "", + ) -> str: + """Push a prompt object to the LangSmith API. This method is a wrapper around push_prompt_manifest. @@ -4796,7 +4902,9 @@ def push_prompt(self, prompt_identifier: str, obj: Any, parent_commit_hash: Opti Returns: str: The URL of the pushed prompt. 
""" - return self.push_prompt_manifest(prompt_identifier, obj, parent_commit_hash, is_public, description) + return self.push_prompt_manifest( + prompt_identifier, obj, parent_commit_hash, is_public, description + ) def _tracing_thread_drain_queue( diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index d8ea947bd..ebebfcb63 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -747,38 +747,80 @@ def metadata(self) -> dict[str, Any]: class PromptManifest(BaseModel): + """Represents a Prompt with a manifest. + + Attributes: + repo (str): The name of the prompt. + commit_hash (str): The commit hash of the prompt. + manifest (Dict[str, Any]): The manifest of the prompt. + examples (List[dict]): The list of examples. + """ + owner: str + """The handle of the owner of the prompt.""" repo: str + """The name of the prompt.""" commit_hash: str + """The commit hash of the prompt.""" manifest: Dict[str, Any] + """The manifest of the prompt.""" examples: List[dict] + """The list of examples.""" class Prompt(BaseModel): + """Represents a Prompt with metadata.""" + + owner: str + """The handle of the owner of the prompt.""" repo_handle: str + """The name of the prompt.""" + full_name: str + """The full name of the prompt. 
(owner + repo_handle)""" description: str | None + """The description of the prompt.""" readme: str | None + """The README of the prompt.""" id: str + """The ID of the prompt.""" tenant_id: str + """The tenant ID of the prompt owner.""" created_at: datetime + """The creation time of the prompt.""" updated_at: datetime + """The last update time of the prompt.""" is_public: bool + """Whether the prompt is public.""" is_archived: bool + """Whether the prompt is archived.""" tags: List[str] + """The tags associated with the prompt.""" original_repo_id: str | None + """The ID of the original prompt, if forked.""" upstream_repo_id: str | None - owner: str - full_name: str + """The ID of the upstream prompt, if forked.""" num_likes: int + """The number of likes.""" num_downloads: int + """The number of downloads.""" num_views: int + """The number of views.""" liked_by_auth_user: bool + """Whether the prompt is liked by the authenticated user.""" last_commit_hash: str | None + """The hash of the last commit.""" num_commits: int + """The number of commits.""" original_repo_full_name: str | None + """The full name of the original prompt, if forked.""" upstream_repo_full_name: str | None + """The full name of the upstream prompt, if forked.""" class ListPromptsResponse(BaseModel): + """A list of prompts with metadata.""" + repos: List[Prompt] + """The list of prompts.""" total: int + """The total number of prompts.""" diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index b8ad4f6fc..6fb0f0ff9 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -564,6 +564,7 @@ def deepish_copy(val: T) -> T: def is_version_greater_or_equal(current_version, target_version): + """Check if the current version is greater or equal to the target version.""" from packaging import version current = version.parse(current_version) @@ -572,17 +573,35 @@ def is_version_greater_or_equal(current_version, target_version): def parse_prompt_identifier(identifier: str) -> 
Tuple[str, str, str]: - """ - Parses a string in the format of `owner/repo:commit` and returns a tuple of - (owner, repo, commit). - """ - owner_prompt = identifier - commit = "latest" - if ":" in identifier: - owner_prompt, commit = identifier.split(":", 1) + """Parse a string in the format of `owner/name[:commit]` or `name[:commit]` and returns a tuple of (owner, name, commit). + + Args: + identifier (str): The prompt identifier to parse. - if "/" not in owner_prompt: - return "-", owner_prompt, commit + Returns: + Tuple[str, str, str]: A tuple containing (owner, name, commit). - owner, prompt = owner_prompt.split("/", 1) - return owner, prompt, commit + Raises: + ValueError: If the identifier doesn't match the expected formats. + """ + if ( + not identifier + or identifier.count("/") > 1 + or identifier.startswith("/") + or identifier.endswith("/") + ): + raise ValueError(f"Invalid identifier format: {identifier}") + + parts = identifier.split(":", 1) + owner_name = parts[0] + commit = parts[1] if len(parts) > 1 else "latest" + + if "/" in owner_name: + owner, name = owner_name.split("/", 1) + if not owner or not name: + raise ValueError(f"Invalid identifier format: {identifier}") + return owner, name, commit + else: + if not owner_name: + raise ValueError(f"Invalid identifier format: {identifier}") + return "-", owner_name, commit diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 5b6798e8d..45aec0932 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"][ - "name" - ] += chunk.function.name + message["tool_calls"][index]["function"]["name"] += ( + chunk.function.name + ) if chunk.function.arguments: - message["tool_calls"][index]["function"][ - "arguments" - ] += chunk.function.arguments + 
message["tool_calls"][index]["function"]["arguments"] += ( + chunk.function.arguments + ) return { "index": choices[0].index, "finish_reason": next( diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index 4554314f4..2e6f1cb89 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -1,9 +1,11 @@ -import pytest from uuid import uuid4 -from langsmith.client import Client -from langsmith.schemas import Prompt, ListPromptsResponse + +import pytest from langchain_core.prompts import ChatPromptTemplate +from langsmith.client import Client +from langsmith.schemas import ListPromptsResponse, Prompt, PromptManifest + @pytest.fixture def langsmith_client() -> Client: @@ -25,68 +27,151 @@ def prompt_template_2() -> ChatPromptTemplate: ) +def test_current_tenant_is_owner(langsmith_client: Client): + settings = langsmith_client.get_settings() + assert langsmith_client.current_tenant_is_owner(settings["tenant_handle"]) + assert langsmith_client.current_tenant_is_owner("-") + assert not langsmith_client.current_tenant_is_owner("non_existent_owner") + + def test_list_prompts(langsmith_client: Client): - # Test listing prompts response = langsmith_client.list_prompts(limit=10, offset=0) assert isinstance(response, ListPromptsResponse) assert len(response.repos) <= 10 def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): - # First, create a prompt to test with prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, prompt_template_1) - # Now test getting the prompt prompt = langsmith_client.get_prompt(prompt_name) assert isinstance(prompt, Prompt) assert prompt.repo_handle == prompt_name - # Clean up langsmith_client.delete_prompt(prompt_name) - assert not langsmith_client.prompt_exists(prompt_name) def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): - # Test with a non-existent prompt non_existent_prompt 
= f"non_existent_{uuid4().hex[:8]}" assert not langsmith_client.prompt_exists(non_existent_prompt) - # Create a prompt and test again existent_prompt = f"existent_{uuid4().hex[:8]}" langsmith_client.push_prompt(existent_prompt, prompt_template_2) assert langsmith_client.prompt_exists(existent_prompt) - # Clean up langsmith_client.delete_prompt(existent_prompt) - assert not langsmith_client.prompt_exists(existent_prompt) -def test_push_and_pull_prompt(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): +def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + updated_data = langsmith_client.update_prompt( + prompt_name, + description="Updated description", + is_public=True, + tags=["test", "update"], + ) + assert isinstance(updated_data, dict) + + updated_prompt = langsmith_client.get_prompt(prompt_name) + assert updated_prompt.description == "Updated description" + assert updated_prompt.is_public + assert set(updated_prompt.tags) == set(["test", "update"]) + + langsmith_client.delete_prompt(prompt_name) + + +def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + assert langsmith_client.prompt_exists(prompt_name) + langsmith_client.delete_prompt(prompt_name) + assert not langsmith_client.prompt_exists(prompt_name) + + +def test_pull_prompt_manifest( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + manifest = langsmith_client.pull_prompt_manifest(prompt_name) + assert isinstance(manifest, PromptManifest) + assert manifest.repo == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def 
test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + pulled_prompt = langsmith_client.pull_prompt(prompt_name) + assert isinstance(pulled_prompt, ChatPromptTemplate) + + langsmith_client.delete_prompt(prompt_name) + + +def test_push_and_pull_prompt( + langsmith_client: Client, prompt_template_2: ChatPromptTemplate +): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - # Test pushing a prompt push_result = langsmith_client.push_prompt(prompt_name, prompt_template_2) - assert isinstance(push_result, str) # Should return a URL + assert isinstance(push_result, str) - # Test pulling the prompt - langsmith_client.pull_prompt(prompt_name) + pulled_prompt = langsmith_client.pull_prompt(prompt_name) + assert isinstance(pulled_prompt, ChatPromptTemplate) - # Clean up langsmith_client.delete_prompt(prompt_name) + # should fail + with pytest.raises(ValueError): + langsmith_client.push_prompt(f"random_handle/{prompt_name}", prompt_template_2) + -def test_push_prompt_manifest(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): +def test_push_prompt_manifest( + langsmith_client: Client, prompt_template_2: ChatPromptTemplate +): prompt_name = f"test_prompt_manifest_{uuid4().hex[:8]}" - # Test pushing a prompt manifest result = langsmith_client.push_prompt_manifest(prompt_name, prompt_template_2) - assert isinstance(result, str) # Should return a URL + assert isinstance(result, str) - # Verify the pushed manifest pulled_prompt_manifest = langsmith_client.pull_prompt_manifest(prompt_name) latest_commit_hash = langsmith_client._get_latest_commit_hash(f"-/{prompt_name}") assert pulled_prompt_manifest.commit_hash == latest_commit_hash - # Clean up + langsmith_client.delete_prompt(prompt_name) + + +def test_like_unlike_prompt( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = 
f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + langsmith_client.like_prompt(prompt_name) + prompt = langsmith_client.get_prompt(prompt_name) + assert prompt.num_likes == 1 + + langsmith_client.unlike_prompt(prompt_name) + prompt = langsmith_client.get_prompt(prompt_name) + assert prompt.num_likes == 0 + + langsmith_client.delete_prompt(prompt_name) + + +def test_get_latest_commit_hash( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_prompt_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + commit_hash = langsmith_client._get_latest_commit_hash(f"-/{prompt_name}") + assert isinstance(commit_hash, str) + assert len(commit_hash) > 0 + langsmith_client.delete_prompt(prompt_name) diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index 9cadaa9cb..bd57385cf 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -13,7 +13,6 @@ import attr import dataclasses_json import pytest -from pydantic import BaseModel import langsmith.utils as ls_utils from langsmith import Client, traceable @@ -163,19 +162,6 @@ def __init__(self) -> None: class MyClassWithSlots: __slots__ = ["x", "y"] - - def __init__(self, x: int) -> None: - self.x = x - self.y = "y" - - class MyPydantic(BaseModel): - foo: str - bar: int - baz: dict - - @dataclasses.dataclass - class MyDataclass: - foo: str bar: int def something(self) -> None: @@ -264,3 +250,50 @@ class MyNamedTuple(NamedTuple): "fake_json": ClassWithFakeJson(), } assert ls_utils.deepish_copy(my_dict) == my_dict + + +def test_is_version_greater_or_equal(): + # Test versions equal to 0.5.23 + assert ls_utils.is_version_greater_or_equal("0.5.23", "0.5.23") + + # Test versions greater than 0.5.23 + assert ls_utils.is_version_greater_or_equal("0.5.24", "0.5.23") + assert ls_utils.is_version_greater_or_equal("0.6.0", "0.5.23") + assert 
ls_utils.is_version_greater_or_equal("1.0.0", "0.5.23") + + # Test versions less than 0.5.23 + assert not ls_utils.is_version_greater_or_equal("0.5.22", "0.5.23") + assert not ls_utils.is_version_greater_or_equal("0.5.0", "0.5.23") + assert not ls_utils.is_version_greater_or_equal("0.4.99", "0.5.23") + + +def test_parse_prompt_identifier(): + # Valid cases + assert ls_utils.parse_prompt_identifier("name") == ("-", "name", "latest") + assert ls_utils.parse_prompt_identifier("owner/name") == ("owner", "name", "latest") + assert ls_utils.parse_prompt_identifier("owner/name:commit") == ( + "owner", + "name", + "commit", + ) + assert ls_utils.parse_prompt_identifier("name:commit") == ("-", "name", "commit") + + # Invalid cases + invalid_identifiers = [ + "", + "/", + ":", + "owner/", + "/name", + "owner//name", + "owner/name/", + "owner/name/extra", + ":commit", + ] + + for invalid_id in invalid_identifiers: + try: + ls_utils.parse_prompt_identifier(invalid_id) + assert False, f"Expected ValueError for identifier: {invalid_id}" + except ValueError: + pass # This is the expected behavior From 124b78860342cb1d1dd819774b579ede2ed5e88f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 19:27:45 -0700 Subject: [PATCH 220/373] fix --- python/tests/unit_tests/test_utils.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index bd57385cf..09af201a7 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -13,6 +13,7 @@ import attr import dataclasses_json import pytest +from pydantic import BaseModel import langsmith.utils as ls_utils from langsmith import Client, traceable @@ -162,6 +163,18 @@ def __init__(self) -> None: class MyClassWithSlots: __slots__ = ["x", "y"] + def __init__(self, x: int) -> None: + self.x = x + self.y = "y" + + class MyPydantic(BaseModel): + foo: str + bar: int + baz: dict + + @dataclasses.dataclass + 
class MyDataclass: + foo: str bar: int def something(self) -> None: From df4f726d33c3ee495819dff44b160503e77c953b Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 11 Jul 2024 20:27:45 -0700 Subject: [PATCH 221/373] format --- python/langsmith/client.py | 90 ++++++++++++--------------- python/langsmith/utils.py | 4 +- python/tests/prompts/test_prompts.py | 15 ----- python/tests/unit_tests/test_utils.py | 1 + 4 files changed, 44 insertions(+), 66 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 86a13b107..c89c1e777 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4630,7 +4630,8 @@ def _like_or_unlike_prompt( A dictionary with the key 'likes' and the count of likes as the value. Raises: - requests.exceptions.HTTPError: If the prompt is not found or another error occurs. + requests.exceptions.HTTPError: If the prompt is not found or + another error occurs. """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries( @@ -4673,7 +4674,8 @@ def list_prompts( offset (int): The number of prompts to skip. Defaults to 0. Returns: - ls_schemas.ListPromptsResponse: A response object containing the list of prompts. + ls_schemas.ListPromptsResponse: A response object containing + the list of prompts. """ params = {"limit": limit, "offset": offset} response = self.request_with_retries("GET", "/repos", params=params) @@ -4683,13 +4685,15 @@ def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: """Get a specific prompt by its identifier. Args: - prompt_identifier (str): The identifier of the prompt. The identifier should be in the format "prompt_name" or "owner/prompt_name". + prompt_identifier (str): The identifier of the prompt. + The identifier should be in the format "prompt_name" or "owner/prompt_name". Returns: ls_schemas.Prompt: The prompt object. 
Raises: - requests.exceptions.HTTPError: If the prompt is not found or another error occurs. + requests.exceptions.HTTPError: If the prompt is not found or + another error occurs. """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries( @@ -4752,7 +4756,9 @@ def delete_prompt(self, prompt_identifier: str) -> bool: owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) if not self.current_tenant_is_owner(owner): raise ValueError( - f"Cannot delete prompt for another tenant. Current tenant: {self.get_settings()['tenant_handle']}, Requested tenant: {owner}" + f"Cannot delete prompt for another tenant.\n" + f"Current tenant: {self.get_settings()['tenant_handle']},\n" + f"Requested tenant: {owner}" ) response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") return response.status_code == 204 @@ -4789,32 +4795,35 @@ def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManif ) def pull_prompt(self, prompt_identifier: str) -> Any: - """Pull a prompt and return it as a LangChain object. + """Pull a prompt and return it as a LangChain PromptTemplate. + + This method requires `langchain_core` to convert the prompt manifest. Args: prompt_identifier (str): The identifier of the prompt. Returns: - Any: The prompt object. + Any: The prompt object in the specified format. 
""" from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate - response = self.pull_prompt_manifest(prompt_identifier) - obj = loads(json.dumps(response.manifest)) - if isinstance(obj, BasePromptTemplate): - if obj.metadata is None: - obj.metadata = {} - obj.metadata.update( + prompt_manifest = self.pull_prompt_manifest(prompt_identifier) + prompt = loads(json.dumps(prompt_manifest.manifest)) + if isinstance(prompt, BasePromptTemplate): + if prompt.metadata is None: + prompt.metadata = {} + prompt.metadata.update( { - "lc_hub_owner": response.owner, - "lc_hub_repo": response.repo, - "lc_hub_commit_hash": response.commit_hash, + "lc_hub_owner": prompt_manifest.owner, + "lc_hub_repo": prompt_manifest.repo, + "lc_hub_commit_hash": prompt_manifest.commit_hash, } ) - return obj - def push_prompt_manifest( + return prompt + + def push_prompt( self, prompt_identifier: str, manifest_json: Any, @@ -4827,7 +4836,8 @@ def push_prompt_manifest( Args: prompt_identifier (str): The identifier of the prompt. manifest_json (Any): The manifest to push. - parent_commit_hash (Optional[str]): The parent commit hash. Defaults to "latest". + parent_commit_hash (Optional[str]): The parent commit hash. + Defaults to "latest". is_public (bool): Whether the prompt should be public. Defaults to False. description (str): A description of the prompt. Defaults to an empty string. @@ -4835,7 +4845,8 @@ def push_prompt_manifest( str: The URL of the pushed prompt. Raises: - ValueError: If a public prompt is attempted without a tenant handle or if the current tenant is not the owner. + ValueError: If a public prompt is attempted without a tenant handle or + if the current tenant is not the owner. """ from langchain_core.load.dump import dumps @@ -4844,8 +4855,10 @@ def push_prompt_manifest( if is_public and not settings.get("tenant_handle"): raise ValueError( - "Cannot create a public prompt without first creating a LangChain Hub handle. 
" - "You can add a handle by creating a public prompt at: https://smith.langchain.com/prompts" + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" ) owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) @@ -4853,7 +4866,9 @@ def push_prompt_manifest( if not self.current_tenant_is_owner(owner): raise ValueError( - f"Cannot create prompt for another tenant. Current tenant: {settings['tenant_handle'] or 'no handle'}, Requested tenant: {owner}" + "Cannot create prompt for another tenant." + f"Current tenant: {settings['tenant_handle'] or 'no handle'}" + f", Requested tenant: {owner}" ) if not self.prompt_exists(prompt_full_name): @@ -4878,32 +4893,9 @@ def push_prompt_manifest( commit_hash = response.json()["commit"]["commit_hash"] short_hash = commit_hash[:8] - return f"{self._host_url}/prompts/{prompt_name}/{short_hash}?organizationId={settings['id']}" - - def push_prompt( - self, - prompt_identifier: str, - obj: Any, - parent_commit_hash: Optional[str] = "latest", - is_public: bool = False, - description: str = "", - ) -> str: - """Push a prompt object to the LangSmith API. - - This method is a wrapper around push_prompt_manifest. - - Args: - prompt_identifier (str): The identifier of the prompt. The format is "name" or "-/name" or "workspace_handle/name". - obj (Any): The prompt object to push. - parent_commit_hash (Optional[str]): The parent commit hash. Defaults to "latest". - is_public (bool): Whether the prompt should be public. Defaults to False. - description (str): A description of the prompt. Defaults to an empty string. - - Returns: - str: The URL of the pushed prompt. 
- """ - return self.push_prompt_manifest( - prompt_identifier, obj, parent_commit_hash, is_public, description + return ( + f"{self._host_url}/prompts/{prompt_name}/{short_hash}" + f"?organizationId={settings['id']}" ) diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 6fb0f0ff9..9f20ffd8c 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -573,13 +573,13 @@ def is_version_greater_or_equal(current_version, target_version): def parse_prompt_identifier(identifier: str) -> Tuple[str, str, str]: - """Parse a string in the format of `owner/name[:commit]` or `name[:commit]` and returns a tuple of (owner, name, commit). + """Parse a string in the format of owner/name:hash, name:hash, owner/name, or name. Args: identifier (str): The prompt identifier to parse. Returns: - Tuple[str, str, str]: A tuple containing (owner, name, commit). + Tuple[str, str, str]: A tuple containing (owner, name, hash). Raises: ValueError: If the identifier doesn't match the expected formats. 
diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index 2e6f1cb89..ec7f76da1 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -132,21 +132,6 @@ def test_push_and_pull_prompt( langsmith_client.push_prompt(f"random_handle/{prompt_name}", prompt_template_2) -def test_push_prompt_manifest( - langsmith_client: Client, prompt_template_2: ChatPromptTemplate -): - prompt_name = f"test_prompt_manifest_{uuid4().hex[:8]}" - - result = langsmith_client.push_prompt_manifest(prompt_name, prompt_template_2) - assert isinstance(result, str) - - pulled_prompt_manifest = langsmith_client.pull_prompt_manifest(prompt_name) - latest_commit_hash = langsmith_client._get_latest_commit_hash(f"-/{prompt_name}") - assert pulled_prompt_manifest.commit_hash == latest_commit_hash - - langsmith_client.delete_prompt(prompt_name) - - def test_like_unlike_prompt( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): diff --git a/python/tests/unit_tests/test_utils.py b/python/tests/unit_tests/test_utils.py index 09af201a7..8fd493478 100644 --- a/python/tests/unit_tests/test_utils.py +++ b/python/tests/unit_tests/test_utils.py @@ -163,6 +163,7 @@ def __init__(self) -> None: class MyClassWithSlots: __slots__ = ["x", "y"] + def __init__(self, x: int) -> None: self.x = x self.y = "y" From c0eeb92640b47726d0aada7264423bea93e557fe Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 10:21:19 -0700 Subject: [PATCH 222/373] expand list_prompts functionality --- _scripts/_fetch_schema.py | 18 ++++++++++++------ python/langsmith/client.py | 32 ++++++++++++++++++++++++++++++-- python/langsmith/schemas.py | 13 +++++++++++++ 3 files changed, 55 insertions(+), 8 deletions(-) diff --git a/_scripts/_fetch_schema.py b/_scripts/_fetch_schema.py index 741e12a9c..ba8c171bd 100644 --- a/_scripts/_fetch_schema.py +++ b/_scripts/_fetch_schema.py @@ -1,4 +1,5 @@ """Fetch and prune the Langsmith spec.""" + import 
argparse from pathlib import Path @@ -19,7 +20,9 @@ def process_schema(sub_schema): get_dependencies(schema, sub_schema["$ref"].split("/")[-1], new_components) else: if "items" in sub_schema and "$ref" in sub_schema["items"]: - get_dependencies(schema, sub_schema["items"]["$ref"].split("/")[-1], new_components) + get_dependencies( + schema, sub_schema["items"]["$ref"].split("/")[-1], new_components + ) for keyword in ["anyOf", "oneOf", "allOf"]: if keyword in sub_schema: for item in sub_schema[keyword]: @@ -38,8 +41,6 @@ def process_schema(sub_schema): process_schema(item) - - def _extract_langsmith_routes_and_properties(schema, operation_ids): new_paths = {} new_components = {"schemas": {}} @@ -98,20 +99,25 @@ def test_openapi_specification(spec: dict): assert errors is None, f"OpenAPI validation failed: {errors}" -def main(out_file: str = "openapi.yaml", url: str = "https://web.smith.langchain.com/openapi.json"): +def main( + out_file: str = "openapi.yaml", + url: str = "https://web.smith.langchain.com/openapi.json", +): langsmith_schema = get_langsmith_runs_schema(url=url) parent_dir = Path(__file__).parent.parent test_openapi_specification(langsmith_schema) with (parent_dir / "openapi" / out_file).open("w") as f: # Sort the schema keys so the openapi version and info come at the top - for key in ['openapi', 'info', 'paths', 'components']: + for key in ["openapi", "info", "paths", "components"]: langsmith_schema[key] = langsmith_schema.pop(key) f.write(yaml.dump(langsmith_schema, sort_keys=False)) if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("--url", type=str, default="https://web.smith.langchain.com/openapi.json") + parser.add_argument( + "--url", type=str, default="https://web.smith.langchain.com/openapi.json" + ) parser.add_argument("--output", type=str, default="openapi.yaml") args = parser.parse_args() main(args.output, url=args.url) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 
c89c1e777..503964002 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4665,19 +4665,47 @@ def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: return self._like_or_unlike_prompt(prompt_identifier, like=False) def list_prompts( - self, limit: int = 100, offset: int = 0 + self, + *, + limit: int = 100, + offset: int = 0, + is_public: Optional[bool] = None, + is_archived: Optional[bool] = False, + sort_field: ls_schemas.PromptsSortField = ls_schemas.PromptsSortField.updated_at, + sort_direction: Literal["desc", "asc"] = "desc", + query: Optional[str] = None, ) -> ls_schemas.ListPromptsResponse: """List prompts with pagination. Args: limit (int): The maximum number of prompts to return. Defaults to 100. offset (int): The number of prompts to skip. Defaults to 0. + is_public (Optional[bool]): Filter prompts by if they are public. + is_archived (Optional[bool]): Filter prompts by if they are archived. + sort_field (ls_schemas.PromptsSortField): The field to sort by. + Defaults to "updated_at". + sort_direction (Literal["desc", "asc"]): The order to sort by. Defaults to "desc". + query (Optional[str]): Filter prompts by a search query. Returns: ls_schemas.ListPromptsResponse: A response object containing the list of prompts. 
""" - params = {"limit": limit, "offset": offset} + params = { + "limit": limit, + "offset": offset, + "is_public": "true" + if is_public + else "false" + if is_public is not None + else None, + "is_archived": "true" if is_archived else "false", + "sort_field": sort_field, + "sort_direction": sort_direction, + "query": query, + "match_prefix": "true" if query else None, + } + response = self.request_with_retries("GET", "/repos", params=params) return ls_schemas.ListPromptsResponse(**response.json()) diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index ebebfcb63..8166923a4 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -824,3 +824,16 @@ class ListPromptsResponse(BaseModel): """The list of prompts.""" total: int """The total number of prompts.""" + + +class PromptsSortField(str, Enum): + """Enum for sorting fields for prompts.""" + + num_downloads = "num_downloads" + """Number of downloads.""" + num_views = "num_views" + """Number of views.""" + updated_at = "updated_at" + """Last updated time.""" + num_likes = "num_likes" + """Number of likes.""" From 910fc028c7abd764c4a357758d6b67bd8694ba21 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 10:28:47 -0700 Subject: [PATCH 223/373] line length --- python/langsmith/client.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 503964002..d272d4c3c 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4671,7 +4671,8 @@ def list_prompts( offset: int = 0, is_public: Optional[bool] = None, is_archived: Optional[bool] = False, - sort_field: ls_schemas.PromptsSortField = ls_schemas.PromptsSortField.updated_at, + sort_field: ls_schemas.PromptsSortField = + ls_schemas.PromptsSortField.updated_at, sort_direction: Literal["desc", "asc"] = "desc", query: Optional[str] = None, ) -> ls_schemas.ListPromptsResponse: @@ -4684,7 +4685,8 @@ def list_prompts( 
is_archived (Optional[bool]): Filter prompts by if they are archived. sort_field (ls_schemas.PromptsSortField): The field to sort by. Defaults to "updated_at". - sort_direction (Literal["desc", "asc"]): The order to sort by. Defaults to "desc". + sort_direction (Literal["desc", "asc"]): The order to sort by. + Defaults to "desc". query (Optional[str]): Filter prompts by a search query. Returns: From 19e663e168dea75efc0960ea5511f9d940db8342 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:07:18 -0700 Subject: [PATCH 224/373] feat: methods to convert prompt to openai and anthropic formats --- python/langsmith/client.py | 58 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index d272d4c3c..c11cbc980 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4671,8 +4671,7 @@ def list_prompts( offset: int = 0, is_public: Optional[bool] = None, is_archived: Optional[bool] = False, - sort_field: ls_schemas.PromptsSortField = - ls_schemas.PromptsSortField.updated_at, + sort_field: ls_schemas.PromptsSortField = ls_schemas.PromptsSortField.updated_at, sort_direction: Literal["desc", "asc"] = "desc", query: Optional[str] = None, ) -> ls_schemas.ListPromptsResponse: @@ -4928,6 +4927,61 @@ def push_prompt( f"?organizationId={settings['id']}" ) + def convert_to_openai_format( + self, messages: Any, stop: Optional[List[str]] = None, **kwargs: Any + ) -> dict: + """Convert a prompt to OpenAI format. + + Requires the `langchain_openai` package to be installed. + + Args: + messages (Any): The messages to convert. + stop (Optional[List[str]]): Stop sequences for the prompt. + **kwargs: Additional arguments for the conversion. + + Returns: + dict: The prompt in OpenAI format. 
+ """ + from langchain_openai import ChatOpenAI + + openai = ChatOpenAI() + + try: + return openai._get_request_payload(messages, stop=stop, **kwargs) + except Exception as e: + print(e) + return None + + def convert_to_anthropic_format( + self, + messages: Any, + model_name: Optional[str] = "claude-2", + stop: Optional[List[str]] = None, + **kwargs: Any, + ) -> dict: + """Convert a prompt to Anthropic format. + + Requires the `langchain_anthropic` package to be installed. + + Args: + messages (Any): The messages to convert. + model_name (Optional[str]): The model name to use. Defaults to "claude-2". + stop (Optional[List[str]]): Stop sequences for the prompt. + **kwargs: Additional arguments for the conversion. + + Returns: + dict: The prompt in Anthropic format. + """ + from langchain_anthropic import ChatAnthropic + + anthropic = ChatAnthropic(model_name=model_name) + + try: + return anthropic._get_request_payload(messages, stop=stop, **kwargs) + except Exception as e: + print(e) + return None + def _tracing_thread_drain_queue( tracing_queue: Queue, limit: int = 100, block: bool = True From 3ee0beb93e2fd29a40dac456164582f293b02bc9 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:16:34 -0700 Subject: [PATCH 225/373] integration tests --- python/langsmith/schemas.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 8166923a4..8bb542ad6 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -777,9 +777,9 @@ class Prompt(BaseModel): """The name of the prompt.""" full_name: str """The full name of the prompt. 
(owner + repo_handle)""" - description: str | None + description: str = None """The description of the prompt.""" - readme: str | None + readme: str = None """The README of the prompt.""" id: str """The ID of the prompt.""" @@ -795,9 +795,9 @@ class Prompt(BaseModel): """Whether the prompt is archived.""" tags: List[str] """The tags associated with the prompt.""" - original_repo_id: str | None + original_repo_id: str = None """The ID of the original prompt, if forked.""" - upstream_repo_id: str | None + upstream_repo_id: str = None """The ID of the upstream prompt, if forked.""" num_likes: int """The number of likes.""" @@ -807,13 +807,13 @@ class Prompt(BaseModel): """The number of views.""" liked_by_auth_user: bool """Whether the prompt is liked by the authenticated user.""" - last_commit_hash: str | None + last_commit_hash: str = None """The hash of the last commit.""" num_commits: int """The number of commits.""" - original_repo_full_name: str | None + original_repo_full_name: str = None """The full name of the original prompt, if forked.""" - upstream_repo_full_name: str | None + upstream_repo_full_name: str = None """The full name of the upstream prompt, if forked.""" From b175603f8ca51b9bbe1dec40e1e6040c6aab3900 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:19:02 -0700 Subject: [PATCH 226/373] format --- python/langsmith/client.py | 11 ++++------- python/langsmith/wrappers/_openai.py | 12 ++++++------ 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index d272d4c3c..469b13634 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4671,8 +4671,7 @@ def list_prompts( offset: int = 0, is_public: Optional[bool] = None, is_archived: Optional[bool] = False, - sort_field: ls_schemas.PromptsSortField = - ls_schemas.PromptsSortField.updated_at, + sort_field: ls_schemas.PromptsSortField = "updated_at", sort_direction: Literal["desc", "asc"] = "desc", 
query: Optional[str] = None, ) -> ls_schemas.ListPromptsResponse: @@ -4696,11 +4695,9 @@ def list_prompts( params = { "limit": limit, "offset": offset, - "is_public": "true" - if is_public - else "false" - if is_public is not None - else None, + "is_public": ( + "true" if is_public else "false" if is_public is not None else None + ), "is_archived": "true" if is_archived else "false", "sort_field": sort_field, "sort_direction": sort_direction, diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 45aec0932..5b6798e8d 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"]["name"] += ( - chunk.function.name - ) + message["tool_calls"][index]["function"][ + "name" + ] += chunk.function.name if chunk.function.arguments: - message["tool_calls"][index]["function"]["arguments"] += ( - chunk.function.arguments - ) + message["tool_calls"][index]["function"][ + "arguments" + ] += chunk.function.arguments return { "index": choices[0].index, "finish_reason": next( From fd9aaa2e061ff701e85d1e5f6234ae3a88a66824 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:26:13 -0700 Subject: [PATCH 227/373] schema --- python/langsmith/schemas.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 8bb542ad6..38343320f 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -771,15 +771,11 @@ class PromptManifest(BaseModel): class Prompt(BaseModel): """Represents a Prompt with metadata.""" - owner: str - """The handle of the owner of the prompt.""" repo_handle: str """The name of the prompt.""" - full_name: str - """The full name of the prompt. 
(owner + repo_handle)""" - description: str = None + description: Optional[str] = None """The description of the prompt.""" - readme: str = None + readme: Optional[str] = None """The README of the prompt.""" id: str """The ID of the prompt.""" @@ -795,10 +791,14 @@ class Prompt(BaseModel): """Whether the prompt is archived.""" tags: List[str] """The tags associated with the prompt.""" - original_repo_id: str = None + original_repo_id: Optional[str] = None """The ID of the original prompt, if forked.""" upstream_repo_id: str = None """The ID of the upstream prompt, if forked.""" + owner: Optional[str] + """The handle of the owner of the prompt.""" + full_name: str + """The full name of the prompt. (owner + repo_handle)""" num_likes: int """The number of likes.""" num_downloads: int @@ -807,7 +807,7 @@ class Prompt(BaseModel): """The number of views.""" liked_by_auth_user: bool """Whether the prompt is liked by the authenticated user.""" - last_commit_hash: str = None + last_commit_hash: Optional[str] = None """The hash of the last commit.""" num_commits: int """The number of commits.""" From 5df0391527d77646c377ca005e6c013bcd708f0a Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:32:13 -0700 Subject: [PATCH 228/373] more fixes --- python/langsmith/client.py | 2 +- python/langsmith/schemas.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 469b13634..88e189540 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4671,7 +4671,7 @@ def list_prompts( offset: int = 0, is_public: Optional[bool] = None, is_archived: Optional[bool] = False, - sort_field: ls_schemas.PromptsSortField = "updated_at", + sort_field: ls_schemas.PromptSortField = ls_schemas.PromptSortField.updated_at, sort_direction: Literal["desc", "asc"] = "desc", query: Optional[str] = None, ) -> ls_schemas.ListPromptsResponse: diff --git a/python/langsmith/schemas.py 
b/python/langsmith/schemas.py index 38343320f..c37b9d14c 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -793,7 +793,7 @@ class Prompt(BaseModel): """The tags associated with the prompt.""" original_repo_id: Optional[str] = None """The ID of the original prompt, if forked.""" - upstream_repo_id: str = None + upstream_repo_id: Optional[str] = None """The ID of the upstream prompt, if forked.""" owner: Optional[str] """The handle of the owner of the prompt.""" @@ -811,9 +811,9 @@ class Prompt(BaseModel): """The hash of the last commit.""" num_commits: int """The number of commits.""" - original_repo_full_name: str = None + original_repo_full_name: Optional[str] = None """The full name of the original prompt, if forked.""" - upstream_repo_full_name: str = None + upstream_repo_full_name: Optional[str] = None """The full name of the upstream prompt, if forked.""" @@ -826,7 +826,7 @@ class ListPromptsResponse(BaseModel): """The total number of prompts.""" -class PromptsSortField(str, Enum): +class PromptSortField(str, Enum): """Enum for sorting fields for prompts.""" num_downloads = "num_downloads" From 89feff0962232eca0c88e991d0a48d7fa95e28c5 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:34:50 -0700 Subject: [PATCH 229/373] fix more --- python/langsmith/client.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 88e189540..2ac9a31fe 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4708,7 +4708,7 @@ def list_prompts( response = self.request_with_retries("GET", "/repos", params=params) return ls_schemas.ListPromptsResponse(**response.json()) - def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: + def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt | None: """Get a specific prompt by its identifier. 
Args: @@ -4729,6 +4729,7 @@ def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt: if response.status_code == 200: return ls_schemas.Prompt(**response.json()["repo"]) response.raise_for_status() + return None def update_prompt( self, @@ -4809,7 +4810,7 @@ def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManif self.info.version, "0.5.23" ) - if not use_optimization and (commit_hash is None or commit_hash == "latest"): + if not use_optimization and commit_hash == "latest": commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") if commit_hash is None: raise ValueError("No commits found") From fd61a4c5755f6d86b14b3538000f5655ebd92394 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:40:30 -0700 Subject: [PATCH 230/373] working through CI --- python/langsmith/client.py | 4 ++-- python/tests/prompts/test_prompts.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 2ac9a31fe..f99a640e5 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4708,7 +4708,7 @@ def list_prompts( response = self.request_with_retries("GET", "/repos", params=params) return ls_schemas.ListPromptsResponse(**response.json()) - def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt | None: + def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: """Get a specific prompt by its identifier. Args: @@ -4716,7 +4716,7 @@ def get_prompt(self, prompt_identifier: str) -> ls_schemas.Prompt | None: The identifier should be in the format "prompt_name" or "owner/prompt_name". Returns: - ls_schemas.Prompt: The prompt object. + Optional[ls_schemas.Prompt]: The prompt object. 
Raises: requests.exceptions.HTTPError: If the prompt is not found or diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index ec7f76da1..e0d77c30d 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -75,6 +75,7 @@ def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTe assert isinstance(updated_data, dict) updated_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(updated_prompt, Prompt) assert updated_prompt.description == "Updated description" assert updated_prompt.is_public assert set(updated_prompt.tags) == set(["test", "update"]) @@ -140,10 +141,12 @@ def test_like_unlike_prompt( langsmith_client.like_prompt(prompt_name) prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, Prompt) assert prompt.num_likes == 1 langsmith_client.unlike_prompt(prompt_name) prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, Prompt) assert prompt.num_likes == 0 langsmith_client.delete_prompt(prompt_name) From 78c688c7a6ff220c06e1a6a41665a6a5464c2763 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 11:44:44 -0700 Subject: [PATCH 231/373] ci --- python/langsmith/client.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index f99a640e5..6c0df3bab 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4638,7 +4638,7 @@ def _like_or_unlike_prompt( "POST", f"/likes/{owner}/{prompt_name}", json={"like": like} ) response.raise_for_status() - return response.json + return response.json() def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: """Check if a prompt exists. 
@@ -4811,9 +4811,11 @@ def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManif ) if not use_optimization and commit_hash == "latest": - commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") - if commit_hash is None: + latest_commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}") + if latest_commit_hash is None: raise ValueError("No commits found") + else: + commit_hash = latest_commit_hash response = self.request_with_retries( "GET", f"/commits/{owner}/{prompt_name}/{commit_hash}" From 5dda8950d8c9dd849b5958fe634b2a07a72fb084 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 16:12:37 -0700 Subject: [PATCH 232/373] update update --- python/langsmith/client.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 6c0df3bab..16ab7a805 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4736,16 +4736,20 @@ def update_prompt( prompt_identifier: str, *, description: Optional[str] = None, - is_public: Optional[bool] = None, + readme: Optional[str] = None, tags: Optional[List[str]] = None, + is_public: Optional[bool] = None, + is_archived: Optional[bool] = None, ) -> Dict[str, Any]: """Update a prompt's metadata. Args: prompt_identifier (str): The identifier of the prompt to update. description (Optional[str]): New description for the prompt. - is_public (Optional[bool]): New public status for the prompt. + readme (Optional[str]): New readme for the prompt. tags (Optional[List[str]]): New list of tags for the prompt. + is_public (Optional[bool]): New public status for the prompt. + is_archived (Optional[bool]): New archived status for the prompt. Returns: Dict[str, Any]: The updated prompt data as returned by the server. @@ -4754,11 +4758,19 @@ def update_prompt( ValueError: If the prompt_identifier is empty. HTTPError: If the server request fails. 
""" + if not prompt_identifier: + raise ValueError("The prompt_identifier cannot be empty.") + json: Dict[str, Union[str, bool, List[str]]] = {} + if description is not None: json["description"] = description + if readme is not None: + json["readme"] = readme if is_public is not None: json["is_public"] = is_public + if is_archived is not None: + json["is_archived"] = is_archived if tags is not None: json["tags"] = tags From bf115e1196bbd73efb5321035d628d642186b62d Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 16:18:19 -0700 Subject: [PATCH 233/373] allow passing of readme in push --- python/langsmith/client.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 16ab7a805..327d974ac 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4871,7 +4871,9 @@ def push_prompt( manifest_json: Any, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, - description: str = "", + description: Optional[str] = "", + readme: Optional[str] = "", + tags: Optional[List[str]] = [], ) -> str: """Push a prompt manifest to the LangSmith API. @@ -4881,7 +4883,12 @@ def push_prompt( parent_commit_hash (Optional[str]): The parent commit hash. Defaults to "latest". is_public (bool): Whether the prompt should be public. Defaults to False. - description (str): A description of the prompt. Defaults to an empty string. + description (Optional[str]): A description of the prompt. + Defaults to an empty string. + readme (Optional[str]): A readme for the prompt. + Defaults to an empty string. + tags (Optional[List[str]]): A list of tags for the prompt. + Defaults to an empty list. Returns: str: The URL of the pushed prompt. 
@@ -4921,6 +4928,8 @@ def push_prompt( "repo_handle": prompt_name, "is_public": is_public, "description": description, + "readme": readme, + "tags": tags, }, ) From 4ae94e18874cd7f1f2dd4f9b1f5f90ab92ef6c81 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 17:34:41 -0700 Subject: [PATCH 234/373] add more test cases --- python/langsmith/client.py | 235 +++++++++++++++++++-------- python/tests/prompts/test_prompts.py | 181 +++++++++++++++++++-- 2 files changed, 334 insertions(+), 82 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 327d974ac..b6fa40b54 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4560,7 +4560,7 @@ def _evaluate_strings( **kwargs, ) - def get_settings(self) -> dict: + def _get_settings(self) -> dict: """Get the settings for the current tenant. Returns: @@ -4569,7 +4569,7 @@ def get_settings(self) -> dict: response = self.request_with_retries("GET", "/settings") return response.json() - def current_tenant_is_owner(self, owner: str) -> bool: + def _current_tenant_is_owner(self, owner: str) -> bool: """Check if the current workspace has the same handle as owner. Args: @@ -4578,24 +4578,9 @@ def current_tenant_is_owner(self, owner: str) -> bool: Returns: bool: True if the current tenant is the owner, False otherwise. """ - settings = self.get_settings() + settings = self._get_settings() return owner == "-" or settings["tenant_handle"] == owner - def prompt_exists(self, prompt_identifier: str) -> bool: - """Check if a prompt exists. - - Args: - prompt_identifier (str): The identifier of the prompt. - - Returns: - bool: True if the prompt exists, False otherwise. 
- """ - try: - self.get_prompt(prompt_identifier) - return True - except requests.exceptions.HTTPError as e: - return e.response.status_code != 404 - def _get_latest_commit_hash( self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0 ) -> Optional[str]: @@ -4640,6 +4625,21 @@ def _like_or_unlike_prompt( response.raise_for_status() return response.json() + def _get_prompt_url(self, prompt_identifier: str) -> str: + """Get a URL for a prompt.""" + owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier( + prompt_identifier + ) + + if self._current_tenant_is_owner(owner): + return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" + + settings = self._get_settings() + return ( + f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" + f"?organizationId={settings['id']}" + ) + def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: """Check if a prompt exists. @@ -4664,6 +4664,21 @@ def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: """ return self._like_or_unlike_prompt(prompt_identifier, like=False) + def prompt_exists(self, prompt_identifier: str) -> bool: + """Check if a prompt exists. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + bool: True if the prompt exists, False otherwise. + """ + try: + self.get_prompt(prompt_identifier) + return True + except requests.exceptions.HTTPError as e: + return e.response.status_code != 404 + def list_prompts( self, *, @@ -4731,6 +4746,103 @@ def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: response.raise_for_status() return None + def create_prompt( + self, + prompt_identifier: str, + *, + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[List[str]] = None, + is_public: bool = False, + ) -> ls_schemas.Prompt: + """Create a new prompt. + + Does not attach prompt manifest, just creates an empty prompt. + + Args: + prompt_name (str): The name of the prompt. 
+ description (Optional[str]): A description of the prompt. + readme (Optional[str]): A readme for the prompt. + tags (Optional[List[str]]): A list of tags for the prompt. + is_public (bool): Whether the prompt should be public. Defaults to False. + + Returns: + ls_schemas.Prompt: The created prompt object. + + Raises: + ValueError: If the current tenant is not the owner. + HTTPError: If the server request fails. + """ + settings = self._get_settings() + if is_public and not settings.get("tenant_handle"): + raise ValueError( + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" + ) + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + if not self._current_tenant_is_owner(owner=owner): + raise ValueError( + f"Cannot create prompt for another tenant.\n" + f"Current tenant: {self._get_settings()['tenant_handle']},\n" + f"Requested tenant: {owner}" + ) + + json: Dict[str, Union[str, bool, List[str]]] = { + "repo_handle": prompt_name, + "description": description or "", + "readme": readme or "", + "tags": tags or [], + "is_public": is_public, + } + + response = self.request_with_retries("POST", "/repos", json=json) + response.raise_for_status() + return ls_schemas.Prompt(**response.json()["repo"]) + + def create_commit( + self, + prompt_identifier: str, + *, + manifest_json: Any, + parent_commit_hash: Optional[str] = "latest", + ) -> str: + """Create a commit for a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt. + manifest_json (Any): The manifest JSON to commit. + parent_commit_hash (Optional[str]): The hash of the parent commit. + Defaults to "latest". + + Returns: + str: The url of the prompt commit. + + Raises: + HTTPError: If the server request fails. 
+ """ + from langchain_core.load.dump import dumps + + manifest_json = dumps(manifest_json) + manifest_dict = json.loads(manifest_json) + + owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) + prompt_owner_and_name = f"{owner}/{prompt_name}" + + if parent_commit_hash == "latest": + parent_commit_hash = self._get_latest_commit_hash(prompt_owner_and_name) + + request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} + response = self.request_with_retries( + "POST", f"/commits/{prompt_owner_and_name}", json=request_dict + ) + + commit_hash = response.json()["commit"]["commit_hash"] + + return self._get_prompt_url(f"{prompt_owner_and_name}:{commit_hash}") + def update_prompt( self, prompt_identifier: str, @@ -4758,8 +4870,14 @@ def update_prompt( ValueError: If the prompt_identifier is empty. HTTPError: If the server request fails. """ - if not prompt_identifier: - raise ValueError("The prompt_identifier cannot be empty.") + settings = self._get_settings() + if is_public and not settings.get("tenant_handle"): + raise ValueError( + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. " + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" + ) json: Dict[str, Union[str, bool, List[str]]] = {} @@ -4794,10 +4912,10 @@ def delete_prompt(self, prompt_identifier: str) -> bool: ValueError: If the current tenant is not the owner of the prompt. 
""" owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) - if not self.current_tenant_is_owner(owner): + if not self._current_tenant_is_owner(owner): raise ValueError( f"Cannot delete prompt for another tenant.\n" - f"Current tenant: {self.get_settings()['tenant_handle']},\n" + f"Current tenant: {self._get_settings()['tenant_handle']},\n" f"Requested tenant: {owner}" ) response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") @@ -4868,14 +4986,14 @@ def pull_prompt(self, prompt_identifier: str) -> Any: def push_prompt( self, prompt_identifier: str, - manifest_json: Any, + manifest_json: Optional[Any] = None, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: Optional[str] = "", readme: Optional[str] = "", tags: Optional[List[str]] = [], ) -> str: - """Push a prompt manifest to the LangSmith API. + """Push a prompt to the LangSmith API. Args: prompt_identifier (str): The identifier of the prompt. @@ -4897,57 +5015,34 @@ def push_prompt( ValueError: If a public prompt is attempted without a tenant handle or if the current tenant is not the owner. """ - from langchain_core.load.dump import dumps - - manifest_json = dumps(manifest_json) - settings = self.get_settings() - - if is_public and not settings.get("tenant_handle"): - raise ValueError( - "Cannot create a public prompt without first\n" - "creating a LangChain Hub handle. " - "You can add a handle by creating a public prompt at:\n" - "https://smith.langchain.com/prompts" - ) - - owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) - prompt_full_name = f"{owner}/{prompt_name}" - - if not self.current_tenant_is_owner(owner): - raise ValueError( - "Cannot create prompt for another tenant." 
- f"Current tenant: {settings['tenant_handle'] or 'no handle'}" - f", Requested tenant: {owner}" + # Create or update prompt metadata + if self.prompt_exists(prompt_identifier): + self.update_prompt( + prompt_identifier, + description=description, + readme=readme, + tags=tags, + is_public=is_public, ) - - if not self.prompt_exists(prompt_full_name): - self.request_with_retries( - "POST", - "/repos/", - json={ - "repo_handle": prompt_name, - "is_public": is_public, - "description": description, - "readme": readme, - "tags": tags, - }, + else: + self.create_prompt( + prompt_identifier, + is_public=is_public, + description=description, + readme=readme, + tags=tags, ) - manifest_dict = json.loads(manifest_json) - if parent_commit_hash == "latest": - parent_commit_hash = self._get_latest_commit_hash(prompt_full_name) + if manifest_json is None: + return self._get_prompt_url(prompt_identifier=prompt_identifier) - request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} - response = self.request_with_retries( - "POST", f"/commits/{prompt_full_name}", json=request_dict - ) - - commit_hash = response.json()["commit"]["commit_hash"] - short_hash = commit_hash[:8] - return ( - f"{self._host_url}/prompts/{prompt_name}/{short_hash}" - f"?organizationId={settings['id']}" + # Create a commit + url = self.create_commit( + prompt_identifier=prompt_identifier, + manifest_json=manifest_json, + parent_commit_hash=parent_commit_hash, ) + return url def _tracing_thread_drain_queue( diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index e0d77c30d..c657ff54a 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -1,10 +1,10 @@ from uuid import uuid4 import pytest -from langchain_core.prompts import ChatPromptTemplate +from langchain_core.prompts import ChatPromptTemplate, PromptTemplate +import langsmith.schemas as ls_schemas from langsmith.client import Client -from langsmith.schemas 
import ListPromptsResponse, Prompt, PromptManifest @pytest.fixture @@ -27,16 +27,21 @@ def prompt_template_2() -> ChatPromptTemplate: ) +@pytest.fixture +def prompt_template_3() -> PromptTemplate: + return PromptTemplate.from_template("Summarize the following text: {text}") + + def test_current_tenant_is_owner(langsmith_client: Client): - settings = langsmith_client.get_settings() - assert langsmith_client.current_tenant_is_owner(settings["tenant_handle"]) - assert langsmith_client.current_tenant_is_owner("-") - assert not langsmith_client.current_tenant_is_owner("non_existent_owner") + settings = langsmith_client._get_settings() + assert langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) + assert langsmith_client._current_tenant_is_owner("-") + assert not langsmith_client._current_tenant_is_owner("non_existent_owner") def test_list_prompts(langsmith_client: Client): response = langsmith_client.list_prompts(limit=10, offset=0) - assert isinstance(response, ListPromptsResponse) + assert isinstance(response, ls_schemas.ListPromptsResponse) assert len(response.repos) <= 10 @@ -45,7 +50,7 @@ def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTempl langsmith_client.push_prompt(prompt_name, prompt_template_1) prompt = langsmith_client.get_prompt(prompt_name) - assert isinstance(prompt, Prompt) + assert isinstance(prompt, ls_schemas.Prompt) assert prompt.repo_handle == prompt_name langsmith_client.delete_prompt(prompt_name) @@ -75,7 +80,7 @@ def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTe assert isinstance(updated_data, dict) updated_prompt = langsmith_client.get_prompt(prompt_name) - assert isinstance(updated_prompt, Prompt) + assert isinstance(updated_prompt, ls_schemas.Prompt) assert updated_prompt.description == "Updated description" assert updated_prompt.is_public assert set(updated_prompt.tags) == set(["test", "update"]) @@ -99,7 +104,7 @@ def test_pull_prompt_manifest( 
langsmith_client.push_prompt(prompt_name, prompt_template_1) manifest = langsmith_client.pull_prompt_manifest(prompt_name) - assert isinstance(manifest, PromptManifest) + assert isinstance(manifest, ls_schemas.PromptManifest) assert manifest.repo == prompt_name langsmith_client.delete_prompt(prompt_name) @@ -141,12 +146,12 @@ def test_like_unlike_prompt( langsmith_client.like_prompt(prompt_name) prompt = langsmith_client.get_prompt(prompt_name) - assert isinstance(prompt, Prompt) + assert isinstance(prompt, ls_schemas.Prompt) assert prompt.num_likes == 1 langsmith_client.unlike_prompt(prompt_name) prompt = langsmith_client.get_prompt(prompt_name) - assert isinstance(prompt, Prompt) + assert isinstance(prompt, ls_schemas.Prompt) assert prompt.num_likes == 0 langsmith_client.delete_prompt(prompt_name) @@ -163,3 +168,155 @@ def test_get_latest_commit_hash( assert len(commit_hash) > 0 langsmith_client.delete_prompt(prompt_name) + + +def test_create_prompt(langsmith_client: Client): + prompt_name = f"test_create_prompt_{uuid4().hex[:8]}" + created_prompt = langsmith_client.create_prompt( + prompt_name, + description="Test description", + readme="Test readme", + tags=["test", "create"], + is_public=False, + ) + assert isinstance(created_prompt, ls_schemas.Prompt) + assert created_prompt.repo_handle == prompt_name + assert created_prompt.description == "Test description" + assert created_prompt.readme == "Test readme" + assert set(created_prompt.tags) == set(["test", "create"]) + assert not created_prompt.is_public + + langsmith_client.delete_prompt(prompt_name) + + +def test_create_commit( + langsmith_client: Client, + prompt_template_2: ChatPromptTemplate, + prompt_template_3: PromptTemplate, +): + prompt_name = f"test_create_commit_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_3) + commit_url = langsmith_client.create_commit( + prompt_name, manifest_json=prompt_template_2 + ) + assert isinstance(commit_url, str) + assert prompt_name in 
commit_url + + prompt = langsmith_client.get_prompt(prompt_name) + assert prompt.num_commits == 2 + + langsmith_client.delete_prompt(prompt_name) + + +def test_push_prompt_new(langsmith_client: Client, prompt_template_3: PromptTemplate): + prompt_name = f"test_push_new_{uuid4().hex[:8]}" + url = langsmith_client.push_prompt( + prompt_name, + prompt_template_3, + is_public=True, + description="New prompt", + tags=["new", "test"], + ) + + assert isinstance(url, str) + assert prompt_name in url + + prompt = langsmith_client.get_prompt(prompt_name) + assert prompt.is_public + assert prompt.description == "New prompt" + assert "new" in prompt.tags + assert "test" in prompt.tags + + langsmith_client.delete_prompt(prompt_name) + + +def test_push_prompt_update( + langsmith_client: Client, + prompt_template_1: ChatPromptTemplate, + prompt_template_3: PromptTemplate, +): + prompt_name = f"test_push_update_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + updated_url = langsmith_client.push_prompt( + prompt_name, + prompt_template_3, + description="Updated prompt", + tags=["updated", "test"], + ) + + assert isinstance(updated_url, str) + assert prompt_name in updated_url + + updated_prompt = langsmith_client.get_prompt(prompt_name) + assert updated_prompt.description == "Updated prompt" + assert "updated" in updated_prompt.tags + assert "test" in updated_prompt.tags + + langsmith_client.delete_prompt(prompt_name) + + +@pytest.mark.parametrize("is_public,expected_count", [(True, 1), (False, 1)]) +def test_list_prompts_filter( + langsmith_client: Client, + prompt_template_1: ChatPromptTemplate, + is_public: bool, + expected_count: int, +): + prompt_name = f"test_list_filter_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1, is_public=is_public) + + response = langsmith_client.list_prompts(is_public=is_public, query=prompt_name) + + assert response.total == expected_count + if expected_count > 0: + assert 
response.repos[0].repo_handle == prompt_name + + langsmith_client.delete_prompt(prompt_name) + + +def test_update_prompt_archive( + langsmith_client: Client, prompt_template_1: ChatPromptTemplate +): + prompt_name = f"test_archive_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, prompt_template_1) + + langsmith_client.update_prompt(prompt_name, is_archived=True) + archived_prompt = langsmith_client.get_prompt(prompt_name) + assert archived_prompt.is_archived + + langsmith_client.update_prompt(prompt_name, is_archived=False) + unarchived_prompt = langsmith_client.get_prompt(prompt_name) + assert not unarchived_prompt.is_archived + + langsmith_client.delete_prompt(prompt_name) + + +@pytest.mark.parametrize( + "sort_field,sort_direction", + [ + (ls_schemas.PromptSortField.updated_at, "desc"), + ], +) +def test_list_prompts_sorting( + langsmith_client: Client, + prompt_template_1: ChatPromptTemplate, + sort_field: ls_schemas.PromptSortField, + sort_direction: str, +): + prompt_names = [f"test_sort_{i}_{uuid4().hex[:8]}" for i in range(3)] + for name in prompt_names: + langsmith_client.push_prompt(name, prompt_template_1) + + response = langsmith_client.list_prompts( + sort_field=sort_field, sort_direction=sort_direction, limit=10 + ) + + assert len(response.repos) >= 3 + sorted_names = [ + repo.repo_handle for repo in response.repos if repo.repo_handle in prompt_names + ] + assert sorted_names == sorted(sorted_names, reverse=(sort_direction == "desc")) + + for name in prompt_names: + langsmith_client.delete_prompt(name) From 6ec38be2d5117c52d682b27facf409b1ceb35e4f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 12 Jul 2024 17:41:41 -0700 Subject: [PATCH 235/373] fix tests --- python/tests/prompts/test_prompts.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index c657ff54a..e13bce680 100644 --- a/python/tests/prompts/test_prompts.py +++ 
b/python/tests/prompts/test_prompts.py @@ -1,3 +1,4 @@ +from typing import Literal from uuid import uuid4 import pytest @@ -203,6 +204,7 @@ def test_create_commit( assert prompt_name in commit_url prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) assert prompt.num_commits == 2 langsmith_client.delete_prompt(prompt_name) @@ -222,6 +224,7 @@ def test_push_prompt_new(langsmith_client: Client, prompt_template_3: PromptTemp assert prompt_name in url prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(prompt, ls_schemas.Prompt) assert prompt.is_public assert prompt.description == "New prompt" assert "new" in prompt.tags @@ -249,6 +252,7 @@ def test_push_prompt_update( assert prompt_name in updated_url updated_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(updated_prompt, ls_schemas.Prompt) assert updated_prompt.description == "Updated prompt" assert "updated" in updated_prompt.tags assert "test" in updated_prompt.tags @@ -283,19 +287,21 @@ def test_update_prompt_archive( langsmith_client.update_prompt(prompt_name, is_archived=True) archived_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(archived_prompt, ls_schemas.Prompt) assert archived_prompt.is_archived langsmith_client.update_prompt(prompt_name, is_archived=False) unarchived_prompt = langsmith_client.get_prompt(prompt_name) + assert isinstance(unarchived_prompt, ls_schemas.Prompt) assert not unarchived_prompt.is_archived langsmith_client.delete_prompt(prompt_name) @pytest.mark.parametrize( - "sort_field,sort_direction", + "sort_field, sort_direction", [ - (ls_schemas.PromptSortField.updated_at, "desc"), + (ls_schemas.PromptSortField.updated_at, Literal["desc"]), ], ) def test_list_prompts_sorting( From c330b89a397472c94a6825d653c4890968a9ea99 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Sun, 14 Jul 2024 09:11:47 -0700 Subject: [PATCH 236/373] type hint --- python/tests/prompts/test_prompts.py | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index e13bce680..ac6801767 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -301,14 +301,14 @@ def test_update_prompt_archive( @pytest.mark.parametrize( "sort_field, sort_direction", [ - (ls_schemas.PromptSortField.updated_at, Literal["desc"]), + (ls_schemas.PromptSortField.updated_at, "desc"), ], ) def test_list_prompts_sorting( langsmith_client: Client, prompt_template_1: ChatPromptTemplate, sort_field: ls_schemas.PromptSortField, - sort_direction: str, + sort_direction: Literal["asc", "desc"], ): prompt_names = [f"test_sort_{i}_{uuid4().hex[:8]}" for i in range(3)] for name in prompt_names: From 03ccfe01e4b97ccc991c6c9c95f25cc2258f70b7 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Sun, 14 Jul 2024 12:02:29 -0700 Subject: [PATCH 237/373] push prompt --- python/langsmith/client.py | 113 ++++++++++++++++++++++-------------- python/langsmith/schemas.py | 2 +- 2 files changed, 70 insertions(+), 45 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index b6fa40b54..1c32f7661 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4626,12 +4626,20 @@ def _like_or_unlike_prompt( return response.json() def _get_prompt_url(self, prompt_identifier: str) -> str: - """Get a URL for a prompt.""" + """Get a URL for a prompt. + + Args: + prompt_identifier (str): The identifier of the prompt. + + Returns: + str: The URL for the prompt. 
+ + """ owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier( prompt_identifier ) - if self._current_tenant_is_owner(owner): + if not self._current_tenant_is_owner(owner): return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" settings = self._get_settings() @@ -4640,20 +4648,23 @@ def _get_prompt_url(self, prompt_identifier: str) -> str: f"?organizationId={settings['id']}" ) - def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: + def _prompt_exists(self, prompt_identifier: str) -> bool: """Check if a prompt exists. Args: prompt_identifier (str): The identifier of the prompt. Returns: - A dictionary with the key 'likes' and the count of likes as the value. - + bool: True if the prompt exists, False otherwise. """ - return self._like_or_unlike_prompt(prompt_identifier, like=True) + try: + self.get_prompt(prompt_identifier) + return True + except requests.exceptions.HTTPError as e: + return e.response.status_code != 404 - def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: - """Unlike a prompt. + def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Check if a prompt exists. Args: prompt_identifier (str): The identifier of the prompt. @@ -4662,22 +4673,19 @@ def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: A dictionary with the key 'likes' and the count of likes as the value. """ - return self._like_or_unlike_prompt(prompt_identifier, like=False) + return self._like_or_unlike_prompt(prompt_identifier, like=True) - def prompt_exists(self, prompt_identifier: str) -> bool: - """Check if a prompt exists. + def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: + """Unlike a prompt. Args: prompt_identifier (str): The identifier of the prompt. Returns: - bool: True if the prompt exists, False otherwise. + A dictionary with the key 'likes' and the count of likes as the value. 
+ """ - try: - self.get_prompt(prompt_identifier) - return True - except requests.exceptions.HTTPError as e: - return e.response.status_code != 404 + return self._like_or_unlike_prompt(prompt_identifier, like=False) def list_prompts( self, @@ -4757,7 +4765,7 @@ def create_prompt( ) -> ls_schemas.Prompt: """Create a new prompt. - Does not attach prompt manifest, just creates an empty prompt. + Does not attach prompt object, just creates an empty prompt. Args: prompt_name (str): The name of the prompt. @@ -4805,15 +4813,15 @@ def create_prompt( def create_commit( self, prompt_identifier: str, + object: dict, *, - manifest_json: Any, parent_commit_hash: Optional[str] = "latest", ) -> str: - """Create a commit for a prompt. + """Create a commit for an existing prompt. Args: prompt_identifier (str): The identifier of the prompt. - manifest_json (Any): The manifest JSON to commit. + object (dict): The LangChain object to commit. parent_commit_hash (Optional[str]): The hash of the parent commit. Defaults to "latest". @@ -4822,11 +4830,23 @@ def create_commit( Raises: HTTPError: If the server request fails. + ValueError: If the prompt does not exist. """ - from langchain_core.load.dump import dumps + if not self._prompt_exists(prompt_identifier): + raise ls_utils.LangSmithNotFoundError( + "Prompt does not exist, you must create it first." + ) + + try: + from langchain_core.load.dump import dumps + except ImportError: + raise ImportError( + "The client.create_commit function requires the langchain_core" + "package to run.\nInstall with pip install langchain_core" + ) - manifest_json = dumps(manifest_json) - manifest_dict = json.loads(manifest_json) + object = dumps(object) + manifest_dict = json.loads(object) owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) prompt_owner_and_name = f"{owner}/{prompt_name}" @@ -4855,6 +4875,8 @@ def update_prompt( ) -> Dict[str, Any]: """Update a prompt's metadata. 
+ To update the content of a prompt, use push_prompt or create_commit instead. + Args: prompt_identifier (str): The identifier of the prompt to update. description (Optional[str]): New description for the prompt. @@ -4921,14 +4943,14 @@ def delete_prompt(self, prompt_identifier: str) -> bool: response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") return response.status_code == 204 - def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManifest: - """Pull a prompt manifest from the LangSmith API. + def pull_prompt_object(self, prompt_identifier: str) -> ls_schemas.PromptObject: + """Pull a prompt object from the LangSmith API. Args: prompt_identifier (str): The identifier of the prompt. Returns: - ls_schemas.PromptManifest: The prompt manifest. + ls_schemas.PromptObject: The prompt object. Raises: ValueError: If no commits are found for the prompt. @@ -4950,14 +4972,14 @@ def pull_prompt_manifest(self, prompt_identifier: str) -> ls_schemas.PromptManif response = self.request_with_retries( "GET", f"/commits/{owner}/{prompt_name}/{commit_hash}" ) - return ls_schemas.PromptManifest( + return ls_schemas.PromptObject( **{"owner": owner, "repo": prompt_name, **response.json()} ) def pull_prompt(self, prompt_identifier: str) -> Any: """Pull a prompt and return it as a LangChain PromptTemplate. - This method requires `langchain_core` to convert the prompt manifest. + This method requires `langchain_core`. Args: prompt_identifier (str): The identifier of the prompt. 
@@ -4968,16 +4990,16 @@ def pull_prompt(self, prompt_identifier: str) -> Any: from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate - prompt_manifest = self.pull_prompt_manifest(prompt_identifier) - prompt = loads(json.dumps(prompt_manifest.manifest)) + prompt_object = self.pull_prompt_object(prompt_identifier) + prompt = loads(json.dumps(prompt_object.manifest)) if isinstance(prompt, BasePromptTemplate): if prompt.metadata is None: prompt.metadata = {} prompt.metadata.update( { - "lc_hub_owner": prompt_manifest.owner, - "lc_hub_repo": prompt_manifest.repo, - "lc_hub_commit_hash": prompt_manifest.commit_hash, + "lc_hub_owner": prompt_object.owner, + "lc_hub_repo": prompt_object.repo, + "lc_hub_commit_hash": prompt_object.commit_hash, } ) @@ -4986,7 +5008,8 @@ def pull_prompt(self, prompt_identifier: str) -> Any: def push_prompt( self, prompt_identifier: str, - manifest_json: Optional[Any] = None, + *, + object: Optional[dict] = None, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: Optional[str] = "", @@ -4995,9 +5018,14 @@ def push_prompt( ) -> str: """Push a prompt to the LangSmith API. + If the prompt does not exist, it will be created. + If the prompt exists, it will be updated. + + Can be used to update prompt metadata or prompt content. + Args: prompt_identifier (str): The identifier of the prompt. - manifest_json (Any): The manifest to push. + object (Optional[dict]): The LangChain object to push. parent_commit_hash (Optional[str]): The parent commit hash. Defaults to "latest". is_public (bool): Whether the prompt should be public. Defaults to False. @@ -5009,14 +5037,11 @@ def push_prompt( Defaults to an empty list. Returns: - str: The URL of the pushed prompt. + str: The URL of the prompt. - Raises: - ValueError: If a public prompt is attempted without a tenant handle or - if the current tenant is not the owner. 
""" # Create or update prompt metadata - if self.prompt_exists(prompt_identifier): + if self._prompt_exists(prompt_identifier): self.update_prompt( prompt_identifier, description=description, @@ -5033,13 +5058,13 @@ def push_prompt( tags=tags, ) - if manifest_json is None: + if object is None: return self._get_prompt_url(prompt_identifier=prompt_identifier) - # Create a commit + # Create a commit with the new manifest url = self.create_commit( prompt_identifier=prompt_identifier, - manifest_json=manifest_json, + object=object, parent_commit_hash=parent_commit_hash, ) return url diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index c37b9d14c..6b9894a7f 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -746,7 +746,7 @@ def metadata(self) -> dict[str, Any]: return self.extra["metadata"] -class PromptManifest(BaseModel): +class PromptObject(BaseModel): """Represents a Prompt with a manifest. Attributes: From a99c17ada7d06108b63a858f861803cb55794f11 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Sun, 14 Jul 2024 12:31:59 -0700 Subject: [PATCH 238/373] pull prompt --- python/langsmith/client.py | 44 ++++++---- python/tests/prompts/test_prompts.py | 124 +++++++++++++++++---------- 2 files changed, 104 insertions(+), 64 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 1c32f7661..43d6e3cd7 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4581,6 +4581,15 @@ def _current_tenant_is_owner(self, owner: str) -> bool: settings = self._get_settings() return owner == "-" or settings["tenant_handle"] == owner + def _owner_conflict_error( + self, action: str, owner: str + ) -> ls_utils.LangSmithUserError: + return ls_utils.LangSmithUserError( + f"Cannot {action} for another tenant.\n" + f"Current tenant: {self._get_settings()['tenant_handle']},\n" + f"Requested tenant: {owner}" + ) + def _get_latest_commit_hash( self, prompt_owner_and_name: str, limit: int = 1, 
offset: int = 0 ) -> Optional[str]: @@ -4783,7 +4792,7 @@ def create_prompt( """ settings = self._get_settings() if is_public and not settings.get("tenant_handle"): - raise ValueError( + raise ls_utils.LangSmithUserError( "Cannot create a public prompt without first\n" "creating a LangChain Hub handle. " "You can add a handle by creating a public prompt at:\n" @@ -4792,11 +4801,7 @@ def create_prompt( owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) if not self._current_tenant_is_owner(owner=owner): - raise ValueError( - f"Cannot create prompt for another tenant.\n" - f"Current tenant: {self._get_settings()['tenant_handle']},\n" - f"Requested tenant: {owner}" - ) + raise self._owner_conflict_error("create a prompt", owner) json: Dict[str, Union[str, bool, List[str]]] = { "repo_handle": prompt_name, @@ -4842,7 +4847,7 @@ def create_commit( except ImportError: raise ImportError( "The client.create_commit function requires the langchain_core" - "package to run.\nInstall with pip install langchain_core" + "package to run.\nInstall with `pip install langchain_core`" ) object = dumps(object) @@ -4935,11 +4940,8 @@ def delete_prompt(self, prompt_identifier: str) -> bool: """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) if not self._current_tenant_is_owner(owner): - raise ValueError( - f"Cannot delete prompt for another tenant.\n" - f"Current tenant: {self._get_settings()['tenant_handle']},\n" - f"Requested tenant: {owner}" - ) + raise self._owner_conflict_error("delete a prompt", owner) + response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") return response.status_code == 204 @@ -4987,8 +4989,14 @@ def pull_prompt(self, prompt_identifier: str) -> Any: Returns: Any: The prompt object in the specified format. 
""" - from langchain_core.load.load import loads - from langchain_core.prompts import BasePromptTemplate + try: + from langchain_core.load.load import loads + from langchain_core.prompts import BasePromptTemplate + except ImportError: + raise ImportError( + "The client.pull_prompt function requires the langchain_core" + "package to run.\nInstall with `pip install langchain_core`" + ) prompt_object = self.pull_prompt_object(prompt_identifier) prompt = loads(json.dumps(prompt_object.manifest)) @@ -5018,11 +5026,11 @@ def push_prompt( ) -> str: """Push a prompt to the LangSmith API. + Can be used to update prompt metadata or prompt content. + If the prompt does not exist, it will be created. If the prompt exists, it will be updated. - Can be used to update prompt metadata or prompt content. - Args: prompt_identifier (str): The identifier of the prompt. object (Optional[dict]): The LangChain object to push. @@ -5063,8 +5071,8 @@ def push_prompt( # Create a commit with the new manifest url = self.create_commit( - prompt_identifier=prompt_identifier, - object=object, + prompt_identifier, + object, parent_commit_hash=parent_commit_hash, ) return url diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index ac6801767..e235e3d6e 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -5,6 +5,7 @@ from langchain_core.prompts import ChatPromptTemplate, PromptTemplate import langsmith.schemas as ls_schemas +import langsmith.utils as ls_utils from langsmith.client import Client @@ -48,7 +49,7 @@ def test_list_prompts(langsmith_client: Client): def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) prompt = langsmith_client.get_prompt(prompt_name) assert isinstance(prompt, ls_schemas.Prompt) @@ 
-59,18 +60,18 @@ def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTempl def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): non_existent_prompt = f"non_existent_{uuid4().hex[:8]}" - assert not langsmith_client.prompt_exists(non_existent_prompt) + assert not langsmith_client._prompt_exists(non_existent_prompt) existent_prompt = f"existent_{uuid4().hex[:8]}" - langsmith_client.push_prompt(existent_prompt, prompt_template_2) - assert langsmith_client.prompt_exists(existent_prompt) + langsmith_client.push_prompt(existent_prompt, object=prompt_template_2) + assert langsmith_client._prompt_exists(existent_prompt) langsmith_client.delete_prompt(existent_prompt) def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) updated_data = langsmith_client.update_prompt( prompt_name, @@ -91,21 +92,21 @@ def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTe def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) - assert langsmith_client.prompt_exists(prompt_name) + assert langsmith_client._prompt_exists(prompt_name) langsmith_client.delete_prompt(prompt_name) - assert not langsmith_client.prompt_exists(prompt_name) + assert not langsmith_client._prompt_exists(prompt_name) -def test_pull_prompt_manifest( +def test_pull_prompt_object( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) 
- manifest = langsmith_client.pull_prompt_manifest(prompt_name) - assert isinstance(manifest, ls_schemas.PromptManifest) + manifest = langsmith_client.pull_prompt_object(prompt_name) + assert isinstance(manifest, ls_schemas.PromptObject) assert manifest.repo == prompt_name langsmith_client.delete_prompt(prompt_name) @@ -113,10 +114,41 @@ def test_pull_prompt_manifest( def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + # test pulling with just prompt name pulled_prompt = langsmith_client.pull_prompt(prompt_name) assert isinstance(pulled_prompt, ChatPromptTemplate) + assert pulled_prompt.metadata["lc_hub_repo"] == prompt_name + + # test pulling with private owner (-) and name + pulled_prompt_2 = langsmith_client.pull_prompt(f"-/{prompt_name}") + assert pulled_prompt == pulled_prompt_2 + + # test pulling with tenant handle and name + tenant_handle = langsmith_client._get_settings()["tenant_handle"] + pulled_prompt_3 = langsmith_client.pull_prompt(f"{tenant_handle}/{prompt_name}") + assert ( + pulled_prompt.metadata["lc_hub_commit_hash"] + == pulled_prompt_3.metadata["lc_hub_commit_hash"] + ) + assert pulled_prompt_3.metadata["lc_hub_owner"] == tenant_handle + + # test pulling with handle, name and commit hash + tenant_handle = langsmith_client._get_settings()["tenant_handle"] + pulled_prompt_4 = langsmith_client.pull_prompt( + f"{tenant_handle}/{prompt_name}:latest" + ) + assert pulled_prompt_3 == pulled_prompt_4 + + # test pulling without handle, with commit hash + pulled_prompt_5 = langsmith_client.pull_prompt( + f"{prompt_name}:{pulled_prompt_4.metadata['lc_hub_commit_hash']}" + ) + assert ( + pulled_prompt_4.metadata["lc_hub_commit_hash"] + == pulled_prompt_5.metadata["lc_hub_commit_hash"] + ) langsmith_client.delete_prompt(prompt_name) @@ -126,7 
+158,7 @@ def test_push_and_pull_prompt( ): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - push_result = langsmith_client.push_prompt(prompt_name, prompt_template_2) + push_result = langsmith_client.push_prompt(prompt_name, object=prompt_template_2) assert isinstance(push_result, str) pulled_prompt = langsmith_client.pull_prompt(prompt_name) @@ -135,15 +167,17 @@ def test_push_and_pull_prompt( langsmith_client.delete_prompt(prompt_name) # should fail - with pytest.raises(ValueError): - langsmith_client.push_prompt(f"random_handle/{prompt_name}", prompt_template_2) + with pytest.raises(ls_utils.LangSmithUserError): + langsmith_client.push_prompt( + f"random_handle/{prompt_name}", object=prompt_template_2 + ) def test_like_unlike_prompt( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) langsmith_client.like_prompt(prompt_name) prompt = langsmith_client.get_prompt(prompt_name) @@ -162,7 +196,7 @@ def test_get_latest_commit_hash( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) commit_hash = langsmith_client._get_latest_commit_hash(f"-/{prompt_name}") assert isinstance(commit_hash, str) @@ -196,10 +230,19 @@ def test_create_commit( prompt_template_3: PromptTemplate, ): prompt_name = f"test_create_commit_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_3) - commit_url = langsmith_client.create_commit( - prompt_name, manifest_json=prompt_template_2 - ) + try: + # this should fail because the prompt does not exist + commit_url = langsmith_client.create_commit( + prompt_name, object=prompt_template_2 + ) + pytest.fail("Expected LangSmithNotFoundError 
was not raised") + except ls_utils.LangSmithNotFoundError as e: + assert str(e) == "Prompt does not exist, you must create it first." + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + langsmith_client.push_prompt(prompt_name, object=prompt_template_3) + commit_url = langsmith_client.create_commit(prompt_name, object=prompt_template_2) assert isinstance(commit_url, str) assert prompt_name in commit_url @@ -210,11 +253,11 @@ def test_create_commit( langsmith_client.delete_prompt(prompt_name) -def test_push_prompt_new(langsmith_client: Client, prompt_template_3: PromptTemplate): +def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate): prompt_name = f"test_push_new_{uuid4().hex[:8]}" url = langsmith_client.push_prompt( prompt_name, - prompt_template_3, + object=prompt_template_3, is_public=True, description="New prompt", tags=["new", "test"], @@ -229,33 +272,20 @@ def test_push_prompt_new(langsmith_client: Client, prompt_template_3: PromptTemp assert prompt.description == "New prompt" assert "new" in prompt.tags assert "test" in prompt.tags + assert prompt.num_commits == 1 - langsmith_client.delete_prompt(prompt_name) - - -def test_push_prompt_update( - langsmith_client: Client, - prompt_template_1: ChatPromptTemplate, - prompt_template_3: PromptTemplate, -): - prompt_name = f"test_push_update_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) - - updated_url = langsmith_client.push_prompt( + # test updating prompt metadata but not manifest + url = langsmith_client.push_prompt( prompt_name, - prompt_template_3, + is_public=False, description="Updated prompt", - tags=["updated", "test"], ) - assert isinstance(updated_url, str) - assert prompt_name in updated_url - updated_prompt = langsmith_client.get_prompt(prompt_name) assert isinstance(updated_prompt, ls_schemas.Prompt) assert updated_prompt.description == "Updated prompt" - assert "updated" in updated_prompt.tags - assert "test" 
in updated_prompt.tags + assert not updated_prompt.is_public + assert updated_prompt.num_commits == 1 langsmith_client.delete_prompt(prompt_name) @@ -268,7 +298,9 @@ def test_list_prompts_filter( expected_count: int, ): prompt_name = f"test_list_filter_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1, is_public=is_public) + langsmith_client.push_prompt( + prompt_name, object=prompt_template_1, is_public=is_public + ) response = langsmith_client.list_prompts(is_public=is_public, query=prompt_name) @@ -283,7 +315,7 @@ def test_update_prompt_archive( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): prompt_name = f"test_archive_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, prompt_template_1) + langsmith_client.push_prompt(prompt_name, object=prompt_template_1) langsmith_client.update_prompt(prompt_name, is_archived=True) archived_prompt = langsmith_client.get_prompt(prompt_name) @@ -312,7 +344,7 @@ def test_list_prompts_sorting( ): prompt_names = [f"test_sort_{i}_{uuid4().hex[:8]}" for i in range(3)] for name in prompt_names: - langsmith_client.push_prompt(name, prompt_template_1) + langsmith_client.push_prompt(name, object=prompt_template_1) response = langsmith_client.list_prompts( sort_field=sort_field, sort_direction=sort_direction, limit=10 From 3323e3203fb9cd011cbc5ae9d4098a98da10c693 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Sun, 14 Jul 2024 12:35:03 -0700 Subject: [PATCH 239/373] any --- python/langsmith/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 43d6e3cd7..22cb80488 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5017,7 +5017,7 @@ def push_prompt( self, prompt_identifier: str, *, - object: Optional[dict] = None, + object: Optional[Any] = None, parent_commit_hash: Optional[str] = "latest", is_public: bool = False, description: Optional[str] = "", @@ -5033,7 
+5033,7 @@ def push_prompt( Args: prompt_identifier (str): The identifier of the prompt. - object (Optional[dict]): The LangChain object to push. + object (Optional[Any]): The LangChain object to push. parent_commit_hash (Optional[str]): The parent commit hash. Defaults to "latest". is_public (bool): Whether the prompt should be public. Defaults to False. From 1416037e86c301a247288c71ba1eef22a4ced6ed Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Sun, 14 Jul 2024 12:40:51 -0700 Subject: [PATCH 240/373] lint --- python/langsmith/client.py | 8 ++++---- python/tests/prompts/test_prompts.py | 7 ++++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 22cb80488..1dfdf5f90 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4818,7 +4818,7 @@ def create_prompt( def create_commit( self, prompt_identifier: str, - object: dict, + object: Any, *, parent_commit_hash: Optional[str] = "latest", ) -> str: @@ -4826,7 +4826,7 @@ def create_commit( Args: prompt_identifier (str): The identifier of the prompt. - object (dict): The LangChain object to commit. + object (Any): The LangChain object to commit. parent_commit_hash (Optional[str]): The hash of the parent commit. Defaults to "latest". 
@@ -4850,8 +4850,8 @@ def create_commit( "package to run.\nInstall with `pip install langchain_core`" ) - object = dumps(object) - manifest_dict = json.loads(object) + json_object = dumps(object) + manifest_dict = json.loads(json_object) owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) prompt_owner_and_name = f"{owner}/{prompt_name}" diff --git a/python/tests/prompts/test_prompts.py b/python/tests/prompts/test_prompts.py index e235e3d6e..5c535c461 100644 --- a/python/tests/prompts/test_prompts.py +++ b/python/tests/prompts/test_prompts.py @@ -119,7 +119,9 @@ def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemp # test pulling with just prompt name pulled_prompt = langsmith_client.pull_prompt(prompt_name) assert isinstance(pulled_prompt, ChatPromptTemplate) - assert pulled_prompt.metadata["lc_hub_repo"] == prompt_name + assert ( + pulled_prompt.metadata and pulled_prompt.metadata["lc_hub_repo"] == prompt_name + ) # test pulling with private owner (-) and name pulled_prompt_2 = langsmith_client.pull_prompt(f"-/{prompt_name}") @@ -128,6 +130,7 @@ def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemp # test pulling with tenant handle and name tenant_handle = langsmith_client._get_settings()["tenant_handle"] pulled_prompt_3 = langsmith_client.pull_prompt(f"{tenant_handle}/{prompt_name}") + assert pulled_prompt.metadata and pulled_prompt_3.metadata assert ( pulled_prompt.metadata["lc_hub_commit_hash"] == pulled_prompt_3.metadata["lc_hub_commit_hash"] @@ -142,9 +145,11 @@ def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemp assert pulled_prompt_3 == pulled_prompt_4 # test pulling without handle, with commit hash + assert pulled_prompt_4.metadata pulled_prompt_5 = langsmith_client.pull_prompt( f"{prompt_name}:{pulled_prompt_4.metadata['lc_hub_commit_hash']}" ) + assert pulled_prompt_5.metadata assert ( pulled_prompt_4.metadata["lc_hub_commit_hash"] == 
pulled_prompt_5.metadata["lc_hub_commit_hash"] From 2b5808f62782541a6ae7a66db4ce7fc2ee5ba966 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 11:56:02 -0700 Subject: [PATCH 241/373] add prompts to integration tests --- python/tests/{prompts => integration_tests}/test_prompts.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename python/tests/{prompts => integration_tests}/test_prompts.py (100%) diff --git a/python/tests/prompts/test_prompts.py b/python/tests/integration_tests/test_prompts.py similarity index 100% rename from python/tests/prompts/test_prompts.py rename to python/tests/integration_tests/test_prompts.py From 2ba3ef680d04c7026f43375defa7ebe72146a81d Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 13:34:24 -0700 Subject: [PATCH 242/373] change timeout --- python/tests/integration_tests/test_prompts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 5c535c461..0e292748f 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -11,7 +11,7 @@ @pytest.fixture def langsmith_client() -> Client: - return Client() + return Client(timeout_ms=[20_000, 90_000]) @pytest.fixture From b51f6e63cc6be9a8312af34e95dcc6a698eb4629 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:35:17 -0700 Subject: [PATCH 243/373] Add dataset split update + list methods (#857) --- js/package.json | 4 +- js/src/client.ts | 91 ++++++++++++++++++++++++++++++++++++++ js/src/index.ts | 2 +- python/langsmith/client.py | 76 +++++++++++++++++++++++++++++++ 4 files changed, 170 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index eee94a13a..f6bb02d18 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.36", + "version": "0.1.37", 
"description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -261,4 +261,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/src/client.ts b/js/src/client.ts index 0f44d5124..6cba0c43b 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -2300,6 +2300,97 @@ export class Client { return result; } + public async listDatasetSplits({ + datasetId, + datasetName, + asOf, + }: { + datasetId?: string; + datasetName?: string; + asOf?: string | Date; + }): Promise { + let datasetId_: string; + if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide dataset name or ID"); + } else if (datasetId !== undefined && datasetName !== undefined) { + throw new Error("Must provide either datasetName or datasetId, not both"); + } else if (datasetId === undefined) { + const dataset = await this.readDataset({ datasetName }); + datasetId_ = dataset.id; + } else { + datasetId_ = datasetId; + } + + assertUuid(datasetId_); + + const params = new URLSearchParams(); + const dataset_version = asOf + ? typeof asOf === "string" + ? 
asOf + : asOf?.toISOString() + : undefined; + if (dataset_version) { + params.append("as_of", dataset_version); + } + + const response = await this._get( + `/datasets/${datasetId_}/splits`, + params + ); + return response; + } + + public async updateDatasetSplits({ + datasetId, + datasetName, + splitName, + exampleIds, + remove = false, + }: { + datasetId?: string; + datasetName?: string; + splitName: string; + exampleIds: string[]; + remove?: boolean; + }): Promise { + let datasetId_: string; + if (datasetId === undefined && datasetName === undefined) { + throw new Error("Must provide dataset name or ID"); + } else if (datasetId !== undefined && datasetName !== undefined) { + throw new Error("Must provide either datasetName or datasetId, not both"); + } else if (datasetId === undefined) { + const dataset = await this.readDataset({ datasetName }); + datasetId_ = dataset.id; + } else { + datasetId_ = datasetId; + } + + assertUuid(datasetId_); + + const data = { + split_name: splitName, + examples: exampleIds.map((id) => { + assertUuid(id); + return id; + }), + remove, + }; + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/datasets/${datasetId_}/splits`, + { + method: "PUT", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + await raiseForStatus(response, "update dataset splits"); + } + /** * @deprecated This method is deprecated and will be removed in future LangSmith versions, use `evaluate` from `langsmith/evaluation` instead. 
*/ diff --git a/js/src/index.ts b/js/src/index.ts index 575faa25a..429988932 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.36"; +export const __version__ = "0.1.37"; diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 2b7647cf6..951c7407c 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3334,6 +3334,82 @@ def delete_example(self, example_id: ID_TYPE) -> None: ) ls_utils.raise_for_status_with_text(response) + def list_dataset_splits( + self, + *, + dataset_id: Optional[ID_TYPE] = None, + dataset_name: Optional[str] = None, + as_of: Optional[Union[str, datetime.datetime]] = None, + ) -> List[str]: + """Get the splits for a dataset. + + Args: + dataset_id (ID_TYPE): The ID of the dataset. + as_of (Optional[Union[str, datetime.datetime]], optional): The version + of the dataset to retrieve splits for. Can be a timestamp or a + string tag. Defaults to "latest". + + Returns: + List[str]: The names of this dataset's. + """ + if dataset_id is None: + if dataset_name is None: + raise ValueError("Must provide dataset name or ID") + dataset_id = self.read_dataset(dataset_name=dataset_name).id + params = {} + if as_of is not None: + params["as_of"] = ( + as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of + ) + + response = self.request_with_retries( + "GET", + f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits", + params=params, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + + def update_dataset_splits( + self, + *, + dataset_id: Optional[ID_TYPE] = None, + dataset_name: Optional[str] = None, + split_name: str, + example_ids: List[ID_TYPE], + remove: bool = False, + ) -> None: + """Update the splits for a dataset. + + Args: + dataset_id (ID_TYPE): The ID of the dataset to update. 
+ split_name (str): The name of the split to update. + example_ids (List[ID_TYPE]): The IDs of the examples to add to or + remove from the split. + remove (bool, optional): If True, remove the examples from the split. + If False, add the examples to the split. Defaults to False. + + Returns: + None + """ + if dataset_id is None: + if dataset_name is None: + raise ValueError("Must provide dataset name or ID") + dataset_id = self.read_dataset(dataset_name=dataset_name).id + data = { + "split_name": split_name, + "examples": [ + str(_as_uuid(id_, f"example_ids[{i}]")) + for i, id_ in enumerate(example_ids) + ], + "remove": remove, + } + + response = self.request_with_retries( + "PUT", f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits", json=data + ) + ls_utils.raise_for_status_with_text(response) + def _resolve_run_id( self, run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID], From c459e631c7f9356bb17940dd5573356a62766674 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 13:37:05 -0700 Subject: [PATCH 244/373] make tuple --- python/tests/integration_tests/test_prompts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 0e292748f..a9f914a3e 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -1,4 +1,4 @@ -from typing import Literal +from typing import Literal, Tuple from uuid import uuid4 import pytest @@ -11,7 +11,7 @@ @pytest.fixture def langsmith_client() -> Client: - return Client(timeout_ms=[20_000, 90_000]) + return Client(timeout_ms=Tuple[20_000, 90_000]) @pytest.fixture From 176c350bed05a893599d09248c6ac244655bd539 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 13:40:55 -0700 Subject: [PATCH 245/373] tuple --- python/tests/integration_tests/test_prompts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index a9f914a3e..7b9ecf16d 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -1,4 +1,4 @@ -from typing import Literal, Tuple +from typing import Literal from uuid import uuid4 import pytest @@ -11,7 +11,7 @@ @pytest.fixture def langsmith_client() -> Client: - return Client(timeout_ms=Tuple[20_000, 90_000]) + return Client(timeout_ms=(20_000, 90_000)) @pytest.fixture From 9ec30758259227c85a9ed6d7c20c173156b18948 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 13:50:10 -0700 Subject: [PATCH 246/373] timeout 50k ms --- python/tests/integration_tests/test_prompts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 7b9ecf16d..e9240e904 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -11,7 +11,7 @@ @pytest.fixture def langsmith_client() -> Client: - return Client(timeout_ms=(20_000, 90_000)) + return Client(timeout_ms=(50_000, 90_000)) @pytest.fixture From ae2633e74f0ffaa5787777faa3686812e7ec1ae2 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 15:16:38 -0700 Subject: [PATCH 247/373] add pulling prompt with model --- python/Makefile | 3 - python/langsmith/client.py | 40 +++++-- python/langsmith/schemas.py | 1 + .../tests/integration_tests/test_prompts.py | 111 ++++++++++++++++++ 4 files changed, 142 insertions(+), 13 deletions(-) diff --git a/python/Makefile b/python/Makefile index a8bab2a27..d06830bf9 100644 --- a/python/Makefile +++ b/python/Makefile @@ -18,9 +18,6 @@ doctest: evals: poetry run python -m pytest tests/evaluation -prompts: - poetry run python -m pytest tests/prompts - lint: poetry run ruff check . poetry run mypy . 
diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 1dfdf5f90..15c0d8455 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4926,7 +4926,7 @@ def update_prompt( response.raise_for_status() return response.json() - def delete_prompt(self, prompt_identifier: str) -> bool: + def delete_prompt(self, prompt_identifier: str) -> Any: """Delete a prompt. Args: @@ -4943,9 +4943,15 @@ def delete_prompt(self, prompt_identifier: str) -> bool: raise self._owner_conflict_error("delete a prompt", owner) response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") - return response.status_code == 204 - def pull_prompt_object(self, prompt_identifier: str) -> ls_schemas.PromptObject: + return response + + def pull_prompt_object( + self, + prompt_identifier: str, + *, + include_model: Optional[bool] = False, + ) -> ls_schemas.PromptObject: """Pull a prompt object from the LangSmith API. Args: @@ -4972,13 +4978,19 @@ def pull_prompt_object(self, prompt_identifier: str) -> ls_schemas.PromptObject: commit_hash = latest_commit_hash response = self.request_with_retries( - "GET", f"/commits/{owner}/{prompt_name}/{commit_hash}" + "GET", + ( + f"/commits/{owner}/{prompt_name}/{commit_hash}" + f"{'?include_model=true' if include_model else ''}" + ), ) return ls_schemas.PromptObject( **{"owner": owner, "repo": prompt_name, **response.json()} ) - def pull_prompt(self, prompt_identifier: str) -> Any: + def pull_prompt( + self, prompt_identifier: str, *, include_model: Optional[bool] = False + ) -> Any: """Pull a prompt and return it as a LangChain PromptTemplate. This method requires `langchain_core`. 
@@ -4998,12 +5010,20 @@ def pull_prompt(self, prompt_identifier: str) -> Any: "package to run.\nInstall with `pip install langchain_core`" ) - prompt_object = self.pull_prompt_object(prompt_identifier) + prompt_object = self.pull_prompt_object( + prompt_identifier, include_model=include_model + ) prompt = loads(json.dumps(prompt_object.manifest)) - if isinstance(prompt, BasePromptTemplate): - if prompt.metadata is None: - prompt.metadata = {} - prompt.metadata.update( + + if isinstance(prompt, BasePromptTemplate) or isinstance( + prompt.first, BasePromptTemplate + ): + prompt_template = ( + prompt if isinstance(prompt, BasePromptTemplate) else prompt.first + ) + if prompt_template.metadata is None: + prompt_template.metadata = {} + prompt_template.metadata.update( { "lc_hub_owner": prompt_object.owner, "lc_hub_repo": prompt_object.repo, diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 6b9894a7f..8970f114d 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -750,6 +750,7 @@ class PromptObject(BaseModel): """Represents a Prompt with a manifest. Attributes: + owner (str): The handle of the owner of the prompt. repo (str): The name of the prompt. commit_hash (str): The commit hash of the prompt. manifest (Dict[str, Any]): The manifest of the prompt. 
diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index e9240e904..92001dc06 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -3,6 +3,7 @@ import pytest from langchain_core.prompts import ChatPromptTemplate, PromptTemplate +from langchain_core.runnables.base import RunnableSequence import langsmith.schemas as ls_schemas import langsmith.utils as ls_utils @@ -34,6 +35,101 @@ def prompt_template_3() -> PromptTemplate: return PromptTemplate.from_template("Summarize the following text: {text}") +@pytest.fixture +def prompt_with_model() -> dict: + return { + "id": ["langsmith", "playground", "PromptPlayground"], + "lc": 1, + "type": "constructor", + "kwargs": { + "last": { + "id": ["langchain", "schema", "runnable", "RunnableBinding"], + "lc": 1, + "type": "constructor", + "kwargs": { + "bound": { + "id": ["langchain", "chat_models", "openai", "ChatOpenAI"], + "lc": 1, + "type": "constructor", + "kwargs": { + "openai_api_key": { + "id": ["OPENAI_API_KEY"], + "lc": 1, + "type": "secret", + } + }, + }, + "kwargs": {}, + }, + }, + "first": { + "id": ["langchain", "prompts", "chat", "ChatPromptTemplate"], + "lc": 1, + "type": "constructor", + "kwargs": { + "messages": [ + { + "id": [ + "langchain", + "prompts", + "chat", + "SystemMessagePromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "prompt": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "template": "You are a chatbot.", + "input_variables": [], + "template_format": "f-string", + }, + } + }, + }, + { + "id": [ + "langchain", + "prompts", + "chat", + "HumanMessagePromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "prompt": { + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate", + ], + "lc": 1, + "type": "constructor", + "kwargs": { + "template": 
"{question}", + "input_variables": ["question"], + "template_format": "f-string", + }, + } + }, + }, + ], + "input_variables": ["question"], + }, + }, + }, + } + + def test_current_tenant_is_owner(langsmith_client: Client): settings = langsmith_client._get_settings() assert langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) @@ -178,6 +274,21 @@ def test_push_and_pull_prompt( ) +def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict): + prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}" + langsmith_client.push_prompt(prompt_name, object=prompt_with_model) + + pulled_prompt = langsmith_client.pull_prompt(prompt_name, include_model=True) + assert isinstance(pulled_prompt, RunnableSequence) + assert ( + pulled_prompt.first + and pulled_prompt.first.metadata + and pulled_prompt.first.metadata["lc_hub_repo"] == prompt_name + ) + + langsmith_client.delete_prompt(prompt_name) + + def test_like_unlike_prompt( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): From 6802bf6ff85d2de85bf517208f1b3cc79a6dbd24 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 15:18:45 -0700 Subject: [PATCH 248/373] mark as flaky --- .../tests/integration_tests/test_prompts.py | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 92001dc06..abb103465 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -130,19 +130,20 @@ def prompt_with_model() -> dict: } +@pytest.mark.skip(reason="This test is flaky") def test_current_tenant_is_owner(langsmith_client: Client): settings = langsmith_client._get_settings() assert langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) assert langsmith_client._current_tenant_is_owner("-") assert not langsmith_client._current_tenant_is_owner("non_existent_owner") - 
+@pytest.mark.skip(reason="This test is flaky") def test_list_prompts(langsmith_client: Client): response = langsmith_client.list_prompts(limit=10, offset=0) assert isinstance(response, ls_schemas.ListPromptsResponse) assert len(response.repos) <= 10 - +@pytest.mark.skip(reason="This test is flaky") def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -154,6 +155,7 @@ def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTempl langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): non_existent_prompt = f"non_existent_{uuid4().hex[:8]}" assert not langsmith_client._prompt_exists(non_existent_prompt) @@ -165,6 +167,7 @@ def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTe langsmith_client.delete_prompt(existent_prompt) +@pytest.mark.skip(reason="This test is flaky") def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -186,6 +189,7 @@ def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTe langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -195,6 +199,7 @@ def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTe assert not langsmith_client._prompt_exists(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_pull_prompt_object( langsmith_client: Client, prompt_template_1: ChatPromptTemplate 
): @@ -208,6 +213,7 @@ def test_pull_prompt_object( langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -254,6 +260,7 @@ def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemp langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_push_and_pull_prompt( langsmith_client: Client, prompt_template_2: ChatPromptTemplate ): @@ -274,6 +281,7 @@ def test_push_and_pull_prompt( ) +@pytest.mark.skip(reason="This test is flaky") def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict): prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_with_model) @@ -289,6 +297,7 @@ def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_like_unlike_prompt( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): @@ -308,6 +317,7 @@ def test_like_unlike_prompt( langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_get_latest_commit_hash( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): @@ -321,6 +331,7 @@ def test_get_latest_commit_hash( langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_create_prompt(langsmith_client: Client): prompt_name = f"test_create_prompt_{uuid4().hex[:8]}" created_prompt = langsmith_client.create_prompt( @@ -340,6 +351,7 @@ def test_create_prompt(langsmith_client: Client): langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_create_commit( langsmith_client: Client, 
prompt_template_2: ChatPromptTemplate, @@ -369,6 +381,7 @@ def test_create_commit( langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate): prompt_name = f"test_push_new_{uuid4().hex[:8]}" url = langsmith_client.push_prompt( @@ -407,6 +420,7 @@ def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate @pytest.mark.parametrize("is_public,expected_count", [(True, 1), (False, 1)]) +@pytest.mark.skip(reason="This test is flaky") def test_list_prompts_filter( langsmith_client: Client, prompt_template_1: ChatPromptTemplate, @@ -427,6 +441,7 @@ def test_list_prompts_filter( langsmith_client.delete_prompt(prompt_name) +@pytest.mark.skip(reason="This test is flaky") def test_update_prompt_archive( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): @@ -452,6 +467,7 @@ def test_update_prompt_archive( (ls_schemas.PromptSortField.updated_at, "desc"), ], ) +@pytest.mark.skip(reason="This test is flaky") def test_list_prompts_sorting( langsmith_client: Client, prompt_template_1: ChatPromptTemplate, From 7c01b74fd7d6ad02526d259f2b09c7d79b715a34 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 15:22:32 -0700 Subject: [PATCH 249/373] ci --- python/tests/integration_tests/test_prompts.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index abb103465..6de593e25 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -137,12 +137,14 @@ def test_current_tenant_is_owner(langsmith_client: Client): assert langsmith_client._current_tenant_is_owner("-") assert not langsmith_client._current_tenant_is_owner("non_existent_owner") + @pytest.mark.skip(reason="This test is flaky") def test_list_prompts(langsmith_client: Client): response = 
langsmith_client.list_prompts(limit=10, offset=0) assert isinstance(response, ls_schemas.ListPromptsResponse) assert len(response.repos) <= 10 + @pytest.mark.skip(reason="This test is flaky") def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" @@ -290,6 +292,7 @@ def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: assert isinstance(pulled_prompt, RunnableSequence) assert ( pulled_prompt.first + and "metadata" in pulled_prompt.first and pulled_prompt.first.metadata and pulled_prompt.first.metadata["lc_hub_repo"] == prompt_name ) From 1fe51294232e6b224abe5390d49d6ec96ddef09e Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 15:29:49 -0700 Subject: [PATCH 250/373] lint --- python/langsmith/wrappers/_openai.py | 12 +++++------ .../tests/integration_tests/test_prompts.py | 20 +++++++++++-------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 5b6798e8d..45aec0932 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"][ - "name" - ] += chunk.function.name + message["tool_calls"][index]["function"]["name"] += ( + chunk.function.name + ) if chunk.function.arguments: - message["tool_calls"][index]["function"][ - "arguments" - ] += chunk.function.arguments + message["tool_calls"][index]["function"]["arguments"] += ( + chunk.function.arguments + ) return { "index": choices[0].index, "finish_reason": next( diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 6de593e25..e47f5b5cb 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -2,7 +2,11 
@@ from uuid import uuid4 import pytest -from langchain_core.prompts import ChatPromptTemplate, PromptTemplate +from langchain_core.prompts import ( + BasePromptTemplate, + ChatPromptTemplate, + PromptTemplate, +) from langchain_core.runnables.base import RunnableSequence import langsmith.schemas as ls_schemas @@ -283,19 +287,19 @@ def test_push_and_pull_prompt( ) -@pytest.mark.skip(reason="This test is flaky") +# @pytest.mark.skip(reason="This test is flaky") def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict): prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_with_model) pulled_prompt = langsmith_client.pull_prompt(prompt_name, include_model=True) assert isinstance(pulled_prompt, RunnableSequence) - assert ( - pulled_prompt.first - and "metadata" in pulled_prompt.first - and pulled_prompt.first.metadata - and pulled_prompt.first.metadata["lc_hub_repo"] == prompt_name - ) + if getattr(pulled_prompt, "first", None): + first = getattr(pulled_prompt, "first") + assert isinstance(first, BasePromptTemplate) + assert first.metadata and first.metadata["lc_hub_repo"] == prompt_name + else: + assert False, "pulled_prompt.first should exist, incorrect prompt format" langsmith_client.delete_prompt(prompt_name) From eb0c73c92488bf17f35880b727efe2a8212a9a51 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 15:32:48 -0700 Subject: [PATCH 251/373] mark test flaky --- python/tests/integration_tests/test_prompts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index e47f5b5cb..3d4787960 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -287,7 +287,7 @@ def test_push_and_pull_prompt( ) -# @pytest.mark.skip(reason="This test is flaky") +@pytest.mark.skip(reason="This test is flaky") def 
test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict): prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_with_model) From 8c54f776dfedd664a2b67d37c462561998450b20 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 15:38:07 -0700 Subject: [PATCH 252/373] merge --- python/langsmith/client.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index f31ee6dc3..9e7f2b468 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4705,11 +4705,7 @@ def list_prompts( offset: int = 0, is_public: Optional[bool] = None, is_archived: Optional[bool] = False, -<<<<<<< HEAD - sort_field: ls_schemas.PromptsSortField = ls_schemas.PromptsSortField.updated_at, -======= sort_field: ls_schemas.PromptSortField = ls_schemas.PromptSortField.updated_at, ->>>>>>> eb0c73c92488bf17f35880b727efe2a8212a9a51 sort_direction: Literal["desc", "asc"] = "desc", query: Optional[str] = None, ) -> ls_schemas.ListPromptsResponse: From 8eb31c189da1c731596ca23265f932031f5b0fe7 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 15:41:13 -0700 Subject: [PATCH 253/373] reformat --- python/langsmith/wrappers/_openai.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 45aec0932..5b6798e8d 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"]["name"] += ( - chunk.function.name - ) + message["tool_calls"][index]["function"][ + "name" + ] += chunk.function.name if chunk.function.arguments: - message["tool_calls"][index]["function"]["arguments"] += ( - chunk.function.arguments - ) + 
message["tool_calls"][index]["function"][ + "arguments" + ] += chunk.function.arguments return { "index": choices[0].index, "finish_reason": next( From d62fa73a7a8d9451a9868dd08a9f39abe7d2136b Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 16:15:42 -0700 Subject: [PATCH 254/373] convert tests --- python/langsmith/client.py | 111 ++++++++++++------------ python/langsmith/wrappers/_openai.py | 12 +-- python/tests/unit_tests/test_prompts.py | 50 +++++++++++ 3 files changed, 112 insertions(+), 61 deletions(-) create mode 100644 python/tests/unit_tests/test_prompts.py diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 9e7f2b468..01a3521d2 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5099,61 +5099,6 @@ def push_prompt( ) return url - def convert_to_openai_format( - self, messages: Any, stop: Optional[List[str]] = None, **kwargs: Any - ) -> dict: - """Convert a prompt to OpenAI format. - - Requires the `langchain_openai` package to be installed. - - Args: - messages (Any): The messages to convert. - stop (Optional[List[str]]): Stop sequences for the prompt. - **kwargs: Additional arguments for the conversion. - - Returns: - dict: The prompt in OpenAI format. - """ - from langchain_openai import ChatOpenAI - - openai = ChatOpenAI() - - try: - return openai._get_request_payload(messages, stop=stop, **kwargs) - except Exception as e: - print(e) - return None - - def convert_to_anthropic_format( - self, - messages: Any, - model_name: Optional[str] = "claude-2", - stop: Optional[List[str]] = None, - **kwargs: Any, - ) -> dict: - """Convert a prompt to Anthropic format. - - Requires the `langchain_anthropic` package to be installed. - - Args: - messages (Any): The messages to convert. - model_name (Optional[str]): The model name to use. Defaults to "claude-2". - stop (Optional[List[str]]): Stop sequences for the prompt. - **kwargs: Additional arguments for the conversion. 
- - Returns: - dict: The prompt in Anthropic format. - """ - from langchain_anthropic import ChatAnthropic - - anthropic = ChatAnthropic(model_name=model_name) - - try: - return anthropic._get_request_payload(messages, stop=stop, **kwargs) - except Exception as e: - print(e) - return None - def _tracing_thread_drain_queue( tracing_queue: Queue, limit: int = 100, block: bool = True @@ -5301,3 +5246,59 @@ def _tracing_sub_thread_func( tracing_queue, limit=size_limit, block=False ): _tracing_thread_handle_batch(client, tracing_queue, next_batch) + + +def convert_to_openai_format( + messages: Any, stop: Optional[List[str]] = None, **kwargs: Any +) -> dict: + """Convert a prompt to OpenAI format. + + Requires the `langchain_openai` package to be installed. + + Args: + messages (Any): The messages to convert. + stop (Optional[List[str]]): Stop sequences for the prompt. + **kwargs: Additional arguments for the conversion. + + Returns: + dict: The prompt in OpenAI format. + """ + from langchain_openai import ChatOpenAI + + openai = ChatOpenAI() + + try: + return openai._get_request_payload(messages, stop=stop, **kwargs) + except Exception as e: + raise ls_utils.LangSmithError(f"Error converting to OpenAI format: {e}") + + +def convert_to_anthropic_format( + messages: Any, + model_name: str = "claude-2", + stop: Optional[List[str]] = None, + **kwargs: Any, +) -> dict: + """Convert a prompt to Anthropic format. + + Requires the `langchain_anthropic` package to be installed. + + Args: + messages (Any): The messages to convert. + model_name (Optional[str]): The model name to use. Defaults to "claude-2". + stop (Optional[List[str]]): Stop sequences for the prompt. + **kwargs: Additional arguments for the conversion. + + Returns: + dict: The prompt in Anthropic format. 
+ """ + from langchain_anthropic import ChatAnthropic + + anthropic = ChatAnthropic( + model_name=model_name, timeout=None, stop=stop, base_url=None, api_key=None + ) + + try: + return anthropic._get_request_payload(messages, stop=stop, **kwargs) + except Exception as e: + raise ls_utils.LangSmithError(f"Error converting to Anthropic format: {e}") diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 5b6798e8d..45aec0932 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"][ - "name" - ] += chunk.function.name + message["tool_calls"][index]["function"]["name"] += ( + chunk.function.name + ) if chunk.function.arguments: - message["tool_calls"][index]["function"][ - "arguments" - ] += chunk.function.arguments + message["tool_calls"][index]["function"]["arguments"] += ( + chunk.function.arguments + ) return { "index": choices[0].index, "finish_reason": next( diff --git a/python/tests/unit_tests/test_prompts.py b/python/tests/unit_tests/test_prompts.py new file mode 100644 index 000000000..e88b0f67d --- /dev/null +++ b/python/tests/unit_tests/test_prompts.py @@ -0,0 +1,50 @@ +import pytest +from langchain_core.prompts import ChatPromptTemplate + +from langsmith.client import convert_to_anthropic_format, convert_to_openai_format + + +@pytest.fixture +def chat_prompt_template(): + return ChatPromptTemplate.from_messages( + [ + ("system", "You are a chatbot"), + ("user", "{question}"), + ] + ) + + +def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_to_openai_format( + invoked, + ) + + assert res == { + "messages": [ + {"content": "You are a chatbot", "role": "system"}, + {"content": "What is the meaning of 
life?", "role": "user"}, + ], + "model": "gpt-3.5-turbo", + "stream": False, + "n": 1, + "temperature": 0.7, + } + + +def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_to_anthropic_format( + invoked, + ) + + print("Res: ", res) + + assert res == { + "model": "claude-2", + "max_tokens": 1024, + "messages": [{"role": "user", "content": "What is the meaning of life?"}], + "system": "You are a chatbot", + } From 867a2a56a985257e690df3cce557952f76679f7b Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 15 Jul 2024 17:20:22 -0700 Subject: [PATCH 255/373] Multi presigned url endpoints (#873) --- python/langsmith/__init__.py | 6 +++ python/langsmith/client.py | 58 ++++++++++++++++++------- python/langsmith/utils.py | 82 ++++++++++++++++++++++++++++++++++++ python/pyproject.toml | 2 +- 4 files changed, 131 insertions(+), 17 deletions(-) diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index 23f8901b4..1af040e7c 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -87,6 +87,12 @@ def __getattr__(name: str) -> Any: from langsmith._testing import unit return unit + elif name == "ContextThreadPoolExecutor": + from langsmith.utils import ( + ContextThreadPoolExecutor, + ) + + return ContextThreadPoolExecutor raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 951c7407c..1eae92745 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4,6 +4,7 @@ import atexit import collections +import concurrent.futures as cf import datetime import functools import importlib @@ -4061,23 +4062,48 @@ def create_presigned_feedback_tokens( else: raise ValueError(f"Unknown expiration type: {type(expiration)}") # assemble body, one entry per key - body: 
List[Dict[str, Any]] = [ - { - "run_id": run_id, - "feedback_key": feedback_key, - "feedback_config": feedback_config, - "expires_in": expires_in, - "expires_at": expires_at, - } - for feedback_key, feedback_config in zip(feedback_keys, feedback_configs) - ] - response = self.request_with_retries( - "POST", - "/feedback/tokens", - data=_dumps_json(body), + body = _dumps_json( + [ + { + "run_id": run_id, + "feedback_key": feedback_key, + "feedback_config": feedback_config, + "expires_in": expires_in, + "expires_at": expires_at, + } + for feedback_key, feedback_config in zip( + feedback_keys, feedback_configs + ) + ] ) - ls_utils.raise_for_status_with_text(response) - return [ls_schemas.FeedbackIngestToken(**part) for part in response.json()] + + def req(api_url: str, api_key: Optional[str]) -> list: + response = self.request_with_retries( + "POST", + f"{api_url}/feedback/tokens", + request_kwargs={ + "data": body, + "header": { + **self._headers, + X_API_KEY: api_key or self.api_key, + }, + }, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + + tokens = [] + with cf.ThreadPoolExecutor(max_workers=len(self._write_api_urls)) as executor: + futs = [ + executor.submit(req, api_url, api_key) + for api_url, api_key in self._write_api_urls.items() + ] + for fut in cf.as_completed(futs): + response = fut.result() + tokens.extend( + [ls_schemas.FeedbackIngestToken(**part) for part in response] + ) + return tokens def list_presigned_feedback_tokens( self, diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 2c0152e0f..d35558f4f 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -1,6 +1,9 @@ """Generic utility functions.""" +from __future__ import annotations + import contextlib +import contextvars import copy import enum import functools @@ -11,11 +14,14 @@ import sys import threading import traceback +from concurrent.futures import Future, ThreadPoolExecutor from typing import ( Any, Callable, Dict, 
Generator, + Iterable, + Iterator, List, Mapping, Optional, @@ -23,9 +29,11 @@ Tuple, TypeVar, Union, + cast, ) import requests +from typing_extensions import ParamSpec from urllib3.util import Retry from langsmith import schemas as ls_schemas @@ -561,3 +569,77 @@ def deepish_copy(val: T) -> T: # what we can _LOGGER.debug("Failed to deepcopy input: %s", repr(e)) return _middle_copy(val, memo) + + +P = ParamSpec("P") + + +class ContextThreadPoolExecutor(ThreadPoolExecutor): + """ThreadPoolExecutor that copies the context to the child thread.""" + + def submit( # type: ignore[override] + self, + func: Callable[P, T], + *args: P.args, + **kwargs: P.kwargs, + ) -> Future[T]: + """Submit a function to the executor. + + Args: + func (Callable[..., T]): The function to submit. + *args (Any): The positional arguments to the function. + **kwargs (Any): The keyword arguments to the function. + + Returns: + Future[T]: The future for the function. + """ + return super().submit( + cast( + Callable[..., T], + functools.partial( + contextvars.copy_context().run, func, *args, **kwargs + ), + ) + ) + + def map( + self, + fn: Callable[..., T], + *iterables: Iterable[Any], + timeout: Optional[float] = None, + chunksize: int = 1, + ) -> Iterator[T]: + """Return an iterator equivalent to stdlib map. + + Each function will receive it's own copy of the context from the parent thread. + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. 
+ + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + contexts = [contextvars.copy_context() for _ in range(len(iterables[0]))] # type: ignore[arg-type] + + def _wrapped_fn(*args: Any) -> T: + return contexts.pop().run(fn, *args) + + return super().map( + _wrapped_fn, + *iterables, + timeout=timeout, + chunksize=chunksize, + ) diff --git a/python/pyproject.toml b/python/pyproject.toml index a2b1a1183..ec6123c5b 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.85" +version = "0.1.86" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 2032fb0d0cf02fc97313714a80f2ac63d1e78ca6 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 18:05:48 -0700 Subject: [PATCH 256/373] rm skip --- python/tests/integration_tests/test_prompts.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 3d4787960..23599d16d 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -134,7 +134,6 @@ def prompt_with_model() -> dict: } -@pytest.mark.skip(reason="This test is flaky") def test_current_tenant_is_owner(langsmith_client: Client): settings = langsmith_client._get_settings() assert langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) @@ -142,14 +141,12 @@ def test_current_tenant_is_owner(langsmith_client: Client): assert not langsmith_client._current_tenant_is_owner("non_existent_owner") -@pytest.mark.skip(reason="This test is flaky") def test_list_prompts(langsmith_client: Client): response = langsmith_client.list_prompts(limit=10, offset=0) assert isinstance(response, ls_schemas.ListPromptsResponse) assert 
len(response.repos) <= 10 -@pytest.mark.skip(reason="This test is flaky") def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -161,7 +158,6 @@ def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTempl langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate): non_existent_prompt = f"non_existent_{uuid4().hex[:8]}" assert not langsmith_client._prompt_exists(non_existent_prompt) @@ -173,7 +169,6 @@ def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTe langsmith_client.delete_prompt(existent_prompt) -@pytest.mark.skip(reason="This test is flaky") def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -195,7 +190,6 @@ def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTe langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -205,7 +199,6 @@ def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTe assert not langsmith_client._prompt_exists(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_pull_prompt_object( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): @@ -219,7 +212,6 @@ def test_pull_prompt_object( langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_pull_prompt(langsmith_client: Client, prompt_template_1: 
ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) @@ -266,7 +258,6 @@ def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemp langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_push_and_pull_prompt( langsmith_client: Client, prompt_template_2: ChatPromptTemplate ): @@ -287,7 +278,6 @@ def test_push_and_pull_prompt( ) -@pytest.mark.skip(reason="This test is flaky") def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict): prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_with_model) @@ -304,7 +294,6 @@ def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_like_unlike_prompt( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): @@ -324,7 +313,6 @@ def test_like_unlike_prompt( langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_get_latest_commit_hash( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): @@ -338,7 +326,6 @@ def test_get_latest_commit_hash( langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_create_prompt(langsmith_client: Client): prompt_name = f"test_create_prompt_{uuid4().hex[:8]}" created_prompt = langsmith_client.create_prompt( @@ -358,7 +345,6 @@ def test_create_prompt(langsmith_client: Client): langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_create_commit( langsmith_client: Client, prompt_template_2: ChatPromptTemplate, @@ -388,7 +374,6 @@ def test_create_commit( langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_push_prompt(langsmith_client: 
Client, prompt_template_3: PromptTemplate): prompt_name = f"test_push_new_{uuid4().hex[:8]}" url = langsmith_client.push_prompt( @@ -427,7 +412,6 @@ def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate @pytest.mark.parametrize("is_public,expected_count", [(True, 1), (False, 1)]) -@pytest.mark.skip(reason="This test is flaky") def test_list_prompts_filter( langsmith_client: Client, prompt_template_1: ChatPromptTemplate, @@ -448,7 +432,6 @@ def test_list_prompts_filter( langsmith_client.delete_prompt(prompt_name) -@pytest.mark.skip(reason="This test is flaky") def test_update_prompt_archive( langsmith_client: Client, prompt_template_1: ChatPromptTemplate ): @@ -474,7 +457,6 @@ def test_update_prompt_archive( (ls_schemas.PromptSortField.updated_at, "desc"), ], ) -@pytest.mark.skip(reason="This test is flaky") def test_list_prompts_sorting( langsmith_client: Client, prompt_template_1: ChatPromptTemplate, From 9cccb9024978d9db539256fd00d059d36f170b1e Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 20:22:05 -0700 Subject: [PATCH 257/373] test improper manifest formats --- .../tests/integration_tests/test_prompts.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 23599d16d..41449e608 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -371,6 +371,27 @@ def test_create_commit( assert isinstance(prompt, ls_schemas.Prompt) assert prompt.num_commits == 2 + # try submitting different types of unaccepted manifests + try: + # this should fail + commit_url = langsmith_client.create_commit(prompt_name, object={"hi": "hello"}) + except ls_utils.LangSmithError as e: + err = str(e) + assert "Manifest must have an id field" in err + assert "400 Client Error" in err + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + + try: + # 
this should fail + commit_url = langsmith_client.create_commit(prompt_name, object={"id": ["hi"]}) + except ls_utils.LangSmithError as e: + err = str(e) + assert "Manifest type hi is not supported" in err + assert "400 Client Error" in err + except Exception as e: + pytest.fail(f"Unexpected exception raised: {e}") + langsmith_client.delete_prompt(prompt_name) From a4f7a91080a227c47d64376e44efdd8a0e561f91 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 20:36:07 -0700 Subject: [PATCH 258/373] testing the tests --- python/tests/integration_tests/test_prompts.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 41449e608..ddbb373e1 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -140,6 +140,11 @@ def test_current_tenant_is_owner(langsmith_client: Client): assert langsmith_client._current_tenant_is_owner("-") assert not langsmith_client._current_tenant_is_owner("non_existent_owner") +def test_current_tenant_is_owner2(langsmith_client: Client): + settings = langsmith_client._get_settings() + assert langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) + assert langsmith_client._current_tenant_is_owner("-") + assert not langsmith_client._current_tenant_is_owner("non_existent_owner") def test_list_prompts(langsmith_client: Client): response = langsmith_client.list_prompts(limit=10, offset=0) From d6b339131d48070a3ce804cf809c59a9df088514 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 20:48:54 -0700 Subject: [PATCH 259/373] testing the tests --- python/langsmith/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 15c0d8455..30688dc6a 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4605,7 +4605,7 @@ def _get_latest_commit_hash( """ 
response = self.request_with_retries( "GET", - f"/commits/{prompt_owner_and_name}/", + f"/commits/{prompt_owner_and_name}", params={"limit": limit, "offset": offset}, ) commits = response.json()["commits"] @@ -4861,7 +4861,7 @@ def create_commit( request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} response = self.request_with_retries( - "POST", f"/commits/{prompt_owner_and_name}", json=request_dict + "POST", f"/commits/{prompt_owner_and_name}/", json=request_dict ) commit_hash = response.json()["commit"]["commit_hash"] From 09118f3582d3241db32c6e6e652ea85b6584d2df Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Mon, 15 Jul 2024 21:07:35 -0700 Subject: [PATCH 260/373] feat(js): transparent handoff vol. 2 --- js/src/tests/traceable_langchain.test.ts | 104 ++++++++++++++++++++++- js/src/tests/utils/mock_client.ts | 15 +++- js/src/traceable.ts | 12 ++- 3 files changed, 125 insertions(+), 6 deletions(-) diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index c5d03e027..308586ffa 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -1,4 +1,4 @@ -import { traceable } from "../traceable.js"; +import { getCurrentRunTree, traceable } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; import { FakeChatModel } from "@langchain/core/utils/testing"; @@ -8,6 +8,7 @@ import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; import { BaseMessage, HumanMessage } from "@langchain/core/messages"; import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; +import { RunnableLambda, RunnableMap } from "@langchain/core/runnables"; describe("to langchain", () => { const llm = new FakeChatModel({}); @@ -311,6 +312,107 @@ describe("to traceable", () => { edges: [], }); }); + + 
test("invoke inside runnable lambda", async () => { + const { client, callSpy, langChainTracer } = mockClient(); + + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); + + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); + + const rootLC = RunnableLambda.from(async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }); + + expect( + await rootLC.invoke( + {}, + { callbacks: [langChainTracer], runName: "rootLC" } + ) + ).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLC:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLC:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLC:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); + + callSpy.mockClear(); + + const rootLS = traceable( + async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }, + { name: "rootLS", client, tracingEnabled: true } + ); + + expect(await rootLS()).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLS:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + 
["rootLS:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLS:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); + }); }); test("explicit nested", async () => { diff --git a/js/src/tests/utils/mock_client.ts b/js/src/tests/utils/mock_client.ts index 7b985dc86..2cf8bf9c6 100644 --- a/js/src/tests/utils/mock_client.ts +++ b/js/src/tests/utils/mock_client.ts @@ -1,13 +1,24 @@ // eslint-disable-next-line import/no-extraneous-dependencies import { jest } from "@jest/globals"; import { Client } from "../../index.js"; +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; type ClientParams = Exclude[0], undefined>; export const mockClient = (config?: Omit) => { - const client = new Client({ ...config, autoBatchTracing: false }); + const client = new Client({ + ...config, + apiKey: "MOCK", + autoBatchTracing: false, + }); const callSpy = jest .spyOn((client as any).caller, "call") .mockResolvedValue({ ok: true, text: () => "" }); - return { client, callSpy }; + const langChainTracer = new LangChainTracer({ + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore Overriden client + client, + }); + + return { client, callSpy, langChainTracer }; }; diff --git a/js/src/traceable.ts b/js/src/traceable.ts index ee977e58f..54fbe7af6 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -24,9 +24,13 @@ import { isPromiseMethod, } from "./utils/asserts.js"; -AsyncLocalStorageProviderSingleton.initializeGlobalInstance( - new AsyncLocalStorage() -); +// make sure we also properly initialise the LangChain context storage +const myInstance = new AsyncLocalStorage(); +const als: AsyncLocalStorage = + (globalThis as any).__lc_tracing_async_local_storage_v2 ?? 
myInstance; +(globalThis as any).__lc_tracing_async_local_storage_v2 = als; + +AsyncLocalStorageProviderSingleton.initializeGlobalInstance(als); const handleRunInputs = (rawInputs: unknown[]): KVMap => { const firstInput = rawInputs[0]; @@ -476,6 +480,8 @@ export function traceable any>( onEnd(currentRunTree); } } + + // TODO: update child_execution_order of the parent run await postRunPromise; await currentRunTree?.patchRun(); } From 1868a9fa8da9d25cbab0c77d969464d779b385f0 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 21:16:44 -0700 Subject: [PATCH 261/373] testing the tests more --- python/langsmith/client.py | 18 +++++++++--------- python/tests/integration_tests/test_prompts.py | 11 ++++------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 30688dc6a..c0220f6e7 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4605,7 +4605,7 @@ def _get_latest_commit_hash( """ response = self.request_with_retries( "GET", - f"/commits/{prompt_owner_and_name}", + f"/commits/{prompt_owner_and_name}/", params={"limit": limit, "offset": offset}, ) commits = response.json()["commits"] @@ -4629,7 +4629,7 @@ def _like_or_unlike_prompt( """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries( - "POST", f"/likes/{owner}/{prompt_name}", json={"like": like} + "POST", f"/likes/{owner}/{prompt_name}/", json={"like": like} ) response.raise_for_status() return response.json() @@ -4737,7 +4737,7 @@ def list_prompts( "match_prefix": "true" if query else None, } - response = self.request_with_retries("GET", "/repos", params=params) + response = self.request_with_retries("GET", "/repos/", params=params) return ls_schemas.ListPromptsResponse(**response.json()) def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: @@ -4756,7 +4756,7 @@ def get_prompt(self, prompt_identifier: str) -> 
Optional[ls_schemas.Prompt]: """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries( - "GET", f"/repos/{owner}/{prompt_name}", to_ignore=[ls_utils.LangSmithError] + "GET", f"/repos/{owner}/{prompt_name}/", to_ignore=[ls_utils.LangSmithError] ) if response.status_code == 200: return ls_schemas.Prompt(**response.json()["repo"]) @@ -4811,7 +4811,7 @@ def create_prompt( "is_public": is_public, } - response = self.request_with_retries("POST", "/repos", json=json) + response = self.request_with_retries("POST", "/repos/", json=json) response.raise_for_status() return ls_schemas.Prompt(**response.json()["repo"]) @@ -4861,7 +4861,7 @@ def create_commit( request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} response = self.request_with_retries( - "POST", f"/commits/{prompt_owner_and_name}/", json=request_dict + "POST", f"/commits/{prompt_owner_and_name}", json=request_dict ) commit_hash = response.json()["commit"]["commit_hash"] @@ -4921,7 +4921,7 @@ def update_prompt( owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries( - "PATCH", f"/repos/{owner}/{prompt_name}", json=json + "PATCH", f"/repos/{owner}/{prompt_name}/", json=json ) response.raise_for_status() return response.json() @@ -4942,7 +4942,7 @@ def delete_prompt(self, prompt_identifier: str) -> Any: if not self._current_tenant_is_owner(owner): raise self._owner_conflict_error("delete a prompt", owner) - response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") + response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}/") return response @@ -4980,7 +4980,7 @@ def pull_prompt_object( response = self.request_with_retries( "GET", ( - f"/commits/{owner}/{prompt_name}/{commit_hash}" + f"/commits/{owner}/{prompt_name}/{commit_hash}/" f"{'?include_model=true' if include_model else ''}" ), ) diff --git 
a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index ddbb373e1..5e371e768 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -140,11 +140,6 @@ def test_current_tenant_is_owner(langsmith_client: Client): assert langsmith_client._current_tenant_is_owner("-") assert not langsmith_client._current_tenant_is_owner("non_existent_owner") -def test_current_tenant_is_owner2(langsmith_client: Client): - settings = langsmith_client._get_settings() - assert langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) - assert langsmith_client._current_tenant_is_owner("-") - assert not langsmith_client._current_tenant_is_owner("non_existent_owner") def test_list_prompts(langsmith_client: Client): response = langsmith_client.list_prompts(limit=10, offset=0) @@ -154,7 +149,9 @@ def test_list_prompts(langsmith_client: Client): def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): prompt_name = f"test_prompt_{uuid4().hex[:8]}" - langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + url = langsmith_client.push_prompt(prompt_name, object=prompt_template_1) + assert isinstance(url, str) + assert langsmith_client._prompt_exists(prompt_name) prompt = langsmith_client.get_prompt(prompt_name) assert isinstance(prompt, ls_schemas.Prompt) @@ -168,7 +165,7 @@ def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTe assert not langsmith_client._prompt_exists(non_existent_prompt) existent_prompt = f"existent_{uuid4().hex[:8]}" - langsmith_client.push_prompt(existent_prompt, object=prompt_template_2) + assert langsmith_client.push_prompt(existent_prompt, object=prompt_template_2) assert langsmith_client._prompt_exists(existent_prompt) langsmith_client.delete_prompt(existent_prompt) From bb6ee1dfc8f63e7ad3d9c7b09b11d337c034dbfa Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 22:02:06 
-0700 Subject: [PATCH 262/373] maybe this --- python/langsmith/client.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index c0220f6e7..a6ee31056 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4666,11 +4666,8 @@ def _prompt_exists(self, prompt_identifier: str) -> bool: Returns: bool: True if the prompt exists, False otherwise. """ - try: - self.get_prompt(prompt_identifier) - return True - except requests.exceptions.HTTPError as e: - return e.response.status_code != 404 + prompt = self.get_prompt(prompt_identifier) + return True if prompt else False def like_prompt(self, prompt_identifier: str) -> Dict[str, int]: """Check if a prompt exists. @@ -4755,13 +4752,13 @@ def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: another error occurs. """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) - response = self.request_with_retries( - "GET", f"/repos/{owner}/{prompt_name}/", to_ignore=[ls_utils.LangSmithError] - ) - if response.status_code == 200: + try: + response = self.request_with_retries( + "GET", f"/repos/{owner}/{prompt_name}/" + ) return ls_schemas.Prompt(**response.json()["repo"]) - response.raise_for_status() - return None + except ls_utils.LangSmithNotFoundError: + return None def create_prompt( self, From a9e7276a41a3ab4150065aedccb4e75fcbe3f785 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 22:28:13 -0700 Subject: [PATCH 263/373] thought i did this before --- python/langsmith/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index a6ee31056..ac4b01f72 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4754,7 +4754,7 @@ def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: owner, prompt_name, _ = 
ls_utils.parse_prompt_identifier(prompt_identifier) try: response = self.request_with_retries( - "GET", f"/repos/{owner}/{prompt_name}/" + "GET", f"/repos/{owner}/{prompt_name}" ) return ls_schemas.Prompt(**response.json()["repo"]) except ls_utils.LangSmithNotFoundError: From 43e20d225e55830ccf9e33d765f27ce8f9e20f7a Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Mon, 15 Jul 2024 22:36:09 -0700 Subject: [PATCH 264/373] they should literally all match --- python/langsmith/client.py | 12 +++++------- python/langsmith/utils.py | 4 ++-- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 68280d2fc..42b8c2e37 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4733,7 +4733,7 @@ def _like_or_unlike_prompt( """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries( - "POST", f"/likes/{owner}/{prompt_name}/", json={"like": like} + "POST", f"/likes/{owner}/{prompt_name}", json={"like": like} ) response.raise_for_status() return response.json() @@ -4857,9 +4857,7 @@ def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]: """ owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) try: - response = self.request_with_retries( - "GET", f"/repos/{owner}/{prompt_name}" - ) + response = self.request_with_retries("GET", f"/repos/{owner}/{prompt_name}") return ls_schemas.Prompt(**response.json()["repo"]) except ls_utils.LangSmithNotFoundError: return None @@ -5022,7 +5020,7 @@ def update_prompt( owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) response = self.request_with_retries( - "PATCH", f"/repos/{owner}/{prompt_name}/", json=json + "PATCH", f"/repos/{owner}/{prompt_name}", json=json ) response.raise_for_status() return response.json() @@ -5043,7 +5041,7 @@ def delete_prompt(self, prompt_identifier: str) -> Any: if not self._current_tenant_is_owner(owner): 
raise self._owner_conflict_error("delete a prompt", owner) - response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}/") + response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") return response @@ -5081,7 +5079,7 @@ def pull_prompt_object( response = self.request_with_retries( "GET", ( - f"/commits/{owner}/{prompt_name}/{commit_hash}/" + f"/commits/{owner}/{prompt_name}/{commit_hash}" f"{'?include_model=true' if include_model else ''}" ), ) diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 39b368e7a..9fecdc04f 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -571,7 +571,6 @@ def deepish_copy(val: T) -> T: return _middle_copy(val, memo) - def is_version_greater_or_equal(current_version, target_version): """Check if the current version is greater or equal to the target version.""" from packaging import version @@ -615,6 +614,7 @@ def parse_prompt_identifier(identifier: str) -> Tuple[str, str, str]: raise ValueError(f"Invalid identifier format: {identifier}") return "-", owner_name, commit + P = ParamSpec("P") @@ -686,4 +686,4 @@ def _wrapped_fn(*args: Any) -> T: *iterables, timeout=timeout, chunksize=chunksize, - ) \ No newline at end of file + ) From 0154e0961ac8857a335e488300a4411df821280a Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 15 Jul 2024 22:44:39 -0700 Subject: [PATCH 265/373] Update evaluate_existing (#863) Also make fewer ops blocking in aevaluate, and update evaluator types to reflect what they can actually support Closes #844 --- python/langsmith/_internal/_aiter.py | 19 +++++++++++ python/langsmith/evaluation/_arunner.py | 27 +++++++-------- python/langsmith/evaluation/_runner.py | 44 +++++++++++++++++-------- python/langsmith/run_helpers.py | 33 ++++++------------- 4 files changed, 70 insertions(+), 53 deletions(-) diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py 
index aeb9d857a..1088ed07d 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -6,6 +6,8 @@ """ import asyncio +import contextvars +import functools import inspect from collections import deque from typing import ( @@ -300,3 +302,20 @@ def accepts_context(callable: Callable[..., Any]) -> bool: return inspect.signature(callable).parameters.get("context") is not None except ValueError: return False + + +# Ported from Python 3.9+ to support Python 3.8 +async def aio_to_thread(func, /, *args, **kwargs): + """Asynchronously run function *func* in a separate thread. + + Any *args and **kwargs supplied for this function are directly passed + to *func*. Also, the current :class:`contextvars.Context` is propagated, + allowing context variables from the main thread to be accessed in the + separate thread. + + Return a coroutine that can be awaited to get the eventual result of *func*. + """ + loop = asyncio.get_running_loop() + ctx = contextvars.copy_context() + func_call = functools.partial(ctx.run, func, *args, **kwargs) + return await loop.run_in_executor(None, func_call) diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index b5e1cc2ed..3c4973c15 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -27,7 +27,6 @@ from langsmith import run_trees, schemas from langsmith import utils as ls_utils from langsmith._internal import _aiter as aitertools -from langsmith.beta import warn_beta from langsmith.evaluation._runner import ( AEVALUATOR_T, DATA_T, @@ -36,6 +35,7 @@ ExperimentResultRow, _ExperimentManagerMixin, _ForwardResults, + _load_examples_map, _load_experiment, _load_tqdm, _load_traces, @@ -51,7 +51,6 @@ ATARGET_T = Callable[[dict], Awaitable[dict]] -@warn_beta async def aevaluate( target: Union[ATARGET_T, AsyncIterable[dict]], /, @@ -236,7 +235,6 @@ async def aevaluate( ) -@warn_beta async def aevaluate_existing( experiment: 
Union[str, uuid.UUID], /, @@ -316,17 +314,12 @@ async def aevaluate_existing( """ # noqa: E501 client = client or langsmith.Client() - project = _load_experiment(experiment, client) - runs = _load_traces(experiment, client, load_nested=load_nested) - data = [ - example - for example in client.list_examples( - dataset_id=project.reference_dataset_id, - as_of=project.metadata.get("dataset_version"), - ) - ] - runs = sorted(runs, key=lambda r: str(r.reference_example_id)) - data = sorted(data, key=lambda d: str(d.id)) + project = await aitertools.aio_to_thread(_load_experiment, experiment, client) + runs = await aitertools.aio_to_thread( + _load_traces, experiment, client, load_nested=load_nested + ) + data_map = await aitertools.aio_to_thread(_load_examples_map, client, project) + data = [data_map[run.reference_example_id] for run in runs] return await _aevaluate( runs, data=data, @@ -359,7 +352,8 @@ async def _aevaluate( ) client = client or langsmith.Client() runs = None if is_async_target else cast(Iterable[schemas.Run], target) - experiment_, runs = _resolve_experiment( + experiment_, runs = await aitertools.aio_to_thread( + _resolve_experiment, experiment, runs, client, @@ -696,7 +690,8 @@ async def _aapply_summary_evaluators( for result in flattened_results: feedback = result.dict(exclude={"target_run_id"}) evaluator_info = feedback.pop("evaluator_info", None) - self.client.create_feedback( + await aitertools.aio_to_thread( + self.client.create_feedback, **feedback, run_id=None, project_id=project_id, diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index f5cc1ae4c..f7470e92a 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -55,14 +55,23 @@ DATA_T = Union[str, uuid.UUID, Iterable[schemas.Example]] # Summary evaluator runs over the whole dataset # and reports aggregate metric(s) -SUMMARY_EVALUATOR_T = Callable[ - [Sequence[schemas.Run], Sequence[schemas.Example]], - 
Union[EvaluationResult, EvaluationResults], +SUMMARY_EVALUATOR_T = Union[ + Callable[ + [Sequence[schemas.Run], Sequence[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], + Callable[ + [List[schemas.Run], List[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], ] # Row-level evaluator EVALUATOR_T = Union[ RunEvaluator, - Callable[[schemas.Run, Optional[schemas.Example]], EvaluationResult], + Callable[ + [schemas.Run, Optional[schemas.Example]], + Union[EvaluationResult, EvaluationResults], + ], ] AEVALUATOR_T = Union[ Callable[ @@ -326,14 +335,8 @@ def evaluate_existing( client = client or langsmith.Client() project = _load_experiment(experiment, client) runs = _load_traces(experiment, client, load_nested=load_nested) - data = list( - client.list_examples( - dataset_id=project.reference_dataset_id, - as_of=project.metadata.get("dataset_version"), - ) - ) - runs = sorted(runs, key=lambda r: str(r.reference_example_id)) - data = sorted(data, key=lambda d: str(d.id)) + data_map = _load_examples_map(client, project) + data = [data_map[cast(uuid.UUID, run.reference_example_id)] for run in runs] return _evaluate( runs, data=data, @@ -343,6 +346,7 @@ def evaluate_existing( max_concurrency=max_concurrency, client=client, blocking=blocking, + experiment=project, ) @@ -866,6 +870,18 @@ def _load_traces( return results +def _load_examples_map( + client: langsmith.Client, project: schemas.TracerSession +) -> Dict[uuid.UUID, schemas.Example]: + return { + e.id: e + for e in client.list_examples( + dataset_id=project.reference_dataset_id, + as_of=project.metadata.get("dataset_version"), + ) + } + + IT = TypeVar("IT") @@ -1399,7 +1415,7 @@ def _wrapper_inner( def _wrapper_super_inner( runs_: str, examples_: str ) -> Union[EvaluationResult, EvaluationResults]: - return evaluator(runs, examples) + return evaluator(list(runs), list(examples)) return _wrapper_super_inner( f"Runs[] (Length={len(runs)})", f"Examples[] (Length={len(examples)})" @@ 
-1492,7 +1508,7 @@ def _resolve_experiment( if experiment is not None: if not experiment.name: raise ValueError("Experiment name must be defined if provided.") - return experiment, None + return experiment, runs # If we have runs, that means the experiment was already started. if runs is not None: if runs is not None: diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 88b8a7158..0a6bdc212 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -432,7 +432,7 @@ async def async_wrapper( **kwargs: Any, ) -> Any: """Async version of wrapper function.""" - run_container = await _aio_to_thread( + run_container = await aitertools.aio_to_thread( _setup_run, func, container_input=container_input, @@ -461,17 +461,19 @@ async def async_wrapper( except BaseException as e: # shield from cancellation, given we're catching all exceptions await asyncio.shield( - _aio_to_thread(_container_end, run_container, error=e) + aitertools.aio_to_thread(_container_end, run_container, error=e) ) raise e - await _aio_to_thread(_container_end, run_container, outputs=function_result) + await aitertools.aio_to_thread( + _container_end, run_container, outputs=function_result + ) return function_result @functools.wraps(func) async def async_generator_wrapper( *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any ) -> AsyncGenerator: - run_container = await _aio_to_thread( + run_container = await aitertools.aio_to_thread( _setup_run, func, container_input=container_input, @@ -532,7 +534,7 @@ async def async_generator_wrapper( pass except BaseException as e: await asyncio.shield( - _aio_to_thread(_container_end, run_container, error=e) + aitertools.aio_to_thread(_container_end, run_container, error=e) ) raise e if results: @@ -546,7 +548,9 @@ async def async_generator_wrapper( function_result = results else: function_result = None - await _aio_to_thread(_container_end, run_container, outputs=function_result) + await 
aitertools.aio_to_thread( + _container_end, run_container, outputs=function_result + ) @functools.wraps(func) def wrapper( @@ -1166,20 +1170,3 @@ def _get_inputs_safe( except BaseException as e: LOGGER.debug(f"Failed to get inputs for {signature}: {e}") return {"args": args, "kwargs": kwargs} - - -# Ported from Python 3.9+ to support Python 3.8 -async def _aio_to_thread(func, /, *args, **kwargs): - """Asynchronously run function *func* in a separate thread. - - Any *args and **kwargs supplied for this function are directly passed - to *func*. Also, the current :class:`contextvars.Context` is propagated, - allowing context variables from the main thread to be accessed in the - separate thread. - - Return a coroutine that can be awaited to get the eventual result of *func*. - """ - loop = asyncio.get_running_loop() - ctx = contextvars.copy_context() - func_call = functools.partial(ctx.run, func, *args, **kwargs) - return await loop.run_in_executor(None, func_call) From 8e85f9b6433b737bf063dceee8b5c9cc7ddd47c3 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 09:51:23 -0700 Subject: [PATCH 266/373] fix tests --- python/langsmith/client.py | 21 ++++++-- .../tests/integration_tests/test_prompts.py | 50 ++++++++++++++++++- python/tests/unit_tests/test_prompts.py | 50 ------------------- 3 files changed, 66 insertions(+), 55 deletions(-) delete mode 100644 python/tests/unit_tests/test_prompts.py diff --git a/python/langsmith/client.py b/python/langsmith/client.py index ebcb0f88f..6c9b62968 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5345,7 +5345,7 @@ def _tracing_sub_thread_func( _tracing_thread_handle_batch(client, tracing_queue, next_batch) -def convert_to_openai_format( +def convert_prompt_to_openai_format( messages: Any, stop: Optional[List[str]] = None, **kwargs: Any ) -> dict: """Convert a prompt to OpenAI format. @@ -5360,7 +5360,13 @@ def convert_to_openai_format( Returns: dict: The prompt in OpenAI format. 
""" - from langchain_openai import ChatOpenAI + try: + from langchain_openai import ChatOpenAI + except ImportError: + raise ImportError( + "The convert_prompt_to_openai_format function requires the langchain_openai" + "package to run.\nInstall with `pip install langchain_openai`" + ) openai = ChatOpenAI() @@ -5370,7 +5376,7 @@ def convert_to_openai_format( raise ls_utils.LangSmithError(f"Error converting to OpenAI format: {e}") -def convert_to_anthropic_format( +def convert_prompt_to_anthropic_format( messages: Any, model_name: str = "claude-2", stop: Optional[List[str]] = None, @@ -5389,7 +5395,14 @@ def convert_to_anthropic_format( Returns: dict: The prompt in Anthropic format. """ - from langchain_anthropic import ChatAnthropic + try: + from langchain_anthropic import ChatAnthropic + except ImportError: + raise ImportError( + "The convert_prompt_to_anthropic_format function requires the " + "langchain_anthropic package to run.\n" + "Install with `pip install langchain_anthropic`" + ) anthropic = ChatAnthropic( model_name=model_name, timeout=None, stop=stop, base_url=None, api_key=None diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 5e371e768..ece7f072a 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -11,7 +11,11 @@ import langsmith.schemas as ls_schemas import langsmith.utils as ls_utils -from langsmith.client import Client +from langsmith.client import ( + Client, + convert_prompt_to_anthropic_format, + convert_prompt_to_openai_format, +) @pytest.fixture @@ -134,6 +138,16 @@ def prompt_with_model() -> dict: } +@pytest.fixture +def chat_prompt_template(): + return ChatPromptTemplate.from_messages( + [ + ("system", "You are a chatbot"), + ("user", "{question}"), + ] + ) + + def test_current_tenant_is_owner(langsmith_client: Client): settings = langsmith_client._get_settings() assert 
langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) @@ -502,3 +516,37 @@ def test_list_prompts_sorting( for name in prompt_names: langsmith_client.delete_prompt(name) + + +def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_prompt_to_openai_format( + invoked, + ) + + assert res == { + "messages": [ + {"content": "You are a chatbot", "role": "system"}, + {"content": "What is the meaning of life?", "role": "user"}, + ], + "model": "gpt-3.5-turbo", + "stream": False, + "n": 1, + "temperature": 0.7, + } + + +def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): + invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) + + res = convert_prompt_to_anthropic_format( + invoked, + ) + + assert res == { + "model": "claude-2", + "max_tokens": 1024, + "messages": [{"role": "user", "content": "What is the meaning of life?"}], + "system": "You are a chatbot", + } diff --git a/python/tests/unit_tests/test_prompts.py b/python/tests/unit_tests/test_prompts.py deleted file mode 100644 index e88b0f67d..000000000 --- a/python/tests/unit_tests/test_prompts.py +++ /dev/null @@ -1,50 +0,0 @@ -import pytest -from langchain_core.prompts import ChatPromptTemplate - -from langsmith.client import convert_to_anthropic_format, convert_to_openai_format - - -@pytest.fixture -def chat_prompt_template(): - return ChatPromptTemplate.from_messages( - [ - ("system", "You are a chatbot"), - ("user", "{question}"), - ] - ) - - -def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): - invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) - - res = convert_to_openai_format( - invoked, - ) - - assert res == { - "messages": [ - {"content": "You are a chatbot", "role": "system"}, - {"content": "What is the meaning of life?", "role": "user"}, - ], - "model": 
"gpt-3.5-turbo", - "stream": False, - "n": 1, - "temperature": 0.7, - } - - -def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): - invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) - - res = convert_to_anthropic_format( - invoked, - ) - - print("Res: ", res) - - assert res == { - "model": "claude-2", - "max_tokens": 1024, - "messages": [{"role": "user", "content": "What is the meaning of life?"}], - "system": "You are a chatbot", - } From ff1ff50e4673a2a123f3f9e95eb89124740ab82c Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 10:16:21 -0700 Subject: [PATCH 267/373] add dependencies --- .github/workflows/python_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 5a45962ae..98020f18e 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -44,7 +44,7 @@ jobs: - name: Install dependencies run: | poetry install --with dev,lint - poetry run pip install -U langchain langchain-core + poetry run pip install -U langchain langchain-core langchain_anthropic langchain_openai - name: Build ${{ matrix.python-version }} run: poetry build - name: Lint ${{ matrix.python-version }} From 046797dd706f862056ddaa4ed9890ec83f3dffce Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 10:18:47 -0700 Subject: [PATCH 268/373] format --- python/langsmith/wrappers/_openai.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 45aec0932..5b6798e8d 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,13 @@ def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"]["name"] += ( - chunk.function.name - ) + 
message["tool_calls"][index]["function"][ + "name" + ] += chunk.function.name if chunk.function.arguments: - message["tool_calls"][index]["function"]["arguments"] += ( - chunk.function.arguments - ) + message["tool_calls"][index]["function"][ + "arguments" + ] += chunk.function.arguments return { "index": choices[0].index, "finish_reason": next( From 0b58ebc3c3dfc0c24e23e2dec6b22fd89483926c Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 10:53:07 -0700 Subject: [PATCH 269/373] Fix lint --- js/src/tests/traceable_langchain.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index 308586ffa..3d4136414 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -1,4 +1,4 @@ -import { getCurrentRunTree, traceable } from "../traceable.js"; +import { traceable } from "../traceable.js"; import { getAssumedTreeFromCalls } from "./utils/tree.js"; import { mockClient } from "./utils/mock_client.js"; import { FakeChatModel } from "@langchain/core/utils/testing"; @@ -8,7 +8,7 @@ import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain"; import { BaseMessage, HumanMessage } from "@langchain/core/messages"; import { awaitAllCallbacks } from "@langchain/core/callbacks/promises"; import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js"; -import { RunnableLambda, RunnableMap } from "@langchain/core/runnables"; +import { RunnableLambda } from "@langchain/core/runnables"; describe("to langchain", () => { const llm = new FakeChatModel({}); From d638451b61df9f7c315b19ee58b9147d0d55e98b Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 11:51:51 -0700 Subject: [PATCH 270/373] Cleanup --- js/src/tests/traceable_langchain.test.ts | 199 +++++++++++------------ 1 file changed, 98 insertions(+), 101 deletions(-) diff --git a/js/src/tests/traceable_langchain.test.ts 
b/js/src/tests/traceable_langchain.test.ts index 3d4136414..5fd0ea412 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -312,107 +312,6 @@ describe("to traceable", () => { edges: [], }); }); - - test("invoke inside runnable lambda", async () => { - const { client, callSpy, langChainTracer } = mockClient(); - - const lc = RunnableLambda.from(async () => "Hello from LangChain"); - const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); - - const childA = RunnableLambda.from(async () => { - const results: string[] = []; - results.push(await lc.invoke({})); - results.push(await ls()); - return results.join("\n"); - }); - - const childB = traceable( - async () => [await lc.invoke({}), await ls()].join("\n"), - { name: "childB" } - ); - - const rootLC = RunnableLambda.from(async () => { - return [ - await childA.invoke({}, { runName: "childA" }), - await childB(), - ].join("\n"); - }); - - expect( - await rootLC.invoke( - {}, - { callbacks: [langChainTracer], runName: "rootLC" } - ) - ).toEqual( - [ - "Hello from LangChain", - "Hello from LangSmith", - "Hello from LangChain", - "Hello from LangSmith", - ].join("\n") - ); - - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "rootLC:0", - "childA:1", - "RunnableLambda:2", - "traceable:3", - "childB:4", - "RunnableLambda:5", - "traceable:6", - ], - edges: [ - ["rootLC:0", "childA:1"], - ["childA:1", "RunnableLambda:2"], - ["childA:1", "traceable:3"], - ["rootLC:0", "childB:4"], - ["childB:4", "RunnableLambda:5"], - ["childB:4", "traceable:6"], - ], - }); - - callSpy.mockClear(); - - const rootLS = traceable( - async () => { - return [ - await childA.invoke({}, { runName: "childA" }), - await childB(), - ].join("\n"); - }, - { name: "rootLS", client, tracingEnabled: true } - ); - - expect(await rootLS()).toEqual( - [ - "Hello from LangChain", - "Hello from LangSmith", - "Hello from LangChain", - "Hello from LangSmith", - 
].join("\n") - ); - - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "rootLS:0", - "childA:1", - "RunnableLambda:2", - "traceable:3", - "childB:4", - "RunnableLambda:5", - "traceable:6", - ], - edges: [ - ["rootLS:0", "childA:1"], - ["childA:1", "RunnableLambda:2"], - ["childA:1", "traceable:3"], - ["rootLS:0", "childB:4"], - ["childB:4", "RunnableLambda:5"], - ["childB:4", "traceable:6"], - ], - }); - }); }); test("explicit nested", async () => { @@ -495,3 +394,101 @@ test("explicit nested", async () => { ], }); }); + +test("automatic tracing", async () => { + const { client, callSpy, langChainTracer } = mockClient(); + + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); + + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); + + const rootLC = RunnableLambda.from(async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }); + + expect( + await rootLC.invoke({}, { callbacks: [langChainTracer], runName: "rootLC" }) + ).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLC:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLC:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLC:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); + + callSpy.mockClear(); + + const rootLS = traceable( + async () => { + return [ 
+ await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }, + { name: "rootLS", client, tracingEnabled: true } + ); + + expect(await rootLS()).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLS:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLS:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLS:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); +}); From 77ee8a5da5b4c2242786c5b3bdbcd274f20bde9d Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 11:55:47 -0700 Subject: [PATCH 271/373] comments --- python/langsmith/client.py | 35 +++++++++++++------ python/langsmith/schemas.py | 2 +- python/langsmith/utils.py | 2 +- .../tests/integration_tests/test_prompts.py | 4 +-- 4 files changed, 29 insertions(+), 14 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 42b8c2e37..c19178e20 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4664,6 +4664,7 @@ def _evaluate_strings( **kwargs, ) + @functools.lru_cache(maxsize=1) def _get_settings(self) -> dict: """Get the settings for the current tenant. @@ -5025,7 +5026,7 @@ def update_prompt( response.raise_for_status() return response.json() - def delete_prompt(self, prompt_identifier: str) -> Any: + def delete_prompt(self, prompt_identifier: str) -> None: """Delete a prompt. 
Args: @@ -5042,15 +5043,14 @@ def delete_prompt(self, prompt_identifier: str) -> Any: raise self._owner_conflict_error("delete a prompt", owner) response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}") + response.raise_for_status() - return response - - def pull_prompt_object( + def pull_prompt_commit( self, prompt_identifier: str, *, include_model: Optional[bool] = False, - ) -> ls_schemas.PromptObject: + ) -> ls_schemas.PromptCommit: """Pull a prompt object from the LangSmith API. Args: @@ -5083,7 +5083,7 @@ def pull_prompt_object( f"{'?include_model=true' if include_model else ''}" ), ) - return ls_schemas.PromptObject( + return ls_schemas.PromptCommit( **{"owner": owner, "repo": prompt_name, **response.json()} ) @@ -5103,23 +5103,38 @@ def pull_prompt( try: from langchain_core.load.load import loads from langchain_core.prompts import BasePromptTemplate + from langchain_core.runnables.base import RunnableSequence except ImportError: raise ImportError( "The client.pull_prompt function requires the langchain_core" "package to run.\nInstall with `pip install langchain_core`" ) - prompt_object = self.pull_prompt_object( + prompt_object = self.pull_prompt_commit( prompt_identifier, include_model=include_model ) prompt = loads(json.dumps(prompt_object.manifest)) - if isinstance(prompt, BasePromptTemplate) or isinstance( - prompt.first, BasePromptTemplate + if ( + isinstance(prompt, BasePromptTemplate) + or isinstance(prompt, RunnableSequence) + and isinstance(prompt.first, BasePromptTemplate) ): prompt_template = ( - prompt if isinstance(prompt, BasePromptTemplate) else prompt.first + prompt + if isinstance(prompt, BasePromptTemplate) + else ( + prompt.first + if isinstance(prompt, RunnableSequence) + and isinstance(prompt.first, BasePromptTemplate) + else None + ) ) + if prompt_template is None: + raise ls_utils.LangSmithError( + "Prompt object is not a valid prompt template." 
+ ) + if prompt_template.metadata is None: prompt_template.metadata = {} prompt_template.metadata.update( diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 8970f114d..f7eb3d955 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -746,7 +746,7 @@ def metadata(self) -> dict[str, Any]: return self.extra["metadata"] -class PromptObject(BaseModel): +class PromptCommit(BaseModel): """Represents a Prompt with a manifest. Attributes: diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 9fecdc04f..2456af92a 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -571,7 +571,7 @@ def deepish_copy(val: T) -> T: return _middle_copy(val, memo) -def is_version_greater_or_equal(current_version, target_version): +def is_version_greater_or_equal(current_version: str, target_version: str) -> bool: """Check if the current version is greater or equal to the target version.""" from packaging import version diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 5e371e768..9d3db4a2b 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -207,8 +207,8 @@ def test_pull_prompt_object( prompt_name = f"test_prompt_{uuid4().hex[:8]}" langsmith_client.push_prompt(prompt_name, object=prompt_template_1) - manifest = langsmith_client.pull_prompt_object(prompt_name) - assert isinstance(manifest, ls_schemas.PromptObject) + manifest = langsmith_client.pull_prompt_commit(prompt_name) + assert isinstance(manifest, ls_schemas.PromptCommit) assert manifest.repo == prompt_name langsmith_client.delete_prompt(prompt_name) From 96202fec3e515abc3cf47c32fbeaaf9ab549681e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 12:02:02 -0700 Subject: [PATCH 272/373] Use global symbol for ALS instead --- js/package.json | 2 +- js/src/singletons/traceable.ts | 17 +- 
js/src/tests/traceable_langchain.test.ts | 190 +++++++++++++---------- js/src/traceable.ts | 9 +- 4 files changed, 119 insertions(+), 99 deletions(-) diff --git a/js/package.json b/js/package.json index f6bb02d18..a3a2d979e 100644 --- a/js/package.json +++ b/js/package.json @@ -261,4 +261,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/src/singletons/traceable.ts b/js/src/singletons/traceable.ts index c750bc8ac..0cdd1f936 100644 --- a/js/src/singletons/traceable.ts +++ b/js/src/singletons/traceable.ts @@ -17,20 +17,21 @@ class MockAsyncLocalStorage implements AsyncLocalStorageInterface { } } -class AsyncLocalStorageProvider { - private asyncLocalStorage: AsyncLocalStorageInterface = - new MockAsyncLocalStorage(); +const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage"); - private hasBeenInitialized = false; +const mockAsyncLocalStorage = new MockAsyncLocalStorage(); +class AsyncLocalStorageProvider { getInstance(): AsyncLocalStorageInterface { - return this.asyncLocalStorage; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return (globalThis as any)[TRACING_ALS_KEY] ?? 
mockAsyncLocalStorage; } initializeGlobalInstance(instance: AsyncLocalStorageInterface) { - if (!this.hasBeenInitialized) { - this.hasBeenInitialized = true; - this.asyncLocalStorage = instance; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + if ((globalThis as any)[TRACING_ALS_KEY] === undefined) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (globalThis as any)[TRACING_ALS_KEY] = instance; } } } diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index 5fd0ea412..a4f587388 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -395,100 +395,122 @@ test("explicit nested", async () => { }); }); -test("automatic tracing", async () => { - const { client, callSpy, langChainTracer } = mockClient(); +describe("automatic tracing", () => { + it("root langchain", async () => { + const { callSpy, langChainTracer } = mockClient(); + + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); - const lc = RunnableLambda.from(async () => "Hello from LangChain"); - const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); - const childA = RunnableLambda.from(async () => { - const results: string[] = []; - results.push(await lc.invoke({})); - results.push(await ls()); - return results.join("\n"); - }); + const rootLC = RunnableLambda.from(async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }); - const childB = traceable( - async () => [await lc.invoke({}), await ls()].join("\n"), - { name: 
"childB" } - ); + expect( + await rootLC.invoke( + {}, + { callbacks: [langChainTracer], runName: "rootLC" } + ) + ).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); - const rootLC = RunnableLambda.from(async () => { - return [ - await childA.invoke({}, { runName: "childA" }), - await childB(), - ].join("\n"); + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLC:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLC:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLC:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); }); - expect( - await rootLC.invoke({}, { callbacks: [langChainTracer], runName: "rootLC" }) - ).toEqual( - [ - "Hello from LangChain", - "Hello from LangSmith", - "Hello from LangChain", - "Hello from LangSmith", - ].join("\n") - ); + it("root traceable", async () => { + const { client, callSpy } = mockClient(); - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "rootLC:0", - "childA:1", - "RunnableLambda:2", - "traceable:3", - "childB:4", - "RunnableLambda:5", - "traceable:6", - ], - edges: [ - ["rootLC:0", "childA:1"], - ["childA:1", "RunnableLambda:2"], - ["childA:1", "traceable:3"], - ["rootLC:0", "childB:4"], - ["childB:4", "RunnableLambda:5"], - ["childB:4", "traceable:6"], - ], - }); + const lc = RunnableLambda.from(async () => "Hello from LangChain"); + const ls = traceable(() => "Hello from LangSmith", { name: "traceable" }); - callSpy.mockClear(); + const childA = RunnableLambda.from(async () => { + const results: string[] = []; + results.push(await lc.invoke({})); + results.push(await ls()); + return results.join("\n"); + }); - const rootLS = traceable( - async () => { - return [ - await childA.invoke({}, { runName: 
"childA" }), - await childB(), - ].join("\n"); - }, - { name: "rootLS", client, tracingEnabled: true } - ); + const childB = traceable( + async () => [await lc.invoke({}), await ls()].join("\n"), + { name: "childB" } + ); - expect(await rootLS()).toEqual( - [ - "Hello from LangChain", - "Hello from LangSmith", - "Hello from LangChain", - "Hello from LangSmith", - ].join("\n") - ); + const rootLS = traceable( + async () => { + return [ + await childA.invoke({}, { runName: "childA" }), + await childB(), + ].join("\n"); + }, + { name: "rootLS", client, tracingEnabled: true } + ); - expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ - nodes: [ - "rootLS:0", - "childA:1", - "RunnableLambda:2", - "traceable:3", - "childB:4", - "RunnableLambda:5", - "traceable:6", - ], - edges: [ - ["rootLS:0", "childA:1"], - ["childA:1", "RunnableLambda:2"], - ["childA:1", "traceable:3"], - ["rootLS:0", "childB:4"], - ["childB:4", "RunnableLambda:5"], - ["childB:4", "traceable:6"], - ], + expect(await rootLS()).toEqual( + [ + "Hello from LangChain", + "Hello from LangSmith", + "Hello from LangChain", + "Hello from LangSmith", + ].join("\n") + ); + + expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({ + nodes: [ + "rootLS:0", + "childA:1", + "RunnableLambda:2", + "traceable:3", + "childB:4", + "RunnableLambda:5", + "traceable:6", + ], + edges: [ + ["rootLS:0", "childA:1"], + ["childA:1", "RunnableLambda:2"], + ["childA:1", "traceable:3"], + ["rootLS:0", "childB:4"], + ["childB:4", "RunnableLambda:5"], + ["childB:4", "traceable:6"], + ], + }); }); }); diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 54fbe7af6..384dc11f9 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -25,12 +25,9 @@ import { } from "./utils/asserts.js"; // make sure we also properly initialise the LangChain context storage -const myInstance = new AsyncLocalStorage(); -const als: AsyncLocalStorage = - (globalThis as any).__lc_tracing_async_local_storage_v2 ?? 
myInstance; -(globalThis as any).__lc_tracing_async_local_storage_v2 = als; - -AsyncLocalStorageProviderSingleton.initializeGlobalInstance(als); +AsyncLocalStorageProviderSingleton.initializeGlobalInstance( + new AsyncLocalStorage() +); const handleRunInputs = (rawInputs: unknown[]): KVMap => { const firstInput = rawInputs[0]; From a89b514c7cf954e85a5e632fa767fbe4c11da75f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 12:56:58 -0700 Subject: [PATCH 273/373] cache --- python/langsmith/client.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index c19178e20..b726d926e 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -472,6 +472,7 @@ class Client: "_hide_outputs", "_info", "_write_api_urls", + "_settings", ] def __init__( @@ -614,6 +615,8 @@ def __init__( else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) + self._settings = None + def _repr_html_(self) -> str: """Return an HTML representation of the instance with a link to the URL. @@ -701,6 +704,18 @@ def info(self) -> ls_schemas.LangSmithInfo: self._info = ls_schemas.LangSmithInfo() return self._info + def _get_settings(self) -> dict: + """Get the settings for the current tenant. + + Returns: + dict: The settings for the current tenant. + """ + if self._settings is None: + response = self.request_with_retries("GET", "/settings") + self._settings = response.json() + + return self._settings + def request_with_retries( self, /, @@ -4664,16 +4679,6 @@ def _evaluate_strings( **kwargs, ) - @functools.lru_cache(maxsize=1) - def _get_settings(self) -> dict: - """Get the settings for the current tenant. - - Returns: - dict: The settings for the current tenant. - """ - response = self.request_with_retries("GET", "/settings") - return response.json() - def _current_tenant_is_owner(self, owner: str) -> bool: """Check if the current workspace has the same handle as owner. 
From cb58952a65f365ffb5d294611398ae71162edf0f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 12:59:48 -0700 Subject: [PATCH 274/373] fix types --- python/langsmith/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 6dc454fcc..959365132 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -615,7 +615,7 @@ def __init__( else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) - self._settings = None + self._settings = self._get_settings() def _repr_html_(self) -> str: """Return an HTML representation of the instance with a link to the URL. From 690b1ea4d2cfcb823508b2c1287ac98d2c89daf2 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:06:50 -0700 Subject: [PATCH 275/373] Fixes (#875) Fixup: - Top level type hints to show new exports - multi-write url feedback token creation --- python/langsmith/__init__.py | 4 ++++ python/langsmith/client.py | 2 +- python/langsmith/run_trees.py | 1 + python/pyproject.toml | 2 +- 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/python/langsmith/__init__.py b/python/langsmith/__init__.py index 1af040e7c..b865c4754 100644 --- a/python/langsmith/__init__.py +++ b/python/langsmith/__init__.py @@ -16,6 +16,9 @@ tracing_context, ) from langsmith.run_trees import RunTree + from langsmith.utils import ( + ContextThreadPoolExecutor, + ) def __getattr__(name: str) -> Any: @@ -114,4 +117,5 @@ def __getattr__(name: str) -> Any: "tracing_context", "get_tracing_context", "get_current_run_tree", + "ContextThreadPoolExecutor", ] diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 1eae92745..89644a16c 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4083,7 +4083,7 @@ def req(api_url: str, api_key: Optional[str]) -> list: f"{api_url}/feedback/tokens", request_kwargs={ "data": body, - "header": { + 
"headers": { **self._headers, X_API_KEY: api_key or self.api_key, }, diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index c2df73964..e41e7eaa2 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -352,6 +352,7 @@ def from_runnable_config( kwargs["outputs"] = run.outputs kwargs["start_time"] = run.start_time kwargs["end_time"] = run.end_time + kwargs["tags"] = sorted(set(run.tags or [] + kwargs.get("tags", []))) extra_ = kwargs.setdefault("extra", {}) metadata_ = extra_.setdefault("metadata", {}) metadata_.update(run.metadata) diff --git a/python/pyproject.toml b/python/pyproject.toml index ec6123c5b..6a7e7cb10 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.86" +version = "0.1.87" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 667833615ca2c7b2243feafa0018693cfad44ec0 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 13:07:29 -0700 Subject: [PATCH 276/373] fix types --- python/langsmith/client.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 959365132..4011ae287 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -615,7 +615,7 @@ def __init__( else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) - self._settings = self._get_settings() + self._settings: Union[dict, None] = None def _repr_html_(self) -> str: """Return an HTML representation of the instance with a link to the URL. 
@@ -712,7 +712,8 @@ def _get_settings(self) -> dict: """ if self._settings is None: response = self.request_with_retries("GET", "/settings") - self._settings = response.json() + settings: dict = response.json() + self._settings = settings return self._settings From 2ade88b54a88a6280b520359575371c6f2b67cce Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 14:32:18 -0700 Subject: [PATCH 277/373] fixes --- python/langsmith/client.py | 83 +++++++++++-------- python/langsmith/schemas.py | 12 +++ .../tests/integration_tests/test_prompts.py | 8 +- 3 files changed, 63 insertions(+), 40 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 4011ae287..d46bd75c5 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -615,7 +615,7 @@ def __init__( else ls_utils.get_env_var("HIDE_OUTPUTS") == "true" ) - self._settings: Union[dict, None] = None + self._settings: Union[ls_schemas.LangSmithSettings, None] = None def _repr_html_(self) -> str: """Return an HTML representation of the instance with a link to the URL. @@ -704,7 +704,7 @@ def info(self) -> ls_schemas.LangSmithInfo: self._info = ls_schemas.LangSmithInfo() return self._info - def _get_settings(self) -> dict: + def _get_settings(self) -> ls_schemas.LangSmithSettings: """Get the settings for the current tenant. Returns: @@ -712,8 +712,8 @@ def _get_settings(self) -> dict: """ if self._settings is None: response = self.request_with_retries("GET", "/settings") - settings: dict = response.json() - self._settings = settings + ls_utils.raise_for_status_with_text(response) + self._settings = ls_schemas.LangSmithSettings(**response.json()) return self._settings @@ -4690,14 +4690,14 @@ def _current_tenant_is_owner(self, owner: str) -> bool: bool: True if the current tenant is the owner, False otherwise. 
""" settings = self._get_settings() - return owner == "-" or settings["tenant_handle"] == owner + return owner == "-" or settings.tenant_handle == owner def _owner_conflict_error( self, action: str, owner: str ) -> ls_utils.LangSmithUserError: return ls_utils.LangSmithUserError( f"Cannot {action} for another tenant.\n" - f"Current tenant: {self._get_settings()['tenant_handle']},\n" + f"Current tenant: {self._get_settings().tenant_handle},\n" f"Requested tenant: {owner}" ) @@ -4765,7 +4765,7 @@ def _get_prompt_url(self, prompt_identifier: str) -> str: settings = self._get_settings() return ( f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" - f"?organizationId={settings['id']}" + f"?organizationId={settings.id}" ) def _prompt_exists(self, prompt_identifier: str) -> bool: @@ -4875,7 +4875,7 @@ def create_prompt( *, description: Optional[str] = None, readme: Optional[str] = None, - tags: Optional[List[str]] = None, + tags: Optional[Sequence[str]] = None, is_public: bool = False, ) -> ls_schemas.Prompt: """Create a new prompt. @@ -4886,7 +4886,7 @@ def create_prompt( prompt_name (str): The name of the prompt. description (Optional[str]): A description of the prompt. readme (Optional[str]): A readme for the prompt. - tags (Optional[List[str]]): A list of tags for the prompt. + tags (Optional[Sequence[str]]): A list of tags for the prompt. is_public (bool): Whether the prompt should be public. Defaults to False. Returns: @@ -4897,7 +4897,7 @@ def create_prompt( HTTPError: If the server request fails. """ settings = self._get_settings() - if is_public and not settings.get("tenant_handle"): + if is_public and not settings.tenant_handle: raise ls_utils.LangSmithUserError( "Cannot create a public prompt without first\n" "creating a LangChain Hub handle. 
" @@ -4909,7 +4909,7 @@ def create_prompt( if not self._current_tenant_is_owner(owner=owner): raise self._owner_conflict_error("create a prompt", owner) - json: Dict[str, Union[str, bool, List[str]]] = { + json: Dict[str, Union[str, bool, Sequence[str]]] = { "repo_handle": prompt_name, "description": description or "", "readme": readme or "", @@ -4926,7 +4926,7 @@ def create_commit( prompt_identifier: str, object: Any, *, - parent_commit_hash: Optional[str] = "latest", + parent_commit_hash: Optional[str] = None, ) -> str: """Create a commit for an existing prompt. @@ -4934,7 +4934,7 @@ def create_commit( prompt_identifier (str): The identifier of the prompt. object (Any): The LangChain object to commit. parent_commit_hash (Optional[str]): The hash of the parent commit. - Defaults to "latest". + Defaults to latest commit. Returns: str: The url of the prompt commit. @@ -4962,7 +4962,7 @@ def create_commit( owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier) prompt_owner_and_name = f"{owner}/{prompt_name}" - if parent_commit_hash == "latest": + if parent_commit_hash == "latest" or parent_commit_hash is None: parent_commit_hash = self._get_latest_commit_hash(prompt_owner_and_name) request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict} @@ -4980,7 +4980,7 @@ def update_prompt( *, description: Optional[str] = None, readme: Optional[str] = None, - tags: Optional[List[str]] = None, + tags: Optional[Sequence[str]] = None, is_public: Optional[bool] = None, is_archived: Optional[bool] = None, ) -> Dict[str, Any]: @@ -4992,7 +4992,7 @@ def update_prompt( prompt_identifier (str): The identifier of the prompt to update. description (Optional[str]): New description for the prompt. readme (Optional[str]): New readme for the prompt. - tags (Optional[List[str]]): New list of tags for the prompt. + tags (Optional[Sequence[str]]): New list of tags for the prompt. is_public (Optional[bool]): New public status for the prompt. 
is_archived (Optional[bool]): New archived status for the prompt. @@ -5004,7 +5004,7 @@ def update_prompt( HTTPError: If the server request fails. """ settings = self._get_settings() - if is_public and not settings.get("tenant_handle"): + if is_public and not settings.tenant_handle: raise ValueError( "Cannot create a public prompt without first\n" "creating a LangChain Hub handle. " @@ -5012,7 +5012,7 @@ def update_prompt( "https://smith.langchain.com/prompts" ) - json: Dict[str, Union[str, bool, List[str]]] = {} + json: Dict[str, Union[str, bool, Sequence[str]]] = {} if description is not None: json["description"] = description @@ -5158,11 +5158,11 @@ def push_prompt( prompt_identifier: str, *, object: Optional[Any] = None, - parent_commit_hash: Optional[str] = "latest", + parent_commit_hash: str = "latest", is_public: bool = False, - description: Optional[str] = "", - readme: Optional[str] = "", - tags: Optional[List[str]] = [], + description: Optional[str] = None, + readme: Optional[str] = None, + tags: Optional[Sequence[str]] = None, ) -> str: """Push a prompt to the LangSmith API. @@ -5174,14 +5174,14 @@ def push_prompt( Args: prompt_identifier (str): The identifier of the prompt. object (Optional[Any]): The LangChain object to push. - parent_commit_hash (Optional[str]): The parent commit hash. + parent_commit_hash (str): The parent commit hash. Defaults to "latest". is_public (bool): Whether the prompt should be public. Defaults to False. description (Optional[str]): A description of the prompt. Defaults to an empty string. readme (Optional[str]): A readme for the prompt. Defaults to an empty string. - tags (Optional[List[str]]): A list of tags for the prompt. + tags (Optional[Sequence[str]]): A list of tags for the prompt. Defaults to an empty list. 
Returns: @@ -5367,7 +5367,8 @@ def _tracing_sub_thread_func( def convert_prompt_to_openai_format( - messages: Any, stop: Optional[List[str]] = None, **kwargs: Any + messages: Any, + model_kwargs: Optional[Dict[str, Any]] = None, ) -> dict: """Convert a prompt to OpenAI format. @@ -5375,11 +5376,15 @@ def convert_prompt_to_openai_format( Args: messages (Any): The messages to convert. - stop (Optional[List[str]]): Stop sequences for the prompt. - **kwargs: Additional arguments for the conversion. + model_kwargs (Optional[Dict[str, Any]]): Model configuration arguments including + `stop` and any other required arguments. Defaults to None. Returns: dict: The prompt in OpenAI format. + + Raises: + ImportError: If the `langchain_openai` package is not installed. + ls_utils.LangSmithError: If there is an error during the conversion process. """ try: from langchain_openai import ChatOpenAI @@ -5391,17 +5396,18 @@ def convert_prompt_to_openai_format( openai = ChatOpenAI() + model_kwargs = model_kwargs or {} + stop = model_kwargs.pop("stop", None) + try: - return openai._get_request_payload(messages, stop=stop, **kwargs) + return openai._get_request_payload(messages, stop=stop, **model_kwargs) except Exception as e: raise ls_utils.LangSmithError(f"Error converting to OpenAI format: {e}") def convert_prompt_to_anthropic_format( messages: Any, - model_name: str = "claude-2", - stop: Optional[List[str]] = None, - **kwargs: Any, + model_kwargs: Optional[Dict[str, Any]] = None, ) -> dict: """Convert a prompt to Anthropic format. @@ -5409,9 +5415,9 @@ def convert_prompt_to_anthropic_format( Args: messages (Any): The messages to convert. - model_name (Optional[str]): The model name to use. Defaults to "claude-2". - stop (Optional[List[str]]): Stop sequences for the prompt. - **kwargs: Additional arguments for the conversion. + model_kwargs (Optional[Dict[str, Any]]): + Model configuration arguments including `model_name` and `stop`. + Defaults to None. 
Returns: dict: The prompt in Anthropic format. @@ -5425,11 +5431,16 @@ def convert_prompt_to_anthropic_format( "Install with `pip install langchain_anthropic`" ) + model_kwargs = model_kwargs or {} + model_name = model_kwargs.pop("model_name", "claude-3-haiku-20240307") + stop = model_kwargs.pop("stop", None) + timeout = model_kwargs.pop("timeout", None) + anthropic = ChatAnthropic( - model_name=model_name, timeout=None, stop=stop, base_url=None, api_key=None + model_name=model_name, timeout=timeout, stop=stop, **model_kwargs ) try: - return anthropic._get_request_payload(messages, stop=stop, **kwargs) + return anthropic._get_request_payload(messages, stop=stop) except Exception as e: raise ls_utils.LangSmithError(f"Error converting to Anthropic format: {e}") diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index f7eb3d955..1bf5787d9 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -667,6 +667,18 @@ class LangSmithInfo(BaseModel): Example.update_forward_refs() +class LangSmithSettings(BaseModel): + """Settings for the LangSmith tenant.""" + + id: str + """The ID of the tenant.""" + display_name: str + """The display name of the tenant.""" + created_at: datetime + """The creation time of the tenant.""" + tenant_handle: Optional[str] = None + + class FeedbackIngestToken(BaseModel): """Represents the schema for a feedback ingest token. 
diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 538d7abd7..ed47244e0 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -150,7 +150,7 @@ def chat_prompt_template(): def test_current_tenant_is_owner(langsmith_client: Client): settings = langsmith_client._get_settings() - assert langsmith_client._current_tenant_is_owner(settings["tenant_handle"]) + assert langsmith_client._current_tenant_is_owner(settings.tenant_handle) assert langsmith_client._current_tenant_is_owner("-") assert not langsmith_client._current_tenant_is_owner("non_existent_owner") @@ -244,7 +244,7 @@ def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemp assert pulled_prompt == pulled_prompt_2 # test pulling with tenant handle and name - tenant_handle = langsmith_client._get_settings()["tenant_handle"] + tenant_handle = langsmith_client._get_settings().tenant_handle pulled_prompt_3 = langsmith_client.pull_prompt(f"{tenant_handle}/{prompt_name}") assert pulled_prompt.metadata and pulled_prompt_3.metadata assert ( @@ -254,7 +254,7 @@ def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemp assert pulled_prompt_3.metadata["lc_hub_owner"] == tenant_handle # test pulling with handle, name and commit hash - tenant_handle = langsmith_client._get_settings()["tenant_handle"] + tenant_handle = langsmith_client._get_settings().tenant_handle pulled_prompt_4 = langsmith_client.pull_prompt( f"{tenant_handle}/{prompt_name}:latest" ) @@ -545,7 +545,7 @@ def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): ) assert res == { - "model": "claude-2", + "model": "claude-3-haiku-20240307", "max_tokens": 1024, "messages": [{"role": "user", "content": "What is the meaning of life?"}], "system": "You are a chatbot", From 03328ec8eaeaef571df2b542fbedead68488a930 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 
2024 14:39:21 -0700 Subject: [PATCH 278/373] lint --- python/tests/integration_tests/test_prompts.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index ed47244e0..6a669c299 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -150,7 +150,7 @@ def chat_prompt_template(): def test_current_tenant_is_owner(langsmith_client: Client): settings = langsmith_client._get_settings() - assert langsmith_client._current_tenant_is_owner(settings.tenant_handle) + assert langsmith_client._current_tenant_is_owner(settings.tenant_handle or "-") assert langsmith_client._current_tenant_is_owner("-") assert not langsmith_client._current_tenant_is_owner("non_existent_owner") @@ -540,12 +540,10 @@ def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"}) - res = convert_prompt_to_anthropic_format( - invoked, - ) + res = convert_prompt_to_anthropic_format(invoked, {"model_name": "claude-2"}) assert res == { - "model": "claude-3-haiku-20240307", + "model": "claude-2", "max_tokens": 1024, "messages": [{"role": "user", "content": "What is the meaning of life?"}], "system": "You are a chatbot", From 977a53edf7f5a939b1c54ec251578608522417dd Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 14:47:59 -0700 Subject: [PATCH 279/373] Update the `Symbol.for("lc:child_config")` if it's present --- js/src/langchain.ts | 18 ++++++++----- js/src/run_trees.ts | 62 ++++++++++++++++++++++++++++++++++++++------- 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index a3a4de845..2dfb8c338 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -77,12 +77,18 @@ export 
async function getLangchainCallbacks( } if (langChainTracer != null) { - Object.assign(langChainTracer, { - runMap, - client: runTree.client, - projectName: runTree.project_name || langChainTracer.projectName, - exampleId: runTree.reference_example_id || langChainTracer.exampleId, - }); + if (langChainTracer.updateFromRunTree) { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore @langchain/core can use a different version of LangSmith + langChainTracer.updateFromRunTree(runTree); + } else { + Object.assign(langChainTracer, { + runMap, + client: runTree.client, + projectName: runTree.project_name || langChainTracer.projectName, + exampleId: runTree.reference_example_id || langChainTracer.exampleId, + }); + } } return callbacks; diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index b1219d819..ac52f1c31 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -80,16 +80,19 @@ export interface RunnableConfigLike { interface CallbackManagerLike { handlers: TracerLike[]; getParentRunId?: () => string | undefined; + copy?: () => CallbackManagerLike; } interface TracerLike { name: string; } -interface LangChainTracerLike extends TracerLike { + +export interface LangChainTracerLike extends TracerLike { name: "langchain_tracer"; projectName: string; getRun?: (id: string) => RunTree | undefined; client: Client; + updateFromRunTree?: (runTree: RunTree) => void; } interface HeadersLike { @@ -236,6 +239,36 @@ export class RunTree implements BaseRun { child_execution_order: child_execution_order, }); + type ExtraWithSymbol = Record; + const LC_CHILD = Symbol.for("lc:child_config"); + + const presentConfig = + (config.extra as ExtraWithSymbol | undefined)?.[LC_CHILD] ?? 
+ (this.extra as ExtraWithSymbol)[LC_CHILD]; + + // tracing for LangChain is defined by the _parentRunId and runMap of the tracer + if (isRunnableConfigLike(presentConfig)) { + const newConfig: RunnableConfigLike = { ...presentConfig }; + const callbacks: CallbackManagerLike | unknown[] | undefined = + isCallbackManagerLike(newConfig.callbacks) + ? newConfig.callbacks.copy?.() + : undefined; + + if (callbacks) { + // update the parent run id + Object.assign(callbacks, { _parentRunId: child.id }); + + // only populate if we're in a newer LC.JS version + callbacks.handlers + ?.find(isLangChainTracerLike) + ?.updateFromRunTree?.(child); + + newConfig.callbacks = callbacks; + } + + (child.extra as ExtraWithSymbol)[LC_CHILD] = newConfig; + } + // propagate child_execution_order upwards const visited = new Set(); let current: RunTree | undefined = this as RunTree; @@ -475,15 +508,26 @@ export function isRunTree(x?: unknown): x is RunTree { ); } -function containsLangChainTracerLike(x?: unknown): x is LangChainTracerLike[] { +function isLangChainTracerLike(x: unknown): x is LangChainTracerLike { return ( - Array.isArray(x) && - x.some((callback: unknown) => { - return ( - typeof (callback as LangChainTracerLike).name === "string" && - (callback as LangChainTracerLike).name === "langchain_tracer" - ); - }) + typeof x === "object" && + x != null && + typeof (x as LangChainTracerLike).name === "string" && + (x as LangChainTracerLike).name === "langchain_tracer" + ); +} + +function containsLangChainTracerLike(x: unknown): x is LangChainTracerLike[] { + return ( + Array.isArray(x) && x.some((callback) => isLangChainTracerLike(callback)) + ); +} + +export function isCallbackManagerLike(x: unknown): x is CallbackManagerLike { + return ( + typeof x === "object" && + x != null && + Array.isArray((x as CallbackManagerLike).handlers) ); } From def3dbf60cd3619460cfaa7fc6c60491bfee43c7 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 15:09:12 -0700 Subject: [PATCH 
280/373] Skip tests which depend on @langchain/core 0.2.17 --- js/src/tests/traceable_langchain.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/js/src/tests/traceable_langchain.test.ts b/js/src/tests/traceable_langchain.test.ts index a4f587388..45986dfe6 100644 --- a/js/src/tests/traceable_langchain.test.ts +++ b/js/src/tests/traceable_langchain.test.ts @@ -395,7 +395,8 @@ test("explicit nested", async () => { }); }); -describe("automatic tracing", () => { +// skip until the @langchain/core 0.2.17 is out +describe.skip("automatic tracing", () => { it("root langchain", async () => { const { callSpy, langChainTracer } = mockClient(); From 4e7bcef6e0806eef2ea0e076ca00a86bf75e505b Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 15:20:30 -0700 Subject: [PATCH 281/373] Fix interop --- js/src/langchain.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/js/src/langchain.ts b/js/src/langchain.ts index 2dfb8c338..6eca684e7 100644 --- a/js/src/langchain.ts +++ b/js/src/langchain.ts @@ -77,7 +77,10 @@ export async function getLangchainCallbacks( } if (langChainTracer != null) { - if (langChainTracer.updateFromRunTree) { + if ( + "updateFromRunTree" in langChainTracer && + typeof langChainTracer === "function" + ) { // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore @langchain/core can use a different version of LangSmith langChainTracer.updateFromRunTree(runTree); From af33f38d7ed84294fb329b6ac2e5bdc983713ead Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 16 Jul 2024 17:11:32 -0700 Subject: [PATCH 282/373] update version --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index ec6123c5b..7efeed844 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.86" +version = "0.1.88" description = "Client library to connect to 
the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 616fdedd4ac8eeeb2be0b5aa64e87716ad1db29e Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 19:25:34 -0700 Subject: [PATCH 283/373] Remove comments --- js/src/traceable.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 384dc11f9..ee977e58f 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -24,7 +24,6 @@ import { isPromiseMethod, } from "./utils/asserts.js"; -// make sure we also properly initialise the LangChain context storage AsyncLocalStorageProviderSingleton.initializeGlobalInstance( new AsyncLocalStorage() ); @@ -477,8 +476,6 @@ export function traceable any>( onEnd(currentRunTree); } } - - // TODO: update child_execution_order of the parent run await postRunPromise; await currentRunTree?.patchRun(); } From b006c915766abde58afb4b085d979b8d02852cb0 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Tue, 16 Jul 2024 19:26:32 -0700 Subject: [PATCH 284/373] Bump to 0.1.38 --- js/package.json | 4 ++-- js/src/index.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index a3a2d979e..a93b2ad24 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.37", + "version": "0.1.38", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -261,4 +261,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/src/index.ts b/js/src/index.ts index 429988932..75c978d6d 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.37"; +export const __version__ = "0.1.38"; From 3ed7078877fae4c0411f1d0c0b227bac90892763 Mon Sep 17 
00:00:00 2001 From: Tat Dat Duong Date: Wed, 17 Jul 2024 11:41:40 -0700 Subject: [PATCH 285/373] fix(js): pass other traceable options in wrapSDK --- js/src/wrappers/openai.ts | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index 0ea56f882..5652ce4e0 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -280,7 +280,7 @@ export const wrapOpenAI = ( const _wrapClient = ( sdk: T, runName: string, - options?: { client?: Client } + options?: Omit ): T => { return new Proxy(sdk, { get(target, propKey, receiver) { @@ -312,6 +312,10 @@ const _wrapClient = ( }); }; +type WrapSDKOptions = Partial< + Omit & { runName: string } +>; + /** * Wrap an arbitrary SDK, enabling automatic LangSmith tracing. * Method signatures are unchanged. @@ -325,9 +329,14 @@ const _wrapClient = ( */ export const wrapSDK = ( sdk: T, - options?: { client?: Client; runName?: string } + options?: WrapSDKOptions ): T => { - return _wrapClient(sdk, options?.runName ?? sdk.constructor?.name, { - client: options?.client, - }); + const traceableOptions = options ? { ...options } : undefined; + if (traceableOptions != null) delete traceableOptions.runName; + + return _wrapClient( + sdk, + options?.runName ?? 
sdk.constructor?.name, + traceableOptions + ); }; From 4147a60a757ca71b8d046906cc65628e2b90a917 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 17 Jul 2024 14:12:06 -0700 Subject: [PATCH 286/373] CVar Propagation in evals (#877) --- python/langsmith/_expect.py | 9 +-- python/langsmith/_internal/_aiter.py | 13 +++- python/langsmith/_testing.py | 3 +- python/langsmith/beta/_evals.py | 5 +- python/langsmith/evaluation/_arunner.py | 56 ++++++++++-------- python/langsmith/evaluation/_runner.py | 69 +++++++++++++--------- python/langsmith/run_helpers.py | 4 +- python/pyproject.toml | 2 +- python/tests/evaluation/test_evaluation.py | 1 - 9 files changed, 93 insertions(+), 69 deletions(-) diff --git a/python/langsmith/_expect.py b/python/langsmith/_expect.py index fe459e409..967390597 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -46,7 +46,6 @@ def test_output_semantically_close(): from __future__ import annotations import atexit -import concurrent.futures import inspect from typing import ( TYPE_CHECKING, @@ -91,15 +90,13 @@ def __init__( client: Optional[ls_client.Client], key: str, value: Any, - _executor: Optional[concurrent.futures.ThreadPoolExecutor] = None, + _executor: Optional[ls_utils.ContextThreadPoolExecutor] = None, run_id: Optional[str] = None, ): self._client = client self.key = key self.value = value - self._executor = _executor or concurrent.futures.ThreadPoolExecutor( - max_workers=3 - ) + self._executor = _executor or ls_utils.ContextThreadPoolExecutor(max_workers=3) rt = rh.get_current_run_tree() self._run_id = rt.trace_id if rt else run_id @@ -255,7 +252,7 @@ class _Expect: def __init__(self, *, client: Optional[ls_client.Client] = None): self._client = client - self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) + self.executor = ls_utils.ContextThreadPoolExecutor(max_workers=3) atexit.register(self.executor.shutdown, wait=True) def embedding_distance( 
diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py index 1088ed07d..a2f0701a1 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -279,8 +279,13 @@ async def process_item(item): async def process_generator(): tasks = [] + accepts_context = asyncio_accepts_context() async for item in generator: - task = asyncio.create_task(process_item(item)) + if accepts_context: + context = contextvars.copy_context() + task = asyncio.create_task(process_item(item), context=context) + else: + task = asyncio.create_task(process_item(item)) tasks.append(task) if n is not None and len(tasks) >= n: done, pending = await asyncio.wait( @@ -319,3 +324,9 @@ async def aio_to_thread(func, /, *args, **kwargs): ctx = contextvars.copy_context() func_call = functools.partial(ctx.run, func, *args, **kwargs) return await loop.run_in_executor(None, func_call) + + +@functools.lru_cache(maxsize=1) +def asyncio_accepts_context(): + """Check if the current asyncio event loop accepts a context argument.""" + return accepts_context(asyncio.create_task) diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index 42cec872b..3d5ac9c3b 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -1,7 +1,6 @@ from __future__ import annotations import atexit -import concurrent.futures import datetime import functools import inspect @@ -392,7 +391,7 @@ def __init__( self._experiment = experiment self._dataset = dataset self._version: Optional[datetime.datetime] = None - self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + self._executor = ls_utils.ContextThreadPoolExecutor(max_workers=1) atexit.register(_end_tests, self) @property diff --git a/python/langsmith/beta/_evals.py b/python/langsmith/beta/_evals.py index f41bc8785..03b099fff 100644 --- a/python/langsmith/beta/_evals.py +++ b/python/langsmith/beta/_evals.py @@ -4,7 +4,6 @@ """ import collections -import 
concurrent.futures import datetime import itertools import uuid @@ -218,6 +217,8 @@ def compute_test_metrics( Returns: None: This function does not return any value. """ + from langsmith import ContextThreadPoolExecutor + evaluators_: List[ls_eval.RunEvaluator] = [] for func in evaluators: if isinstance(func, ls_eval.RunEvaluator): @@ -230,7 +231,7 @@ def compute_test_metrics( ) client = client or Client() traces = _load_nested_traces(project_name, client) - with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrency) as executor: + with ContextThreadPoolExecutor(max_workers=max_concurrency) as executor: results = executor.map( client.evaluate_run, *zip(*_outer_product(traces, evaluators_)) ) diff --git a/python/langsmith/evaluation/_arunner.py b/python/langsmith/evaluation/_arunner.py index 3c4973c15..7cc50bffa 100644 --- a/python/langsmith/evaluation/_arunner.py +++ b/python/langsmith/evaluation/_arunner.py @@ -622,7 +622,12 @@ async def _arun_evaluators( **{"experiment": self.experiment_name}, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + } ): run = current_results["run"] example = current_results["example"] @@ -676,11 +681,11 @@ async def _aapply_summary_evaluators( **current_context, "project_name": "evaluators", "metadata": metadata, + "enabled": True, } ): for evaluator in summary_evaluators: try: - # TODO: Support async evaluators summary_eval_result = evaluator(runs, examples) flattened_results = self.client._select_eval_results( summary_eval_result, @@ -808,30 +813,31 @@ def _get_run(r: run_trees.RunTree) -> None: nonlocal run run = r - try: - await fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if 
example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + await fn( + example.inputs, + langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) def _ensure_async_traceable( diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index f7470e92a..6b73c3b4f 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -689,7 +689,9 @@ def evaluate_and_submit_feedback( return result tqdm = _load_tqdm() - with cf.ThreadPoolExecutor(max_workers=max_concurrency or 1) as executor: + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency or 1 + ) as executor: futures = [] for example_id, runs_list in tqdm(runs_dict.items()): results[example_id] = { @@ -1207,7 +1209,7 @@ def _predict( ) else: - with cf.ThreadPoolExecutor(max_concurrency) as executor: + with ls_utils.ContextThreadPoolExecutor(max_concurrency) as executor: futures = [ executor.submit( _forward, @@ -1239,7 +1241,12 @@ def _run_evaluators( }, } with rh.tracing_context( - **{**current_context, "project_name": "evaluators", "metadata": metadata} + **{ + **current_context, + "project_name": "evaluators", + "metadata": metadata, + "enabled": True, + } ): run = current_results["run"] example = current_results["example"] @@ -1280,10 +1287,13 @@ def _score( (e.g. 
from a previous prediction step) """ if max_concurrency == 0: + context = copy_context() for current_results in self.get_results(): - yield self._run_evaluators(evaluators, current_results) + yield context.run(self._run_evaluators, evaluators, current_results) else: - with cf.ThreadPoolExecutor(max_workers=max_concurrency) as executor: + with ls_utils.ContextThreadPoolExecutor( + max_workers=max_concurrency + ) as executor: futures = [] for current_results in self.get_results(): futures.append( @@ -1305,7 +1315,7 @@ def _apply_summary_evaluators( runs.append(run) examples.append(example) aggregate_feedback = [] - with cf.ThreadPoolExecutor() as executor: + with ls_utils.ContextThreadPoolExecutor() as executor: project_id = self._get_experiment().id current_context = rh.get_tracing_context() metadata = { @@ -1447,30 +1457,31 @@ def _get_run(r: run_trees.RunTree) -> None: nonlocal run run = r - try: - fn( - example.inputs, - langsmith_extra=rh.LangSmithExtra( - reference_example_id=example.id, - on_end=_get_run, - project_name=experiment_name, - metadata={ - **metadata, - "example_version": ( - example.modified_at.isoformat() - if example.modified_at - else example.created_at.isoformat() - ), - }, - client=client, - ), + with rh.tracing_context(enabled=True): + try: + fn( + example.inputs, + langsmith_extra=rh.LangSmithExtra( + reference_example_id=example.id, + on_end=_get_run, + project_name=experiment_name, + metadata={ + **metadata, + "example_version": ( + example.modified_at.isoformat() + if example.modified_at + else example.created_at.isoformat() + ), + }, + client=client, + ), + ) + except Exception as e: + logger.error(f"Error running target function: {e}") + return _ForwardResults( + run=cast(schemas.Run, run), + example=example, ) - except Exception as e: - logger.error(f"Error running target function: {e}") - return _ForwardResults( - run=cast(schemas.Run, run), - example=example, - ) def _resolve_data( diff --git a/python/langsmith/run_helpers.py 
b/python/langsmith/run_helpers.py index 0a6bdc212..1e2adb087 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -442,7 +442,7 @@ async def async_wrapper( ) try: - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if func_accepts_parent_run: kwargs["run_tree"] = run_container["new_run"] if not func_accepts_config: @@ -492,7 +492,7 @@ async def async_generator_wrapper( kwargs.pop("config", None) async_gen_result = func(*args, **kwargs) # Can't iterate through if it's a coroutine - accepts_context = aitertools.accepts_context(asyncio.create_task) + accepts_context = aitertools.asyncio_accepts_context() if inspect.iscoroutine(async_gen_result): if accepts_context: async_gen_result = await asyncio.create_task( diff --git a/python/pyproject.toml b/python/pyproject.toml index 7efeed844..08132026d 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.88" +version = "0.1.89" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/evaluation/test_evaluation.py b/python/tests/evaluation/test_evaluation.py index ecb371806..e05f9e920 100644 --- a/python/tests/evaluation/test_evaluation.py +++ b/python/tests/evaluation/test_evaluation.py @@ -41,7 +41,6 @@ def predict(inputs: dict) -> dict: }, num_repetitions=3, ) - results.wait() assert len(results) == 30 examples = client.list_examples(dataset_name=dataset_name) for example in examples: From 83b004cd26bbe3e14d3516f481af15cc2c06be0b Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Wed, 17 Jul 2024 15:13:58 -0700 Subject: [PATCH 287/373] Fix lint --- js/src/wrappers/openai.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index 5652ce4e0..7870e460e 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -1,6 +1,6 @@ import { OpenAI } from "openai"; import type { APIPromise } from "openai/core"; -import type { Client, RunTreeConfig } from "../index.js"; +import type { RunTreeConfig } from "../index.js"; import { isTraceableFunction, traceable } from "../traceable.js"; // Extra leniency around types in case multiple OpenAI SDK versions get installed From 02a60d7e0cad6bc66fa4cdeb0fffd5e3f3042ca0 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 17 Jul 2024 16:42:56 -0700 Subject: [PATCH 288/373] Add context propgation of session name (#879) To trace to new projects naturally. Also makes it easier to use distributed tracing when evaluating. 1. Add project name to headers 2. Load parent using project name in headers, cvar, rtcvar, or explicitly passed in 3. 
Add a couple tests --- python/langsmith/run_helpers.py | 32 ++++++++++++++++--- python/langsmith/run_trees.py | 14 +++++++- python/pyproject.toml | 2 +- python/tests/integration_tests/fake_server.py | 15 +++++++-- .../test_context_propagation.py | 1 + .../tests/integration_tests/test_prompts.py | 4 +-- python/tests/unit_tests/test_run_helpers.py | 4 ++- 7 files changed, 60 insertions(+), 12 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 1e2adb087..4afa8e69e 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -685,6 +685,19 @@ def generator_wrapper( return decorator +def _get_project_name(project_name: Optional[str]) -> Optional[str]: + prt = _PARENT_RUN_TREE.get() + return ( + # Maintain tree consistency first + _PROJECT_NAME.get() + or (prt.session_name if prt else None) + # Then check the passed in value + or project_name + # fallback to the default for the environment + or utils.get_tracer_project() + ) + + @contextlib.contextmanager def trace( name: str, @@ -714,7 +727,6 @@ def trace( is_disabled = old_ctx.get("enabled", True) is False outer_tags = _TAGS.get() outer_metadata = _METADATA.get() - outer_project = _PROJECT_NAME.get() or utils.get_tracer_project() parent_run_ = _get_parent_run( {"parent": parent, "run_tree": kwargs.get("run_tree"), "client": client} ) @@ -726,7 +738,7 @@ def trace( extra_outer = extra or {} extra_outer["metadata"] = metadata - project_name_ = project_name or outer_project + project_name_ = _get_project_name(project_name) # If it's disabled, we break the tree if parent_run_ is not None and not is_disabled: new_run = parent_run_.create_child( @@ -975,12 +987,19 @@ def _get_parent_run( return parent if isinstance(parent, dict): return run_trees.RunTree.from_headers( - parent, client=langsmith_extra.get("client") + parent, + client=langsmith_extra.get("client"), + # Precedence: headers -> cvar -> explicit -> env var + 
project_name=_get_project_name(langsmith_extra.get("project_name")), ) if isinstance(parent, str): - return run_trees.RunTree.from_dotted_order( - parent, client=langsmith_extra.get("client") + dort = run_trees.RunTree.from_dotted_order( + parent, + client=langsmith_extra.get("client"), + # Precedence: cvar -> explicit -> env var + project_name=_get_project_name(langsmith_extra.get("project_name")), ) + return dort run_tree = langsmith_extra.get("run_tree") if run_tree: return run_tree @@ -1032,6 +1051,9 @@ def _setup_run( project_cv = _PROJECT_NAME.get() selected_project = ( project_cv # From parent trace + or ( + parent_run_.session_name if parent_run_ else None + ) # from parent run attempt 2 (not managed by traceable) or langsmith_extra.get("project_name") # at invocation time or container_input["project_name"] # at decorator time or utils.get_tracer_project() # default diff --git a/python/langsmith/run_trees.py b/python/langsmith/run_trees.py index e41e7eaa2..66887ada6 100644 --- a/python/langsmith/run_trees.py +++ b/python/langsmith/run_trees.py @@ -410,6 +410,8 @@ def from_headers(cls, headers: Dict[str, str], **kwargs: Any) -> Optional[RunTre init_args["extra"]["metadata"] = metadata tags = sorted(set(baggage.tags + init_args.get("tags", []))) init_args["tags"] = tags + if baggage.project_name: + init_args["project_name"] = baggage.project_name return RunTree(**init_args) @@ -421,6 +423,7 @@ def to_headers(self) -> Dict[str, str]: baggage = _Baggage( metadata=self.extra.get("metadata", {}), tags=self.tags, + project_name=self.session_name, ) headers["baggage"] = baggage.to_header() return headers @@ -433,10 +436,12 @@ def __init__( self, metadata: Optional[Dict[str, str]] = None, tags: Optional[List[str]] = None, + project_name: Optional[str] = None, ): """Initialize the Baggage object.""" self.metadata = metadata or {} self.tags = tags or [] + self.project_name = project_name @classmethod def from_header(cls, header_value: Optional[str]) -> _Baggage: @@ 
-445,6 +450,7 @@ def from_header(cls, header_value: Optional[str]) -> _Baggage: return cls() metadata = {} tags = [] + project_name = None try: for item in header_value.split(","): key, value = item.split("=", 1) @@ -452,10 +458,12 @@ def from_header(cls, header_value: Optional[str]) -> _Baggage: metadata = json.loads(urllib.parse.unquote(value)) elif key == f"{LANGSMITH_PREFIX}tags": tags = urllib.parse.unquote(value).split(",") + elif key == f"{LANGSMITH_PREFIX}project": + project_name = urllib.parse.unquote(value) except Exception as e: logger.warning(f"Error parsing baggage header: {e}") - return cls(metadata=metadata, tags=tags) + return cls(metadata=metadata, tags=tags, project_name=project_name) def to_header(self) -> str: """Return the Baggage object as a header value.""" @@ -470,6 +478,10 @@ def to_header(self) -> str: items.append( f"{LANGSMITH_PREFIX}tags={urllib.parse.quote(serialized_tags)}" ) + if self.project_name: + items.append( + f"{LANGSMITH_PREFIX}project={urllib.parse.quote(self.project_name)}" + ) return ",".join(items) diff --git a/python/pyproject.toml b/python/pyproject.toml index 08132026d..984c1b39b 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.89" +version = "0.1.90" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/integration_tests/fake_server.py b/python/tests/integration_tests/fake_server.py index 93850d9da..f42f328f2 100644 --- a/python/tests/integration_tests/fake_server.py +++ b/python/tests/integration_tests/fake_server.py @@ -14,6 +14,7 @@ def fake_function(): assert parent_run is not None assert "did-propagate" in span.tags or [] assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -25,6 +26,7 @@ def fake_function_two(foo: str): assert parent_run is not None assert "did-propagate" in (span.tags or []) assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -36,6 +38,7 @@ def fake_function_three(foo: str): assert parent_run is not None assert "did-propagate" in (span.tags or []) assert span.metadata["some-cool-value"] == 42 + assert span.session_name == "distributed-tracing" return "Fake function response" @@ -47,8 +50,16 @@ async def fake_route(request: Request): parent=request.headers, ): fake_function() - fake_function_two("foo", langsmith_extra={"parent": request.headers}) + fake_function_two( + "foo", + langsmith_extra={ + "parent": request.headers, + "project_name": "Definitely-not-your-grandpas-project", + }, + ) - with tracing_context(parent=request.headers): + with tracing_context( + parent=request.headers, project_name="Definitely-not-your-grandpas-project" + ): fake_function_three("foo") return {"message": "Fake route response"} diff --git a/python/tests/integration_tests/test_context_propagation.py b/python/tests/integration_tests/test_context_propagation.py index 32cd1f74d..096f8bb5d 100644 --- a/python/tests/integration_tests/test_context_propagation.py +++ b/python/tests/integration_tests/test_context_propagation.py @@ -54,6 +54,7 @@ async def test_tracing_fake_server(fake_server): langsmith_extra={ "metadata": {"some-cool-value": 
42}, "tags": ["did-propagate"], + "project_name": "distributed-tracing", }, ) assert result["message"] == "Fake route response" diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 6a669c299..80f6e5c4c 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -524,8 +524,7 @@ def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): res = convert_prompt_to_openai_format( invoked, ) - - assert res == { + expected = { "messages": [ {"content": "You are a chatbot", "role": "system"}, {"content": "What is the meaning of life?", "role": "user"}, @@ -535,6 +534,7 @@ def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate): "n": 1, "temperature": 0.7, } + assert {k: res[k] for k in expected.keys()} == expected def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate): diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index ee2029145..434c10bca 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -1045,6 +1045,7 @@ def my_grandchild_tool(text: str, callbacks: Any = None) -> str: run = lct.run_map[str(gc_run_id)] assert run.name == "my_grandchild_tool" assert run.run_type == "tool" + assert lct.project_name == "foo" parent_run = lct.run_map[str(run.parent_run_id)] assert parent_run assert parent_run.name == "my_traceable" @@ -1063,6 +1064,7 @@ def my_traceable(text: str) -> str: assert rt.parent_run_id assert rt.parent_run assert rt.parent_run.run_type == "tool" + assert rt.session_name == "foo" return my_grandchild_tool.invoke({"text": text}, {"run_id": gc_run_id}) @tool @@ -1071,7 +1073,7 @@ def my_tool(text: str) -> str: return my_traceable(text) mock_client = _get_mock_client() - tracer = LangChainTracer(client=mock_client) + tracer = LangChainTracer(client=mock_client, 
project_name="foo") my_tool.invoke({"text": "hello"}, {"callbacks": [tracer]}) From 9bf79171d7600752011784a849c4cb637b8a0234 Mon Sep 17 00:00:00 2001 From: Brian Vander Schaaf Date: Thu, 18 Jul 2024 11:08:27 -0400 Subject: [PATCH 289/373] chore: add EU API URl to docs & infer web URL --- js/README.md | 1 + js/package.json | 2 +- js/src/client.ts | 3 +++ js/src/tests/client.test.ts | 9 +++++++++ python/README.md | 1 + python/langsmith/client.py | 2 ++ python/tests/unit_tests/test_client.py | 3 +++ 7 files changed, 20 insertions(+), 1 deletion(-) diff --git a/js/README.md b/js/README.md index 9eba64647..b8d337bdd 100644 --- a/js/README.md +++ b/js/README.md @@ -53,6 +53,7 @@ Tracing can be activated by setting the following environment variables or by ma ```typescript process.env["LANGSMITH_TRACING"] = "true"; process.env["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"; +process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region process.env["LANGCHAIN_API_KEY"] = ""; // process.env["LANGCHAIN_PROJECT"] = "My Project Name"; // Optional: "default" is used if not set ``` diff --git a/js/package.json b/js/package.json index f6bb02d18..a3a2d979e 100644 --- a/js/package.json +++ b/js/package.json @@ -261,4 +261,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/src/client.ts b/js/src/client.ts index 6cba0c43b..b68105c41 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -481,6 +481,9 @@ export class Client { } else if (this.apiUrl.split(".", 1)[0].includes("dev")) { this.webUrl = "https://dev.smith.langchain.com"; return this.webUrl; + } else if (this.apiUrl.split(".", 1)[0].includes("eu")) { + this.webUrl = "https://eu.smith.langchain.com"; + return this.webUrl; } else { this.webUrl = "https://smith.langchain.com"; return this.webUrl; diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index 245c9487e..000dd460b 100644 --- 
a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -115,6 +115,15 @@ describe("Client", () => { expect(result).toBe("https://dev.smith.langchain.com"); }); + it("should return 'https://eu.smith.langchain.com' if apiUrl contains 'eu'", () => { + const client = new Client({ + apiUrl: "https://eu.smith.langchain.com/api", + apiKey: "test-api-key", + }); + const result = (client as any).getHostUrl(); + expect(result).toBe("https://eu.smith.langchain.com"); + }); + it("should return 'https://smith.langchain.com' for any other apiUrl", () => { const client = new Client({ apiUrl: "https://smith.langchain.com/api", diff --git a/python/README.md b/python/README.md index 97fbfb296..85de1e11a 100644 --- a/python/README.md +++ b/python/README.md @@ -70,6 +70,7 @@ Tracing can be activated by setting the following environment variables or by ma import os os.environ["LANGSMITH_TRACING_V2"] = "true" os.environ["LANGSMITH_ENDPOINT"] = "https://api.smith.langchain.com" +# os.environ["LANGSMITH_ENDPOINT"] = "https://eu.api.smith.langchain.com" # If signed up in the EU region os.environ["LANGSMITH_API_KEY"] = "" # os.environ["LANGSMITH_PROJECT"] = "My Project Name" # Optional: "default" is used if not set ``` diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 3ddcc9df0..d37d04438 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -654,6 +654,8 @@ def _host_url(self) -> str: elif parsed_url.path.endswith("/api"): new_path = parsed_url.path.rsplit("/api", 1)[0] link = urllib_parse.urlunparse(parsed_url._replace(path=new_path)) + elif parsed_url.netloc.startswith("eu."): + link = "https://eu.smith.langchain.com" elif parsed_url.netloc.startswith("dev."): link = "https://dev.smith.langchain.com" else: diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index a653cf704..0d247d836 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -898,6 
+898,9 @@ def test_host_url(_: MagicMock) -> None: client = Client(api_url="http://localhost:8000", api_key="API_KEY") assert client._host_url == "http://localhost" + client = Client(api_url="https://eu.api.smith.langchain.com", api_key="API_KEY") + assert client._host_url == "https://eu.smith.langchain.com" + client = Client(api_url="https://dev.api.smith.langchain.com", api_key="API_KEY") assert client._host_url == "https://dev.smith.langchain.com" From 7f0e26f0fb91be5d9d1c682366b4b8dd8879d186 Mon Sep 17 00:00:00 2001 From: Brian Vander Schaaf Date: Thu, 18 Jul 2024 11:11:28 -0400 Subject: [PATCH 290/373] fix readme --- js/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/README.md b/js/README.md index b8d337bdd..7aa73a1c9 100644 --- a/js/README.md +++ b/js/README.md @@ -53,7 +53,7 @@ Tracing can be activated by setting the following environment variables or by ma ```typescript process.env["LANGSMITH_TRACING"] = "true"; process.env["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"; -process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region +// process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region process.env["LANGCHAIN_API_KEY"] = ""; // process.env["LANGCHAIN_PROJECT"] = "My Project Name"; // Optional: "default" is used if not set ``` From 8025656980e1c6410b0ff00cb6ce4ab259ff0c7a Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 18 Jul 2024 11:31:55 -0700 Subject: [PATCH 291/373] feat(datasets): add support for bulk updating examples --- js/package.json | 2 +- js/src/client.ts | 22 +++++++ js/src/index.ts | 2 +- js/src/schemas.ts | 4 ++ js/src/tests/client.int.test.ts | 36 +++++++++- python/langsmith/client.py | 66 +++++++++++++++++++ python/pyproject.toml | 2 +- python/tests/integration_tests/test_client.py | 36 +++++++++- 8 files changed, 165 insertions(+), 5 deletions(-) diff --git 
a/js/package.json b/js/package.json index f6bb02d18..a93b2ad24 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.37", + "version": "0.1.38", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/client.ts b/js/src/client.ts index 6cba0c43b..75ca4a993 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -10,6 +10,7 @@ import { Example, ExampleCreate, ExampleUpdate, + ExampleUpdateWithId, Feedback, FeedbackConfig, FeedbackIngestToken, @@ -2300,6 +2301,27 @@ export class Client { return result; } + public async updateExamples(update: ExampleUpdateWithId[]): Promise { + const response = await this.caller.call( + fetch, + `${this.apiUrl}/examples/bulk`, + { + method: "PATCH", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(update), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + if (!response.ok) { + throw new Error( + `Failed to update examples: ${response.status} ${response.statusText}` + ); + } + const result = await response.json(); + return result; + } + public async listDatasetSplits({ datasetId, datasetName, diff --git a/js/src/index.ts b/js/src/index.ts index 429988932..75c978d6d 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.37"; +export const __version__ = "0.1.38"; diff --git a/js/src/schemas.ts b/js/src/schemas.ts index 6cba693ef..0f1ebc126 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -247,6 +247,10 @@ export interface ExampleUpdate { metadata?: KVMap; split?: string | string[]; } + +export interface ExampleUpdateWithId extends ExampleUpdate { + id: string; +} export interface BaseDataset { name: string; description: string; diff --git 
a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 55d0fc898..7275369aa 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -113,11 +113,45 @@ test.concurrent("Test LangSmith Client Dataset CRD", async () => { const newExampleValue2 = await client.readExample(example.id); expect(newExampleValue2.inputs.col1).toBe("updatedExampleCol3"); expect(newExampleValue2.metadata?.dataset_split).toStrictEqual(["my_split3"]); + + const newExample = await client.createExample( + { col1: "newAddedExampleCol1" }, + { col2: "newAddedExampleCol2" }, + { datasetId: newDataset.id } + ); + const newExampleValue_ = await client.readExample(newExample.id); + expect(newExampleValue_.inputs.col1).toBe("newAddedExampleCol1"); + expect(newExampleValue_.outputs?.col2).toBe("newAddedExampleCol2"); + + await client.updateExamples([ + { + id: newExample.id, + inputs: { col1: "newUpdatedExampleCol1" }, + outputs: { col2: "newUpdatedExampleCol2" }, + metadata: { foo: "baz" }, + }, + { + id: example.id, + inputs: { col1: "newNewUpdatedExampleCol" }, + outputs: { col2: "newNewUpdatedExampleCol2" }, + metadata: { foo: "qux" }, + }, + ]); + const updatedExample = await client.readExample(newExample.id); + expect(updatedExample.inputs.col1).toBe("newUpdatedExampleCol1"); + expect(updatedExample.outputs?.col2).toBe("newUpdatedExampleCol2"); + expect(updatedExample.metadata?.foo).toBe("baz"); + + const updatedExample2 = await client.readExample(example.id); + expect(updatedExample2.inputs.col1).toBe("newNewUpdatedExampleCol"); + expect(updatedExample2.outputs?.col2).toBe("newNewUpdatedExampleCol2"); + expect(updatedExample2.metadata?.foo).toBe("qux"); + await client.deleteExample(example.id); const examples2 = await toArray( client.listExamples({ datasetId: newDataset.id }) ); - expect(examples2.length).toBe(1); + expect(examples2.length).toBe(2); await client.deleteDataset({ datasetId }); const rawDataset = await client.createDataset(fileName, { 
diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 3ddcc9df0..54ed54df5 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3336,6 +3336,72 @@ def update_example( ls_utils.raise_for_status_with_text(response) return response.json() + def update_examples( + self, + *, + example_ids: Sequence[ID_TYPE], + inputs: Optional[Sequence[Optional[Dict[str, Any]]]] = None, + outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, + metadata: Optional[Sequence[Optional[Dict]]] = None, + splits: Optional[Sequence[Optional[str | List[str]]]] = None, + dataset_id: Optional[ID_TYPE] = None, + ) -> None: + """Update multiple examples. + + Parameters + ---------- + example_ids : Sequence[ID_TYPE] + The IDs of the examples to update. + inputs : Optional[Sequence[Optional[Dict[str, Any]]], default=None + The input values for the examples. + outputs : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None + The output values for the examples. + metadata : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None + The metadata for the examples. + split : Optional[Sequence[Optional[str | List[str]]]], default=None + The splits for the examples, which are divisions + of your dataset such as 'train', 'test', or 'validation'. + dataset_id : Optional[ID_TYPE], default=None + The ID of the dataset that contains the examples. 
+ + Returns: + ------- + None + """ + examples = [ + { + "id": id_, + "inputs": in_, + "outputs": out_, + "dataset_id": dataset_id_, + "metadata": metadata_, + "split": split_, + } + for id_, in_, out_, metadata_, split_, dataset_id_ in zip( + example_ids, + inputs or [None] * len(example_ids), + outputs or [None] * len(example_ids), + metadata or [None] * len(example_ids), + splits or [None] * len(example_ids), + [dataset_id] * len(example_ids) or [None] * len(example_ids), + ) + ] + response = self.request_with_retries( + "PATCH", + "/examples/bulk", + headers={**self._headers, "Content-Type": "application/json"}, + data=( + _dumps_json( + [ + {k: v for k, v in example.items() if v is not None} + for example in examples + ] + ) + ), + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + def delete_example(self, example_id: ID_TYPE) -> None: """Delete an example by ID. diff --git a/python/pyproject.toml b/python/pyproject.toml index 7efeed844..08132026d 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.88" +version = "0.1.89" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index f706407ab..ea01b257c 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -98,11 +98,45 @@ def test_datasets(langchain_client: Client) -> None: assert updated_example_value.outputs["col2"] == "updatedExampleCol2" assert (updated_example_value.metadata or {}).get("foo") == "bar" + new_example = langchain_client.create_example( + inputs={"col1": "newAddedExampleCol1"}, + outputs={"col2": "newAddedExampleCol2"}, + dataset_id=new_dataset.id, + ) + example_value = langchain_client.read_example(new_example.id) + assert example_value.inputs is not None + assert example_value.inputs["col1"] == "newAddedExampleCol1" + assert example_value.outputs is not None + assert example_value.outputs["col2"] == "newAddedExampleCol2" + + langchain_client.update_examples( + example_ids=[new_example.id, example.id], + inputs=[{"col1": "newUpdatedExampleCol1"}, {"col1": "newNewUpdatedExampleCol"}], + outputs=[ + {"col2": "newUpdatedExampleCol2"}, + {"col2": "newNewUpdatedExampleCol2"}, + ], + metadata=[{"foo": "baz"}, {"foo": "qux"}], + ) + updated_example = langchain_client.read_example(new_example.id) + assert updated_example.id == new_example.id + assert updated_example.inputs["col1"] == "newUpdatedExampleCol1" + assert updated_example.outputs is not None + assert updated_example.outputs["col2"] == "newUpdatedExampleCol2" + assert (updated_example.metadata or {}).get("foo") == "baz" + + updated_example = langchain_client.read_example(example.id) + assert updated_example.id == example.id + assert updated_example.inputs["col1"] == "newNewUpdatedExampleCol" + assert updated_example.outputs is not None + assert updated_example.outputs["col2"] == "newNewUpdatedExampleCol2" + assert (updated_example.metadata or {}).get("foo") == "qux" + langchain_client.delete_example(example.id) 
examples2 = list( langchain_client.list_examples(dataset_id=new_dataset.id) # type: ignore ) - assert len(examples2) == 1 + assert len(examples2) == 2 langchain_client.delete_dataset(dataset_id=dataset_id) From c84e0ba051fa33321c3eb55d000494d76bd174ac Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 18 Jul 2024 11:33:20 -0700 Subject: [PATCH 292/373] bump version --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index 984c1b39b..f2e871815 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.90" +version = "0.1.91" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 17f031ac847d10b21b87b3821f499fabee57f0df Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 18 Jul 2024 11:56:43 -0700 Subject: [PATCH 293/373] Update client.py --- python/langsmith/client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 54ed54df5..1df0b2296 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3344,7 +3344,7 @@ def update_examples( outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None, metadata: Optional[Sequence[Optional[Dict]]] = None, splits: Optional[Sequence[Optional[str | List[str]]]] = None, - dataset_id: Optional[ID_TYPE] = None, + dataset_ids: Optional[Sequence[Optional[ID_TYPE]]] = None, ) -> None: """Update multiple examples. @@ -3361,8 +3361,8 @@ def update_examples( split : Optional[Sequence[Optional[str | List[str]]]], default=None The splits for the examples, which are divisions of your dataset such as 'train', 'test', or 'validation'. 
- dataset_id : Optional[ID_TYPE], default=None - The ID of the dataset that contains the examples. + dataset_ids : Optional[Sequence[Optional[ID_TYPE]]], default=None + The IDs of the datasets to move the examples to. Returns: ------- @@ -3383,7 +3383,7 @@ def update_examples( outputs or [None] * len(example_ids), metadata or [None] * len(example_ids), splits or [None] * len(example_ids), - [dataset_id] * len(example_ids) or [None] * len(example_ids), + dataset_ids or [None] * len(example_ids), ) ] response = self.request_with_retries( From c563ae80b8bb65408572b65e7d35fe5bab68c219 Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Thu, 18 Jul 2024 12:16:21 -0700 Subject: [PATCH 294/373] Format --- js/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/package.json b/js/package.json index a93b2ad24..45d3394fe 100644 --- a/js/package.json +++ b/js/package.json @@ -261,4 +261,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} From a04867710ed034621647c67034eb8fa3b9e1a996 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 18 Jul 2024 12:24:17 -0700 Subject: [PATCH 295/373] Avoid reexporting --- js/src/run_trees.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/src/run_trees.ts b/js/src/run_trees.ts index ac52f1c31..4427305e0 100644 --- a/js/src/run_trees.ts +++ b/js/src/run_trees.ts @@ -87,7 +87,7 @@ interface TracerLike { name: string; } -export interface LangChainTracerLike extends TracerLike { +interface LangChainTracerLike extends TracerLike { name: "langchain_tracer"; projectName: string; getRun?: (id: string) => RunTree | undefined; @@ -523,7 +523,7 @@ function containsLangChainTracerLike(x: unknown): x is LangChainTracerLike[] { ); } -export function isCallbackManagerLike(x: unknown): x is CallbackManagerLike { +function isCallbackManagerLike(x: unknown): x is CallbackManagerLike { return ( typeof x === "object" && x != null && From 79f2bc5b9a55a58e3f316dc444278991f7eff35b Mon 
Sep 17 00:00:00 2001 From: Brian Vander Schaaf Date: Thu, 18 Jul 2024 11:08:27 -0400 Subject: [PATCH 296/373] chore: add EU API URl to docs & infer web URL --- js/README.md | 1 + js/src/client.ts | 3 +++ js/src/tests/client.test.ts | 9 +++++++++ python/README.md | 1 + python/langsmith/client.py | 2 ++ python/tests/unit_tests/test_client.py | 3 +++ 6 files changed, 19 insertions(+) diff --git a/js/README.md b/js/README.md index 9eba64647..b8d337bdd 100644 --- a/js/README.md +++ b/js/README.md @@ -53,6 +53,7 @@ Tracing can be activated by setting the following environment variables or by ma ```typescript process.env["LANGSMITH_TRACING"] = "true"; process.env["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"; +process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region process.env["LANGCHAIN_API_KEY"] = ""; // process.env["LANGCHAIN_PROJECT"] = "My Project Name"; // Optional: "default" is used if not set ``` diff --git a/js/src/client.ts b/js/src/client.ts index 6cba0c43b..b68105c41 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -481,6 +481,9 @@ export class Client { } else if (this.apiUrl.split(".", 1)[0].includes("dev")) { this.webUrl = "https://dev.smith.langchain.com"; return this.webUrl; + } else if (this.apiUrl.split(".", 1)[0].includes("eu")) { + this.webUrl = "https://eu.smith.langchain.com"; + return this.webUrl; } else { this.webUrl = "https://smith.langchain.com"; return this.webUrl; diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index 245c9487e..000dd460b 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -115,6 +115,15 @@ describe("Client", () => { expect(result).toBe("https://dev.smith.langchain.com"); }); + it("should return 'https://eu.smith.langchain.com' if apiUrl contains 'eu'", () => { + const client = new Client({ + apiUrl: "https://eu.smith.langchain.com/api", + apiKey: "test-api-key", + }); + const result = (client as 
any).getHostUrl(); + expect(result).toBe("https://eu.smith.langchain.com"); + }); + it("should return 'https://smith.langchain.com' for any other apiUrl", () => { const client = new Client({ apiUrl: "https://smith.langchain.com/api", diff --git a/python/README.md b/python/README.md index 97fbfb296..85de1e11a 100644 --- a/python/README.md +++ b/python/README.md @@ -70,6 +70,7 @@ Tracing can be activated by setting the following environment variables or by ma import os os.environ["LANGSMITH_TRACING_V2"] = "true" os.environ["LANGSMITH_ENDPOINT"] = "https://api.smith.langchain.com" +# os.environ["LANGSMITH_ENDPOINT"] = "https://eu.api.smith.langchain.com" # If signed up in the EU region os.environ["LANGSMITH_API_KEY"] = "" # os.environ["LANGSMITH_PROJECT"] = "My Project Name" # Optional: "default" is used if not set ``` diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 3ddcc9df0..d37d04438 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -654,6 +654,8 @@ def _host_url(self) -> str: elif parsed_url.path.endswith("/api"): new_path = parsed_url.path.rsplit("/api", 1)[0] link = urllib_parse.urlunparse(parsed_url._replace(path=new_path)) + elif parsed_url.netloc.startswith("eu."): + link = "https://eu.smith.langchain.com" elif parsed_url.netloc.startswith("dev."): link = "https://dev.smith.langchain.com" else: diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index a653cf704..0d247d836 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -898,6 +898,9 @@ def test_host_url(_: MagicMock) -> None: client = Client(api_url="http://localhost:8000", api_key="API_KEY") assert client._host_url == "http://localhost" + client = Client(api_url="https://eu.api.smith.langchain.com", api_key="API_KEY") + assert client._host_url == "https://eu.smith.langchain.com" + client = Client(api_url="https://dev.api.smith.langchain.com", api_key="API_KEY") 
assert client._host_url == "https://dev.smith.langchain.com" From a502116a9a98eb6737d27354533f8632961e4f36 Mon Sep 17 00:00:00 2001 From: Brian Vander Schaaf Date: Thu, 18 Jul 2024 11:11:28 -0400 Subject: [PATCH 297/373] fix readme --- js/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/README.md b/js/README.md index b8d337bdd..7aa73a1c9 100644 --- a/js/README.md +++ b/js/README.md @@ -53,7 +53,7 @@ Tracing can be activated by setting the following environment variables or by ma ```typescript process.env["LANGSMITH_TRACING"] = "true"; process.env["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"; -process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region +// process.env["LANGCHAIN_ENDPOINT"] = "https://eu.api.smith.langchain.com"; // If signed up in the EU region process.env["LANGCHAIN_API_KEY"] = ""; // process.env["LANGCHAIN_PROJECT"] = "My Project Name"; // Optional: "default" is used if not set ``` From 902007218e231cd990b0e5ebf0680e03d57ebcd0 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 18 Jul 2024 13:35:54 -0700 Subject: [PATCH 298/373] fix(datasets): fix return value of update_examples --- python/langsmith/client.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index f6703e714..203527392 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3347,7 +3347,7 @@ def update_examples( metadata: Optional[Sequence[Optional[Dict]]] = None, splits: Optional[Sequence[Optional[str | List[str]]]] = None, dataset_ids: Optional[Sequence[Optional[ID_TYPE]]] = None, - ) -> None: + ) -> Dict[str, Any]: """Update multiple examples. Parameters @@ -3368,7 +3368,8 @@ def update_examples( Returns: ------- - None + Dict[str, Any] + The response from the server (specifies the number of examples updated). 
""" examples = [ { From c249862936961ea3287122f86ac5a237cab16089 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Thu, 18 Jul 2024 13:36:42 -0700 Subject: [PATCH 299/373] Update pyproject.toml --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index f2e871815..3ed59d26f 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.91" +version = "0.1.92" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 726387df50baf2104cd807165a52734535cadf9f Mon Sep 17 00:00:00 2001 From: Ankush Gola Date: Thu, 18 Jul 2024 17:59:52 -0700 Subject: [PATCH 300/373] add optional explanation_description --- python/langsmith/evaluation/llm_evaluator.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index 1b2d39cfc..d0ef4fec3 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -4,6 +4,7 @@ from pydantic import BaseModel +import langsmith.beta._utils as beta_utils from langsmith.evaluation import EvaluationResult, EvaluationResults, RunEvaluator from langsmith.schemas import Example, Run @@ -15,6 +16,7 @@ class CategoricalScoreConfig(BaseModel): choices: List[str] description: str include_explanation: bool = False + explanation_description: Optional[str] = None class ContinuousScoreConfig(BaseModel): @@ -25,6 +27,7 @@ class ContinuousScoreConfig(BaseModel): max: float = 1 description: str include_explanation: bool = False + explanation_description: Optional[str] = None def _create_score_json_schema( @@ -52,7 +55,11 @@ def _create_score_json_schema( if score_config.include_explanation: properties["explanation"] = { "type": "string", - 
"description": "The explanation for the score.", + "description": ( + "The explanation for the score." + if score_config.explanation_description is None + else score_config.explanation_description + ), } return { @@ -194,6 +201,7 @@ def _initialize( chat_model = chat_model.with_structured_output(self.score_schema) self.runnable = self.prompt | chat_model + @beta_utils.warn_beta def evaluate_run( self, run: Run, example: Optional[Example] = None ) -> Union[EvaluationResult, EvaluationResults]: @@ -202,6 +210,7 @@ def evaluate_run( output: dict = cast(dict, self.runnable.invoke(variables)) return self._parse_output(output) + @beta_utils.warn_beta async def aevaluate_run( self, run: Run, example: Optional[Example] = None ) -> Union[EvaluationResult, EvaluationResults]: From 9ba160d95ea8e3c7a4c52fcae84c62f361598a65 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 18 Jul 2024 23:04:21 -0700 Subject: [PATCH 301/373] Upgrade devDeps --- js/package.json | 11 +++---- js/yarn.lock | 81 ++++++++++++++++++++++++++++--------------------- 2 files changed, 50 insertions(+), 42 deletions(-) diff --git a/js/package.json b/js/package.json index f6bb02d18..8d988da81 100644 --- a/js/package.json +++ b/js/package.json @@ -103,9 +103,9 @@ "@babel/preset-env": "^7.22.4", "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", - "langchain": "^0.2.0", - "@langchain/core": "^0.2.0", - "@langchain/langgraph": "^0.0.19", + "langchain": "^0.2.10", + "@langchain/core": "^0.2.17", + "@langchain/langgraph": "^0.0.29", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.59.8", @@ -141,9 +141,6 @@ "optional": true } }, - "resolutions": { - "@langchain/core": "0.2.0" - }, "lint-staged": { "**/*.{ts,tsx}": [ "prettier --write --ignore-unknown", @@ -261,4 +258,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} diff --git a/js/yarn.lock b/js/yarn.lock index 8e4cee5e8..cf459cb03 100644 --- a/js/yarn.lock +++ 
b/js/yarn.lock @@ -1300,40 +1300,41 @@ "@jridgewell/resolve-uri" "3.1.0" "@jridgewell/sourcemap-codec" "1.4.14" -"@langchain/core@0.2.0", "@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>0.1.56 <0.3.0", "@langchain/core@^0.1.61", "@langchain/core@^0.2.0", "@langchain/core@~0.2.0": - version "0.2.0" - resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.0.tgz#19c6374a5ad80daf8e14cb58582bc988109a1403" - integrity sha512-UbCJUp9eh2JXd9AW/vhPbTgtZoMgTqJgSan5Wf/EP27X8JM65lWdCOpJW+gHyBXvabbyrZz3/EGaptTUL5gutw== +"@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>=0.2.11 <0.3.0", "@langchain/core@>=0.2.16 <0.3.0", "@langchain/core@^0.2.17": + version "0.2.17" + resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.17.tgz#dfd44a2ccf79cef88ba765741a1c277bc22e483f" + integrity sha512-WnFiZ7R/ZUVeHO2IgcSL7Tu+CjApa26Iy99THJP5fax/NF8UQCc/ZRcw2Sb/RUuRPVm6ALDass0fSQE1L9YNJg== dependencies: ansi-styles "^5.0.0" camelcase "6" decamelize "1.2.0" js-tiktoken "^1.0.12" - langsmith "~0.1.7" + langsmith "~0.1.30" ml-distance "^4.0.0" mustache "^4.2.0" p-queue "^6.6.2" p-retry "4" - uuid "^9.0.0" + uuid "^10.0.0" zod "^3.22.4" zod-to-json-schema "^3.22.3" -"@langchain/langgraph@^0.0.19": - version "0.0.19" - resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.19.tgz#c1cfeee7d0e2b91dd31cba7144f8a7283babc61d" - integrity sha512-V0t40qbwUyzEpL3Q0jHPVTVljdLc3YJCHIF9Q+sw9HRWwfBO1nWJHHbCxgVzeJ2NsX1X/dUyNkq8LbSEsTYpTQ== +"@langchain/langgraph@^0.0.29": + version "0.0.29" + resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.29.tgz#eda31d101e7a75981e0929661c41ab2461ff8640" + integrity sha512-BSFFJarkXqrMdH9yH6AIiBCw4ww0VsXXpBwqaw+9/7iulW0pBFRSkWXHjEYnmsdCRgyIxoP8vYQAQ8Jtu3qzZA== dependencies: - "@langchain/core" "^0.1.61" - uuid "^9.0.1" + "@langchain/core" ">=0.2.16 <0.3.0" + uuid "^10.0.0" + zod "^3.23.8" -"@langchain/openai@~0.0.28": - version "0.0.33" - resolved 
"https://registry.yarnpkg.com/@langchain/openai/-/openai-0.0.33.tgz#af88d815ff0095018c879d3a1a5a32b2795b5c69" - integrity sha512-hTBo9y9bHtFvMT5ySBW7TrmKhLSA91iNahigeqAFBVrLmBDz+6rzzLFc1mpq6JEAR3fZKdaUXqso3nB23jIpTw== +"@langchain/openai@>=0.1.0 <0.3.0": + version "0.2.4" + resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.2.4.tgz#02d210d2aacdaf654bceb686b3ec49517fb3b1ea" + integrity sha512-PQGmnnKbsC8odwjGbYf2aHAQEZ/uVXYtXqKnwk7BTVMZlFnt+Rt9eigp940xMKAadxHzqtKJpSd7Xf6G+LI6KA== dependencies: - "@langchain/core" ">0.1.56 <0.3.0" + "@langchain/core" ">=0.2.16 <0.3.0" js-tiktoken "^1.0.12" - openai "^4.41.1" + openai "^4.49.1" zod "^3.22.4" zod-to-json-schema "^3.22.3" @@ -3488,24 +3489,24 @@ kleur@^3.0.3: resolved "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -langchain@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.2.0.tgz#555d84538962720cd7223f6c3ca4bd060978ebf3" - integrity sha512-8c7Dg9OIPk4lFIQGyfOytXbUGLLSsxs9MV53cLODspkOGzaUpwy5FGBie30SrOxIEFJo+FDaJgpDAFO3Xi4NMw== +langchain@^0.2.10: + version "0.2.10" + resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.2.10.tgz#35b74038e54650efbd9fe7d9d59765fe2790bb47" + integrity sha512-i0fC+RlX/6w6HKPWL3N5zrhrkijvpe2Xu4t/qbWzq4uFf8WBfPwmNFom3RtO2RatuPnHLm8mViU6nw8YBDiVwA== dependencies: - "@langchain/core" "~0.2.0" - "@langchain/openai" "~0.0.28" + "@langchain/core" ">=0.2.11 <0.3.0" + "@langchain/openai" ">=0.1.0 <0.3.0" "@langchain/textsplitters" "~0.0.0" binary-extensions "^2.2.0" js-tiktoken "^1.0.12" js-yaml "^4.1.0" jsonpointer "^5.0.1" langchainhub "~0.0.8" - langsmith "~0.1.7" + langsmith "~0.1.30" ml-distance "^4.0.0" openapi-types "^12.1.3" p-retry "4" - uuid "^9.0.0" + uuid "^10.0.0" yaml "^2.2.1" zod "^3.22.4" zod-to-json-schema "^3.22.3" @@ -3515,10 +3516,10 @@ langchainhub@~0.0.8: resolved 
"https://registry.yarnpkg.com/langchainhub/-/langchainhub-0.0.10.tgz#7579440a3255d67571b7046f3910593c5664f064" integrity sha512-mOVso7TGTMSlvTTUR1b4zUIMtu8zgie/pcwRm1SeooWwuHYMQovoNXjT6gEjvWEZ6cjt4gVH+1lu2tp1/phyIQ== -langsmith@~0.1.7: - version "0.1.25" - resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.25.tgz#3d06b6fc62abb1a6fc16540d40ddb48bd795f128" - integrity sha512-Hft4Y1yoMgFgCUXVQklRZ7ndmLQ/6FmRZE9P3u5BRdMq5Fa0hpg8R7jd7bLLBXkAjqcFvWo0AGhpb8MMY5FAiA== +langsmith@~0.1.30: + version "0.1.38" + resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.38.tgz#51c50db3110ffff15f522d0486dbeb069c82ca45" + integrity sha512-h8UHgvtGzIoo/52oN7gZlAPP+7FREFnZYFJ7HSPOYej9DE/yQMg6qjgIn9RwjhUgWWQlmvRN6fM3kqbCCDX5EQ== dependencies: "@types/uuid" "^9.0.1" commander "^10.0.1" @@ -3796,10 +3797,10 @@ onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" -openai@^4.38.5, openai@^4.41.1: - version "4.47.1" - resolved "https://registry.yarnpkg.com/openai/-/openai-4.47.1.tgz#1d23c7a8eb3d7bcdc69709cd905f4c9af0181dba" - integrity sha512-WWSxhC/69ZhYWxH/OBsLEirIjUcfpQ5+ihkXKp06hmeYXgBBIUCa9IptMzYx6NdkiOCsSGYCnTIsxaic3AjRCQ== +openai@^4.38.5, openai@^4.49.1: + version "4.52.7" + resolved "https://registry.yarnpkg.com/openai/-/openai-4.52.7.tgz#e32b000142287a9e8eda8512ba28df33d11ec1f1" + integrity sha512-dgxA6UZHary6NXUHEDj5TWt8ogv0+ibH+b4pT5RrWMjiRZVylNwLcw/2ubDrX5n0oUmHX/ZgudMJeemxzOvz7A== dependencies: "@types/node" "^18.11.18" "@types/node-fetch" "^2.6.4" @@ -4477,7 +4478,12 @@ uri-js@^4.2.2: dependencies: punycode "^2.1.0" -uuid@^9.0.0, uuid@^9.0.1: +uuid@^10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294" + integrity sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ== + +uuid@^9.0.0: version "9.0.1" resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" integrity 
sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== @@ -4640,3 +4646,8 @@ zod@^3.22.4: version "3.22.4" resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.4.tgz#f31c3a9386f61b1f228af56faa9255e845cf3fff" integrity sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg== + +zod@^3.23.8: + version "3.23.8" + resolved "https://registry.yarnpkg.com/zod/-/zod-3.23.8.tgz#e37b957b5d52079769fb8097099b592f0ef4067d" + integrity sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g== From e7214ecd193b05fa145d5476dbcb5052a92692c7 Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Thu, 18 Jul 2024 23:05:03 -0700 Subject: [PATCH 302/373] Deprecate runName --- js/src/wrappers/openai.ts | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index 7870e460e..ba898f131 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -288,10 +288,9 @@ const _wrapClient = ( if (typeof originalValue === "function") { return traceable( originalValue.bind(target), - Object.assign( - { name: [runName, propKey.toString()].join("."), run_type: "llm" }, - options - ) + Object.assign({ run_type: "llm" }, options, { + name: [runName, propKey.toString()].join("."), + }) ); } else if ( originalValue != null && @@ -313,7 +312,12 @@ const _wrapClient = ( }; type WrapSDKOptions = Partial< - Omit & { runName: string } + RunTreeConfig & { + /** + * @deprecated Use `name` instead. + */ + runName: string; + } >; /** @@ -332,11 +336,14 @@ export const wrapSDK = ( options?: WrapSDKOptions ): T => { const traceableOptions = options ? { ...options } : undefined; - if (traceableOptions != null) delete traceableOptions.runName; + if (traceableOptions != null) { + delete traceableOptions.runName; + delete traceableOptions.name; + } return _wrapClient( sdk, - options?.runName ?? 
sdk.constructor?.name, + options?.name ?? options?.runName ?? sdk.constructor?.name, traceableOptions ); }; From 6b094bdb02c30ec05f748e655b80037de4341d0e Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Fri, 19 Jul 2024 11:35:01 -0700 Subject: [PATCH 303/373] Fix doctest (#888) --- python/langsmith/evaluation/_runner.py | 2 ++ python/langsmith/evaluation/integrations/_langchain.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/python/langsmith/evaluation/_runner.py b/python/langsmith/evaluation/_runner.py index 6b73c3b4f..f85dcc482 100644 --- a/python/langsmith/evaluation/_runner.py +++ b/python/langsmith/evaluation/_runner.py @@ -199,6 +199,7 @@ def evaluate( Using the `evaluate` API with an off-the-shelf LangChain evaluator: >>> from langsmith.evaluation import LangChainStringEvaluator + >>> from langchain_openai import ChatOpenAI >>> def prepare_criteria_data(run: Run, example: Example): ... return { ... "prediction": run.outputs["output"], @@ -218,6 +219,7 @@ def evaluate( ... "usefulness": "The prediction is useful if it is correct" ... " and/or asks a useful followup question." ... }, + ... "llm": ChatOpenAI(model="gpt-4o"), ... }, ... prepare_data=prepare_criteria_data, ... ), diff --git a/python/langsmith/evaluation/integrations/_langchain.py b/python/langsmith/evaluation/integrations/_langchain.py index 510e79c12..9478ef653 100644 --- a/python/langsmith/evaluation/integrations/_langchain.py +++ b/python/langsmith/evaluation/integrations/_langchain.py @@ -44,6 +44,7 @@ class LangChainStringEvaluator: Converting a LangChainStringEvaluator to a RunEvaluator: >>> from langsmith.evaluation import LangChainStringEvaluator + >>> from langchain_openai import ChatOpenAI >>> evaluator = LangChainStringEvaluator( ... "criteria", ... config={ @@ -51,6 +52,7 @@ class LangChainStringEvaluator: ... "usefulness": "The prediction is useful if" ... " it is correct and/or asks a useful followup question." ... }, + ... 
"llm": ChatOpenAI(model="gpt-4o"), ... }, ... ) >>> run_evaluator = evaluator.as_run_evaluator() @@ -111,6 +113,7 @@ class LangChainStringEvaluator: ... "accuracy": "Score 1: Completely inaccurate\nScore 5: Somewhat accurate\nScore 10: Completely accurate" ... }, ... "normalize_by": 10, + ... "llm": ChatAnthropic(model="claude-3-opus-20240229"), ... }, ... prepare_data=prepare_data, ... ) From 8773ab7ac670806b8e4b457156d61584e8df46e2 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Fri, 19 Jul 2024 17:45:27 -0700 Subject: [PATCH 304/373] Add run stats endpoints (#890) --- .github/workflows/js_test.yml | 4 +- js/package.json | 4 +- js/src/client.ts | 88 ++++++++++++++++++ js/src/index.ts | 2 +- js/src/tests/client.int.test.ts | 9 ++ python/langsmith/client.py | 91 +++++++++++++++++++ python/tests/integration_tests/test_client.py | 7 ++ python/tests/integration_tests/test_runs.py | 72 ++++++++++----- 8 files changed, 248 insertions(+), 29 deletions(-) diff --git a/.github/workflows/js_test.yml b/.github/workflows/js_test.yml index 172ed9034..1778178cc 100644 --- a/.github/workflows/js_test.yml +++ b/.github/workflows/js_test.yml @@ -81,7 +81,7 @@ jobs: strategy: matrix: os: [ubuntu-latest] - node-version: [18.x, 19.x, 20.x, 21.x, 22.x] + node-version: [18.x, 20.x, "22.4.1"] # See Node.js release schedule at https://nodejs.org/en/about/releases/ include: - os: windows-latest @@ -107,4 +107,4 @@ jobs: - name: Check version run: yarn run check-version - name: Test - run: yarn run test \ No newline at end of file + run: yarn run test diff --git a/js/package.json b/js/package.json index 45d3394fe..90e3086bb 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.38", + "version": "0.1.39", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -261,4 +261,4 @@ }, "./package.json": 
"./package.json" } -} +} \ No newline at end of file diff --git a/js/src/client.ts b/js/src/client.ts index 05752d578..14746a17a 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -1229,6 +1229,94 @@ export class Client { } } + public async getRunStats({ + id, + trace, + parentRun, + runType, + projectNames, + projectIds, + referenceExampleIds, + startTime, + endTime, + error, + query, + filter, + traceFilter, + treeFilter, + isRoot, + dataSourceType, + }: { + id?: string[]; + trace?: string; + parentRun?: string; + runType?: string; + projectNames?: string[]; + projectIds?: string[]; + referenceExampleIds?: string[]; + startTime?: string; + endTime?: string; + error?: boolean; + query?: string; + filter?: string; + traceFilter?: string; + treeFilter?: string; + isRoot?: boolean; + dataSourceType?: string; + }): Promise { + let projectIds_ = projectIds || []; + if (projectNames) { + projectIds_ = [ + ...(projectIds || []), + ...(await Promise.all( + projectNames.map((name) => + this.readProject({ projectName: name }).then( + (project) => project.id + ) + ) + )), + ]; + } + + const payload = { + id, + trace, + parent_run: parentRun, + run_type: runType, + session: projectIds_, + reference_example: referenceExampleIds, + start_time: startTime, + end_time: endTime, + error, + query, + filter, + trace_filter: traceFilter, + tree_filter: treeFilter, + is_root: isRoot, + data_source_type: dataSourceType, + }; + + // Remove undefined values from the payload + const filteredPayload = Object.fromEntries( + Object.entries(payload).filter(([_, value]) => value !== undefined) + ); + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/runs/stats`, + { + method: "POST", + headers: this.headers, + body: JSON.stringify(filteredPayload), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const result = await response.json(); + return result; + } + public async shareRun( runId: string, { shareId }: { shareId?: string } = {} 
diff --git a/js/src/index.ts b/js/src/index.ts index 75c978d6d..73f1007da 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.38"; +export const __version__ = "0.1.39"; diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 7275369aa..29200ce57 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -739,3 +739,12 @@ test.concurrent("list runs limit arg works", async () => { } } }); + +test.concurrent("Test run stats", async () => { + const client = new Client(); + const stats = await client.getRunStats({ + projectNames: ["default"], + runType: "llm", + }); + expect(stats).toBeDefined(); +}); diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 203527392..be40dfb02 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -1768,6 +1768,93 @@ def list_runs( if limit is not None and i + 1 >= limit: break + def get_run_stats( + self, + *, + id: Optional[List[ID_TYPE]] = None, + trace: Optional[ID_TYPE] = None, + parent_run: Optional[ID_TYPE] = None, + run_type: Optional[str] = None, + project_names: Optional[List[str]] = None, + project_ids: Optional[List[ID_TYPE]] = None, + reference_example_ids: Optional[List[ID_TYPE]] = None, + start_time: Optional[str] = None, + end_time: Optional[str] = None, + error: Optional[bool] = None, + query: Optional[str] = None, + filter: Optional[str] = None, + trace_filter: Optional[str] = None, + tree_filter: Optional[str] = None, + is_root: Optional[bool] = None, + data_source_type: Optional[str] = None, + ) -> Dict[str, Any]: + """Get aggregate statistics over queried runs. + + Takes in similar query parameters to `list_runs` and returns statistics + based on the runs that match the query. + + Args: + id (Optional[List[ID_TYPE]]): List of run IDs to filter by. 
+ trace (Optional[ID_TYPE]): Trace ID to filter by. + parent_run (Optional[ID_TYPE]): Parent run ID to filter by. + run_type (Optional[str]): Run type to filter by. + projects (Optional[List[ID_TYPE]]): List of session IDs to filter by. + reference_example (Optional[List[ID_TYPE]]): List of reference example IDs to filter by. + start_time (Optional[str]): Start time to filter by. + end_time (Optional[str]): End time to filter by. + error (Optional[bool]): Filter by error status. + query (Optional[str]): Query string to filter by. + filter (Optional[str]): Filter string to apply. + trace_filter (Optional[str]): Trace filter string to apply. + tree_filter (Optional[str]): Tree filter string to apply. + is_root (Optional[bool]): Filter by root run status. + data_source_type (Optional[str]): Data source type to filter by. + + Returns: + Dict[str, Any]: A dictionary containing the run statistics. + """ # noqa: E501 + from concurrent.futures import ThreadPoolExecutor, as_completed # type: ignore + + project_ids = project_ids or [] + if project_names: + with ThreadPoolExecutor() as executor: + futures = [ + executor.submit(self.read_project, project_name=name) + for name in project_names + ] + for future in as_completed(futures): + project_ids.append(future.result().id) + payload = { + "id": id, + "trace": trace, + "parent_run": parent_run, + "run_type": run_type, + "session": project_ids, + "reference_example": reference_example_ids, + "start_time": start_time, + "end_time": end_time, + "error": error, + "query": query, + "filter": filter, + "trace_filter": trace_filter, + "tree_filter": tree_filter, + "is_root": is_root, + "data_source_type": data_source_type, + } + + # Remove None values from the payload + payload = {k: v for k, v in payload.items() if v is not None} + + response = self.request_with_retries( + "POST", + "/runs/stats", + request_kwargs={ + "data": _dumps_json(payload), + }, + ) + ls_utils.raise_for_status_with_text(response) + return response.json() + 
def get_run_url( self, *, @@ -1777,6 +1864,10 @@ def get_run_url( ) -> str: """Get the URL for a run. + Not recommended for use within your agent runtime. + More for use interacting with runs after the fact + for data analysis or ETL workloads. + Parameters ---------- run : Run diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index ea01b257c..89d57da26 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -698,3 +698,10 @@ def test_surrogates(): run_type="llm", end_time=datetime.datetime.now(datetime.timezone.utc), ) + + +def test_runs_stats(): + langchain_client = Client() + # We always have stuff in the "default" project... + stats = langchain_client.get_run_stats(project_names=["default"], run_type="llm") + assert stats diff --git a/python/tests/integration_tests/test_runs.py b/python/tests/integration_tests/test_runs.py index 405571dee..fbf87ea92 100644 --- a/python/tests/integration_tests/test_runs.py +++ b/python/tests/integration_tests/test_runs.py @@ -117,7 +117,6 @@ async def my_run(text: str): filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' poll_runs_until_count(langchain_client, project_names[0], 1, filter_=filter_) - poll_runs_until_count(langchain_client, project_names[1], 1, filter_=filter_) runs = list( langchain_client.list_runs( project_name=project_names, @@ -296,20 +295,29 @@ async def my_llm(prompt: str) -> str: assert len(runs_) == 8 -async def test_sync_generator(langchain_client: Client): +def test_sync_generator(langchain_client: Client): project_name = "__My Tracer Project - test_sync_generator" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") def my_generator(num: int) -> Generator[str, None, None]: for i in range(num): yield f"Yielded {i}" - results = list(my_generator(5, 
langsmith_extra=dict(project_name=project_name))) + results = list( + my_generator( + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + ) assert results == ["Yielded 0", "Yielded 1", "Yielded 2", "Yielded 3", "Yielded 4"] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=_filter + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_generator" @@ -318,10 +326,9 @@ def my_generator(num: int) -> Generator[str, None, None]: } -async def test_sync_generator_reduce_fn(langchain_client: Client): +def test_sync_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_sync_generator_reduce_fn" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex def reduce_fn(outputs: list) -> dict: return {"my_output": " ".join(outputs)} @@ -331,10 +338,20 @@ def my_generator(num: int) -> Generator[str, None, None]: for i in range(num): yield f"Yielded {i}" - results = list(my_generator(5, langsmith_extra=dict(project_name=project_name))) + results = list( + my_generator( + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), + ) + ) + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' assert results == ["Yielded 0", "Yielded 1", "Yielded 2", "Yielded 3", "Yielded 4"] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, 
filter_=filter_ + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_generator" @@ -347,8 +364,7 @@ def my_generator(num: int) -> Generator[str, None, None]: async def test_async_generator(langchain_client: Client): project_name = "__My Tracer Project - test_async_generator" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex @traceable(run_type="chain") async def my_async_generator(num: int) -> AsyncGenerator[str, None]: @@ -359,7 +375,10 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: results = [ item async for item in my_async_generator( - 5, langsmith_extra=dict(project_name=project_name) + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), ) ] assert results == [ @@ -369,8 +388,11 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: "Async yielded 3", "Async yielded 4", ] - poll_runs_until_count(langchain_client, project_name, 1, max_retries=20) - runs = list(langchain_client.list_runs(project_name=project_name)) + _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' + poll_runs_until_count( + langchain_client, project_name, 1, max_retries=20, filter_=_filter + ) + runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_async_generator" @@ -387,8 +409,7 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: async def test_async_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_async_generator_reduce_fn" - if langchain_client.has_project(project_name): - langchain_client.delete_project(project_name=project_name) + run_meta = uuid.uuid4().hex def reduce_fn(outputs: list) -> dict: return {"my_output": " 
".join(outputs)} @@ -402,7 +423,10 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: results = [ item async for item in my_async_generator( - 5, langsmith_extra=dict(project_name=project_name) + 5, + langsmith_extra=dict( + project_name=project_name, metadata={"test_run": run_meta} + ), ) ] assert results == [ @@ -412,11 +436,11 @@ async def my_async_generator(num: int) -> AsyncGenerator[str, None]: "Async yielded 3", "Async yielded 4", ] - + filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))' poll_runs_until_count( - langchain_client, project_name, 1, max_retries=20, sleep_time=5 + langchain_client, project_name, 1, max_retries=20, sleep_time=5, filter_=filter_ ) - runs = list(langchain_client.list_runs(project_name=project_name)) + runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_)) run = runs[0] assert run.run_type == "chain" assert run.name == "my_async_generator" From 0afc018ed5b4473b93ca0467b29fe64c908e0e87 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Fri, 19 Jul 2024 18:34:05 -0700 Subject: [PATCH 305/373] Async trace context manager (#887) Exposed via the same `trace` CM. Would resolve https://github.com/langchain-ai/langsmith-sdk/issues/882 --- python/langsmith/run_helpers.py | 357 +++++++++++++++----- python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_helpers.py | 34 ++ 3 files changed, 301 insertions(+), 92 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 4afa8e69e..1131400bd 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -43,6 +43,8 @@ from langsmith.env import _runtime_env if TYPE_CHECKING: + from types import TracebackType + from langchain_core.runnables import Runnable LOGGER = logging.getLogger(__name__) @@ -685,6 +687,270 @@ def generator_wrapper( return decorator +class trace: + """Manage a langsmith run in context. 
+ + This class can be used as both a synchronous and asynchronous context manager. + + Parameters: + ----------- + name : str + Name of the run + run_type : ls_client.RUN_TYPE_T, optional + Type of run (e.g., "chain", "llm", "tool"). Defaults to "chain". + inputs : Optional[Dict], optional + Initial input data for the run + project_name : Optional[str], optional + Associates the run with a specific project, overriding defaults + parent : Optional[Union[run_trees.RunTree, str, Mapping]], optional + Parent run, accepts RunTree, dotted order string, or tracing headers + tags : Optional[List[str]], optional + Categorization labels for the run + metadata : Optional[Mapping[str, Any]], optional + Arbitrary key-value pairs for run annotation + client : Optional[ls_client.Client], optional + LangSmith client for specifying a different tenant, + setting custom headers, or modifying API endpoint + run_id : Optional[ls_client.ID_TYPE], optional + Preset identifier for the run + reference_example_id : Optional[ls_client.ID_TYPE], optional + You typically won't set this. It associates this run with a dataset example. + This is only valid for root runs (not children) in an evaluation context. + exceptions_to_handle : Optional[Tuple[Type[BaseException], ...]], optional + Typically not set. Exception types to ignore in what is sent up to LangSmith + extra : Optional[Dict], optional + Typically not set. Use 'metadata' instead. Extra data to be sent to LangSmith. + + Examples: + --------- + Synchronous usage: + >>> with trace("My Operation", run_type="tool", tags=["important"]) as run: + ... result = "foo" # Do some_operation() + ... run.metadata["some-key"] = "some-value" + ... run.end(outputs={"result": result}) + + Asynchronous usage: + >>> async def main(): + ... async with trace("Async Operation", run_type="tool", tags=["async"]) as run: + ... result = "foo" # Can await some_async_operation() + ... run.metadata["some-key"] = "some-value" + ... 
# "end" just adds the outputs and sets error to None + ... # The actual patching of the run happens when the context exits + ... run.end(outputs={"result": result}) + >>> asyncio.run(main()) + + Allowing pytest.skip in a test: + >>> import sys + >>> import pytest + >>> with trace("OS-Specific Test", exceptions_to_handle=(pytest.skip.Exception,)): + ... if sys.platform == "win32": + ... pytest.skip("Not supported on Windows") + ... result = "foo" # e.g., do some unix_specific_operation() + """ + + def __init__( + self, + name: str, + run_type: ls_client.RUN_TYPE_T = "chain", + *, + inputs: Optional[Dict] = None, + extra: Optional[Dict] = None, + project_name: Optional[str] = None, + parent: Optional[Union[run_trees.RunTree, str, Mapping]] = None, + tags: Optional[List[str]] = None, + metadata: Optional[Mapping[str, Any]] = None, + client: Optional[ls_client.Client] = None, + run_id: Optional[ls_client.ID_TYPE] = None, + reference_example_id: Optional[ls_client.ID_TYPE] = None, + exceptions_to_handle: Optional[Tuple[Type[BaseException], ...]] = None, + **kwargs: Any, + ): + """Initialize the trace context manager. + + Warns if unsupported kwargs are passed. + """ + if kwargs: + warnings.warn( + "The `trace` context manager no longer supports the following kwargs: " + f"{sorted(kwargs.keys())}.", + DeprecationWarning, + ) + self.name = name + self.run_type = run_type + self.inputs = inputs + self.extra = extra + self.project_name = project_name + self.parent = parent + # The run tree is deprecated. Keeping for backwards compat. + # Will fully merge within parent later. 
+ self.run_tree = kwargs.get("run_tree") + self.tags = tags + self.metadata = metadata + self.client = client + self.run_id = run_id + self.reference_example_id = reference_example_id + self.exceptions_to_handle = exceptions_to_handle + self.new_run: Optional[run_trees.RunTree] = None + self.old_ctx: Optional[dict] = None + + def _setup(self) -> run_trees.RunTree: + """Set up the tracing context and create a new run. + + This method initializes the tracing context, merges tags and metadata, + creates a new run (either as a child of an existing run or as a new root run), + and sets up the necessary context variables. + + Returns: + run_trees.RunTree: The newly created run. + """ + self.old_ctx = get_tracing_context() + is_disabled = self.old_ctx.get("enabled", True) is False + outer_tags = _TAGS.get() + outer_metadata = _METADATA.get() + parent_run_ = _get_parent_run( + { + "parent": self.parent, + "run_tree": self.run_tree, + "client": self.client, + } + ) + + tags_ = sorted(set((self.tags or []) + (outer_tags or []))) + metadata = { + **(self.metadata or {}), + **(outer_metadata or {}), + "ls_method": "trace", + } + + extra_outer = self.extra or {} + extra_outer["metadata"] = metadata + + project_name_ = _get_project_name(self.project_name) + + if parent_run_ is not None and not is_disabled: + self.new_run = parent_run_.create_child( + name=self.name, + run_id=self.run_id, + run_type=self.run_type, + extra=extra_outer, + inputs=self.inputs, + tags=tags_, + ) + else: + self.new_run = run_trees.RunTree( + name=self.name, + id=ls_client._ensure_uuid(self.run_id), + reference_example_id=ls_client._ensure_uuid( + self.reference_example_id, accept_null=True + ), + run_type=self.run_type, + extra=extra_outer, + project_name=project_name_ or "default", + inputs=self.inputs or {}, + tags=tags_, + client=self.client, # type: ignore[arg-type] + ) + + if not is_disabled: + self.new_run.post() + _TAGS.set(tags_) + _METADATA.set(metadata) + _PARENT_RUN_TREE.set(self.new_run) + 
_PROJECT_NAME.set(project_name_) + + return self.new_run + + def _teardown( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + """Clean up the tracing context and finalize the run. + + This method handles exceptions, ends the run if necessary, + patches the run if it's not disabled, and resets the tracing context. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + if self.new_run is None: + warnings.warn("Tracing context was not set up properly.", RuntimeWarning) + return + if exc_type is not None: + if self.exceptions_to_handle and issubclass( + exc_type, self.exceptions_to_handle + ): + tb = None + else: + tb = utils._format_exc() + tb = f"{exc_type.__name__}: {exc_value}\n\n{tb}" + self.new_run.end(error=tb) + if self.old_ctx is not None: + is_disabled = self.old_ctx.get("enabled", True) is False + if not is_disabled: + self.new_run.patch() + + _set_tracing_context(self.old_ctx) + else: + warnings.warn("Tracing context was not set up properly.", RuntimeWarning) + + def __enter__(self) -> run_trees.RunTree: + """Enter the context manager synchronously. + + Returns: + run_trees.RunTree: The newly created run. + """ + return self._setup() + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + """Exit the context manager synchronously. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + self._teardown(exc_type, exc_value, traceback) + + async def __aenter__(self) -> run_trees.RunTree: + """Enter the context manager asynchronously. 
+ + Returns: + run_trees.RunTree: The newly created run. + """ + return await aitertools.aio_to_thread(self._setup) + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + """Exit the context manager asynchronously. + + Args: + exc_type: The type of the exception that occurred, if any. + exc_value: The exception instance that occurred, if any. + traceback: The traceback object associated with the exception, if any. + """ + if exc_type is not None: + await asyncio.shield( + aitertools.aio_to_thread(self._teardown, exc_type, exc_value, traceback) + ) + else: + await aitertools.aio_to_thread( + self._teardown, exc_type, exc_value, traceback + ) + + def _get_project_name(project_name: Optional[str]) -> Optional[str]: prt = _PARENT_RUN_TREE.get() return ( @@ -698,97 +964,6 @@ def _get_project_name(project_name: Optional[str]) -> Optional[str]: ) -@contextlib.contextmanager -def trace( - name: str, - run_type: ls_client.RUN_TYPE_T = "chain", - *, - inputs: Optional[Dict] = None, - extra: Optional[Dict] = None, - project_name: Optional[str] = None, - parent: Optional[Union[run_trees.RunTree, str, Mapping]] = None, - tags: Optional[List[str]] = None, - metadata: Optional[Mapping[str, Any]] = None, - client: Optional[ls_client.Client] = None, - run_id: Optional[ls_client.ID_TYPE] = None, - reference_example_id: Optional[ls_client.ID_TYPE] = None, - exceptions_to_handle: Optional[Tuple[Type[BaseException], ...]] = None, - **kwargs: Any, -) -> Generator[run_trees.RunTree, None, None]: - """Context manager for creating a run tree.""" - if kwargs: - # In case someone was passing an executor before. 
- warnings.warn( - "The `trace` context manager no longer supports the following kwargs: " - f"{sorted(kwargs.keys())}.", - DeprecationWarning, - ) - old_ctx = get_tracing_context() - is_disabled = old_ctx.get("enabled", True) is False - outer_tags = _TAGS.get() - outer_metadata = _METADATA.get() - parent_run_ = _get_parent_run( - {"parent": parent, "run_tree": kwargs.get("run_tree"), "client": client} - ) - - # Merge context variables - tags_ = sorted(set((tags or []) + (outer_tags or []))) - metadata = {**(metadata or {}), **(outer_metadata or {}), "ls_method": "trace"} - - extra_outer = extra or {} - extra_outer["metadata"] = metadata - - project_name_ = _get_project_name(project_name) - # If it's disabled, we break the tree - if parent_run_ is not None and not is_disabled: - new_run = parent_run_.create_child( - name=name, - run_id=run_id, - run_type=run_type, - extra=extra_outer, - inputs=inputs, - tags=tags_, - ) - else: - new_run = run_trees.RunTree( - name=name, - id=ls_client._ensure_uuid(run_id), - reference_example_id=ls_client._ensure_uuid( - reference_example_id, accept_null=True - ), - run_type=run_type, - extra=extra_outer, - project_name=project_name_, # type: ignore[arg-type] - inputs=inputs or {}, - tags=tags_, - client=client, # type: ignore[arg-type] - ) - if not is_disabled: - new_run.post() - _TAGS.set(tags_) - _METADATA.set(metadata) - _PARENT_RUN_TREE.set(new_run) - _PROJECT_NAME.set(project_name_) - - try: - yield new_run - except (Exception, KeyboardInterrupt, BaseException) as e: - if exceptions_to_handle and isinstance(e, exceptions_to_handle): - tb = None - else: - tb = utils._format_exc() - tb = f"{e.__class__.__name__}: {e}\n\n{tb}" - new_run.end(error=tb) - if not is_disabled: - new_run.patch() - raise e - finally: - # Reset the old context - _set_tracing_context(old_ctx) - if not is_disabled: - new_run.patch() - - def as_runnable(traceable_fn: Callable) -> Runnable: """Convert a function wrapped by the LangSmith @traceable decorator 
to a Runnable. diff --git a/python/pyproject.toml b/python/pyproject.toml index 3ed59d26f..f6f9fa609 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.92" +version = "0.1.93" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 434c10bca..4bbc182c9 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -961,6 +961,40 @@ def _get_run(r: RunTree) -> None: assert child_runs[0].inputs == {"a": 1, "b": 2} +async def test_traceable_to_atrace(): + @traceable + async def parent_fn(a: int, b: int) -> int: + async with langsmith.trace( + name="child_fn", inputs={"a": a, "b": b} + ) as run_tree: + result = a + b + run_tree.end(outputs={"result": result}) + return result + + run: Optional[RunTree] = None # type: ignore + + def _get_run(r: RunTree) -> None: + nonlocal run + run = r + + with tracing_context(enabled=True): + result = await parent_fn( + 1, 2, langsmith_extra={"on_end": _get_run, "client": _get_mock_client()} + ) + + assert result == 3 + assert run is not None + run = cast(RunTree, run) + assert run.name == "parent_fn" + assert run.outputs == {"output": 3} + assert run.inputs == {"a": 1, "b": 2} + child_runs = run.child_runs + assert child_runs + assert len(child_runs) == 1 + assert child_runs[0].name == "child_fn" + assert child_runs[0].inputs == {"a": 1, "b": 2} + + def test_trace_to_traceable(): @traceable def child_fn(a: int, b: int) -> int: From 48abee55baa3cf5ac4070ba1d940de2d67de83bc Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 23 Jul 2024 15:53:46 -0700 Subject: [PATCH 306/373] feat: add hub js sdk to langsmith js sdk --- js/src/client.ts | 565 ++++++++++++++++++ js/src/schemas.ts | 56 ++ js/src/tests/client.int.test.ts | 168 
+++++- js/src/tests/client.test.ts | 28 + js/src/utils/prompts.ts | 40 ++ python/langsmith/client.py | 43 +- .../tests/integration_tests/test_prompts.py | 1 + 7 files changed, 887 insertions(+), 14 deletions(-) create mode 100644 js/src/utils/prompts.ts diff --git a/js/src/client.ts b/js/src/client.ts index 14746a17a..f867706b1 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -16,6 +16,12 @@ import { FeedbackIngestToken, KVMap, LangChainBaseMessage, + LangSmithSettings, + LikePromptResponse, + ListPromptsResponse, + Prompt, + PromptCommit, + PromptSortField, Run, RunCreate, RunUpdate, @@ -43,6 +49,7 @@ import { import { __version__ } from "./index.js"; import { assertUuid } from "./utils/_uuid.js"; import { warnOnce } from "./utils/warn.js"; +import { isVersionGreaterOrEqual, parsePromptIdentifier } from "./utils/prompts.js"; interface ClientConfig { apiUrl?: string; @@ -418,6 +425,8 @@ export class Client { private fetchOptions: RequestInit; + private settings: LangSmithSettings; + constructor(config: ClientConfig = {}) { const defaultConfig = Client.getDefaultClientConfig(); @@ -746,6 +755,13 @@ export class Client { return true; } + protected async _getSettings() { + if (!this.settings) { + this.settings = await this._get("/settings"); + } + return this.settings; + } + public async createRun(run: CreateRunParams): Promise { if (!this._filterForSampling([run]).length) { return; @@ -2921,4 +2937,553 @@ export class Client { ); return results; } + + protected async _currentTenantIsOwner(owner: string): Promise { + const settings = await this._getSettings(); + return owner == "-" || settings.tenantHandle === owner; + } + + protected async _ownerConflictError( + action: string, owner: string + ): Promise { + const settings = await this._getSettings(); + return new Error( + `Cannot ${action} for another tenant.\n + Current tenant: ${settings.tenantHandle}\n + Requested tenant: ${owner}` + ); + } + + protected async _getLatestCommitHash( + promptOwnerAndName: 
string, + ): Promise { + const commitsResp = await this.listCommits(promptOwnerAndName, { limit: 1 }); + const commits = commitsResp.commits; + console.log('commits number', commits) + if (commits.length === 0) { + return undefined; + } + return commits[0].commit_hash; + } + + protected async _likeOrUnlikePrompt( + promptIdentifier: string, + like: boolean + ): Promise { + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const response = await this.caller.call( + fetch, + `${this.apiUrl}/likes/${owner}/${promptName}`, + { + method: "POST", + body: JSON.stringify({ like: like }), + headers: { ...this.headers, "Content-Type": "application/json" }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error( + `Failed to ${like ? "like" : "unlike"} prompt: ${response.status} ${await response.text()}` + ); + } + + return await response.json(); + } + + protected async _getPromptUrl(promptIdentifier: string): Promise { + console.log('print ing promt id', promptIdentifier) + const [owner, promptName, commitHash] = parsePromptIdentifier(promptIdentifier); + if (!(await this._currentTenantIsOwner(owner))) { + if (commitHash !== 'latest') { + return `${this.getHostUrl()}/hub/${owner}/${promptName}/${commitHash.substring(0, 8)}`; + } else { + return `${this.getHostUrl()}/hub/${owner}/${promptName}`; + } + } else { + const settings = await this._getSettings(); + if (commitHash !== 'latest') { + return `${this.getHostUrl()}/prompts/${promptName}/${commitHash.substring(0, 8)}?organizationId=${settings.id}`; + } else { + return `${this.getHostUrl()}/prompts/${promptName}?organizationId=${settings.id}`; + } + } + } + + public async promptExists( + promptIdentifier: string + ): Promise { + const prompt = await this.getPrompt(promptIdentifier); + return !!prompt + } + + public async likePrompt(promptIdentifier: string): Promise { + return this._likeOrUnlikePrompt(promptIdentifier, true); + } + + 
public async unlikePrompt(promptIdentifier: string): Promise { + return this._likeOrUnlikePrompt(promptIdentifier, false); + } + + public async listCommits( + promptOwnerAndName: string, + options?: { + limit?: number; + offset?: number; + }, + ) { + const { limit = 100, offset = 0 } = options ?? {}; + const res = await this.caller.call( + fetch, + `${this.apiUrl}/commits/${promptOwnerAndName}/?limit=${limit}&offset=${offset}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + const json = await res.json(); + if (!res.ok) { + const detail = + typeof json.detail === "string" + ? json.detail + : JSON.stringify(json.detail); + const error = new Error( + `Error ${res.status}: ${res.statusText}\n${detail}`, + ); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (error as any).statusCode = res.status; + throw error; + } + return json; + } + + public async listPrompts( + options?: { + limit?: number, + offset?: number, + isPublic?: boolean, + isArchived?: boolean, + sortField?: PromptSortField, + sortDirection?: 'desc' | 'asc', + query?: string, + } + ): Promise { + const params: Record = { + limit: (options?.limit ?? 100).toString(), + offset: (options?.offset ?? 0).toString(), + sort_field: options?.sortField ?? 'updated_at', + sort_direction: options?.sortDirection ?? 
'desc', + is_archived: (!!options?.isArchived).toString(), + }; + + if (options?.isPublic !== undefined) { + params.is_public = options.isPublic.toString(); + } + + if (options?.query) { + params.query = options.query; + } + + const queryString = new URLSearchParams(params).toString(); + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/repos/?${queryString}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const res = await response.json(); + + return { + repos: res.repos.map((result: any) => ({ + owner: result.owner, + repoHandle: result.repo_handle, + description: result.description, + id: result.id, + readme: result.readme, + tenantId: result.tenant_id, + tags: result.tags, + isPublic: result.is_public, + isArchived: result.is_archived, + createdAt: result.created_at, + updatedAt: result.updated_at, + originalRepoId: result.original_repo_id, + upstreamRepoId: result.upstream_repo_id, + fullName: result.full_name, + numLikes: result.num_likes, + numDownloads: result.num_downloads, + numViews: result.num_views, + likedByAuthUser: result.liked_by_auth_user, + lastCommitHash: result.last_commit_hash, + numCommits: result.num_commits, + originalRepoFullName: result.original_repo_full_name, + upstreamRepoFullName: result.upstream_repo_full_name, + })), + total: res.total, + }; + } + + public async getPrompt(promptIdentifier: string): Promise { + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + const response = await this.caller.call( + fetch, + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (response.status === 404) { + return null; + } + + const result = await response.json(); + if (result.repo) { + return { + owner: result.repo.owner, + repoHandle: result.repo.repo_handle, + description: 
result.repo.description, + id: result.repo.id, + readme: result.repo.readme, + tenantId: result.repo.tenant_id, + tags: result.repo.tags, + isPublic: result.repo.is_public, + isArchived: result.repo.is_archived, + createdAt: result.repo.created_at, + updatedAt: result.repo.updated_at, + originalRepoId: result.repo.original_repo_id, + upstreamRepoId: result.repo.upstream_repo_id, + fullName: result.repo.full_name, + numLikes: result.repo.num_likes, + numDownloads: result.repo.num_downloads, + numViews: result.repo.num_views, + likedByAuthUser: result.repo.liked_by_auth_user, + lastCommitHash: result.repo.last_commit_hash, + numCommits: result.repo.num_commits, + originalRepoFullName: result.repo.original_repo_full_name, + upstreamRepoFullName: result.repo.upstream_repo_full_name, + }; + } else { + return null; + } + } + + public async createPrompt( + promptIdentifier: string, + options?: { + description?: string, + readme?: string, + tags?: string[], + isPublic?: boolean, + } + ): Promise { + const settings = await this._getSettings(); + if (options?.isPublic && !settings.tenantHandle) { + throw new Error( + `Cannot create a public prompt without first\n + creating a LangChain Hub handle. 
+ You can add a handle by creating a public prompt at:\n + https://smith.langchain.com/prompts` + ); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + if (!await this._currentTenantIsOwner(owner)) { + throw await this._ownerConflictError("create a prompt", owner); + } + + const data = { + repo_handle: promptName, + ...(options?.description && { description: options.description }), + ...(options?.readme && { readme: options.readme }), + ...(options?.tags && { tags: options.tags }), + is_public: !!options?.isPublic, + }; + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/repos/`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const { repo } = await response.json(); + console.log('result right here', repo); + return { + owner: repo.owner, + repoHandle: repo.repo_handle, + description: repo.description, + id: repo.id, + readme: repo.readme, + tenantId: repo.tenant_id, + tags: repo.tags, + isPublic: repo.is_public, + isArchived: repo.is_archived, + createdAt: repo.created_at, + updatedAt: repo.updated_at, + originalRepoId: repo.original_repo_id, + upstreamRepoId: repo.upstream_repo_id, + fullName: repo.full_name, + numLikes: repo.num_likes, + numDownloads: repo.num_downloads, + numViews: repo.num_views, + likedByAuthUser: repo.liked_by_auth_user, + lastCommitHash: repo.last_commit_hash, + numCommits: repo.num_commits, + originalRepoFullName: repo.original_repo_full_name, + upstreamRepoFullName: repo.upstream_repo_full_name, + }; + } + + public async createCommit( + promptIdentifier: string, + object: any, + options?: { + parentCommitHash?: string, + } + ): Promise { + if (!await this.promptExists(promptIdentifier)) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName, _] = 
parsePromptIdentifier(promptIdentifier); + const resolvedParentCommitHash = + (options?.parentCommitHash === "latest" || !options?.parentCommitHash) + ? await this._getLatestCommitHash(`${owner}/${promptName}`) + : options?.parentCommitHash; + + console.log('this is resolved parent commit hash', resolvedParentCommitHash); + + const payload = { + manifest: JSON.parse(JSON.stringify(object)), + parent_commit: resolvedParentCommitHash, + }; + + console.log('latest prompt anyway', await this.listCommits(`${owner}/${promptName}`)); + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/commits/${owner}/${promptName}`, + { + method: "POST", + headers: { ...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(payload), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error( + `Failed to create commit: ${response.status} ${await response.text()}` + ); + } + + const result = await response.json(); + return this._getPromptUrl(`${owner}/${promptName}${result.commit_hash ? 
`:${result.commit_hash}` : ''}`); + } + + public async updatePrompt( + promptIdentifier: string, + options?: { + description?: string, + readme?: string, + tags?: string[], + isPublic?: boolean, + isArchived?: boolean, + } + ): Promise> { + if (!await this.promptExists(promptIdentifier)) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName] = parsePromptIdentifier(promptIdentifier); + + if (!await this._currentTenantIsOwner(owner)) { + throw await this._ownerConflictError("update a prompt", owner); + } + + const payload: Record = {}; + + if (options?.description !== undefined) payload.description = options.description; + if (options?.readme !== undefined) payload.readme = options.readme; + if (options?.tags !== undefined) payload.tags = options.tags; + if (options?.isPublic !== undefined) payload.is_public = options.isPublic; + if (options?.isArchived !== undefined) payload.is_archived = options.isArchived; + + // Check if payload is empty + if (Object.keys(payload).length === 0) { + throw new Error("No valid update options provided"); + } + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "PATCH", + body: JSON.stringify(payload), + headers: { + ...this.headers, + 'Content-Type': 'application/json', + }, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error(`HTTP Error: ${response.status} - ${await response.text()}`); + } + + return response.json(); + } + + public async deletePrompt( + promptIdentifier: string + ): Promise { + if (!await this.promptExists(promptIdentifier)) { + throw new Error("Prompt does not exist, you must create it first."); + } + + const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); + + if (!await this._currentTenantIsOwner(owner)) { + throw await this._ownerConflictError("delete a prompt", owner); + } + + const response = await 
this.caller.call( + fetch, + `${this.apiUrl}/repos/${owner}/${promptName}`, + { + method: "DELETE", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + return await response.json(); + } + + public async pullPromptCommit( + promptIdentifier: string, + options?: { + includeModel?: boolean + } + ): Promise { + const [owner, promptName, commitHash] = parsePromptIdentifier(promptIdentifier); + console.log('this is current version', this.serverInfo?.version); + const useOptimization = true //isVersionGreaterOrEqual(this.serverInfo?.version, '0.5.23'); + + let passedCommitHash = commitHash; + + if (!useOptimization && commitHash === 'latest') { + const latestCommitHash = await this._getLatestCommitHash(`${owner}/${promptName}`); + if (!latestCommitHash) { + throw new Error('No commits found'); + } else { + passedCommitHash = latestCommitHash; + } + } + + const response = await this.caller.call( + fetch, + `${this.apiUrl}/commits/${owner}/${promptName}/${passedCommitHash}${options?.includeModel ? 
'?include_model=true' : ''}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + if (!response.ok) { + throw new Error( + `Failed to pull prompt commit: ${response.status} ${response.statusText}` + ); + } + + const result = await response.json(); + + return { + owner, + repo: promptName, + commitHash: result.commit_hash, + manifest: result.manifest, + examples: result.examples, + }; + } + + public async pullPrompt( + promptIdentifier: string, + options?: { + includeModel?: boolean, + } + ): Promise { + const promptObject = await this.pullPromptCommit(promptIdentifier, { + includeModel: options?.includeModel + }); + const prompt = JSON.stringify(promptObject.manifest); + // need to add load from lc js + return prompt; + } + + public async pushPrompt( + promptIdentifier: string, + options?: { + object?: any, + parentCommitHash?: string, + isPublic?: boolean, + description?: string, + readme?: string, + tags?: string[], + } + ): Promise { + // Create or update prompt metadata + console.log('prompt exists', await this.promptExists(promptIdentifier)); + if (await this.promptExists(promptIdentifier)) { + await this.updatePrompt(promptIdentifier, { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + }); + } else { + await this.createPrompt( + promptIdentifier, + { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + } + ); + } + + if (options?.object === null) { + return await this._getPromptUrl(promptIdentifier); + } + + // Create a commit with the new manifest + const url = await this.createCommit(promptIdentifier, options?.object, { + parentCommitHash: options?.parentCommitHash, + }); + return url; + } } diff --git a/js/src/schemas.ts b/js/src/schemas.ts index 0f1ebc126..bbee1f9be 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -406,3 +406,59 @@ 
export interface InvocationParamsSchema { ls_max_tokens?: number; ls_stop?: string[]; } + +export interface PromptCommit { + owner: string; + repo: string; + commitHash: string; + manifest: Record; + examples: Array>; +} + +export interface Prompt { + repoHandle: string; + description?: string; + readme?: string; + id: string; + tenantId: string; + createdAt: string; + updatedAt: string; + isPublic: boolean; + isArchived: boolean; + tags: string[]; + originalRepoId?: string; + upstreamRepoId?: string; + owner?: string; + fullName: string; + numLikes: number; + numDownloads: number; + numViews: number; + likedByAuthUser: boolean; + lastCommitHash?: string; + numCommits: number; + originalRepoFullName?: string; + upstreamRepoFullName?: string; +} + +export interface ListPromptsResponse { + repos: Prompt[]; + total: number; +} + +export enum PromptSortField { + NumDownloads = 'num_downloads', + NumViews = 'num_views', + UpdatedAt = 'updated_at', + NumLikes = 'num_likes', +} + +export interface LikePromptResponse { + likes: number; +} + +export interface LangSmithSettings { + id: string; + displayName: string; + createdAt: string; + tenantHandle?: string; +} \ No newline at end of file diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 29200ce57..8e4f47ff3 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -1,5 +1,5 @@ import { Dataset, Run } from "../schemas.js"; -import { FunctionMessage, HumanMessage } from "@langchain/core/messages"; +import { FunctionMessage, HumanMessage, SystemMessage } from "@langchain/core/messages"; import { Client } from "../client.js"; import { v4 as uuidv4 } from "uuid"; @@ -10,6 +10,7 @@ import { toArray, waitUntil, } from "./utils.js"; +import { ChatPromptTemplate } from "@langchain/core/prompts"; type CheckOutputsType = boolean | ((run: Run) => boolean); async function waitUntilRunFound( @@ -748,3 +749,168 @@ test.concurrent("Test run stats", async () => { }); 
expect(stats).toBeDefined(); }); + +test("Test list prompts", async () => { + const client = new Client(); + const response = await client.listPrompts({ limit: 10, offset: 0 }); + expect(response.repos.length).toBeLessThanOrEqual(10); + expect(response.total).toBeGreaterThanOrEqual(response.repos.length); +}); + +test("Test get prompt", async () => { + const client = new Client(); + const promptName = `test_prompt_${uuidv4().slice(0, 8)}`; + const promptTemplate = ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" }); + + const url = await client.pushPrompt(promptName, { object: promptTemplate }); + expect(url).toBeDefined(); + + const prompt = await client.getPrompt(promptName); + expect(prompt).toBeDefined(); + expect(prompt?.repoHandle).toBe(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test prompt exists", async () => { + const client = new Client(); + const nonExistentPrompt = `non_existent_${uuidv4().slice(0, 8)}`; + expect(await client.promptExists(nonExistentPrompt)).toBe(false); + + const existentPrompt = `existent_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(existentPrompt, { object: ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" })}); + expect(await client.promptExists(existentPrompt)).toBe(true); + + await client.deletePrompt(existentPrompt); +}); + +test("Test update prompt", async () => { + const client = new Client(); + + const promptName = `test_update_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" })}); + + const updatedData = await client.updatePrompt(promptName, { + description: 
"Updated description", + isPublic: true, + tags: ["test", "update"], + }); + + expect(updatedData).toBeDefined(); + + const updatedPrompt = await client.getPrompt(promptName); + expect(updatedPrompt?.description).toBe("Updated description"); + expect(updatedPrompt?.isPublic).toBe(true); + expect(updatedPrompt?.tags).toEqual(expect.arrayContaining(["test", "update"])); + + await client.deletePrompt(promptName); +}); + +test("Test delete prompt", async () => { + const client = new Client(); + + const promptName = `test_delete_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" })}); + + expect(await client.promptExists(promptName)).toBe(true); + await client.deletePrompt(promptName); + expect(await client.promptExists(promptName)).toBe(false); +}); + +test("Test create commit", async () => { + const client = new Client(); + + const promptName = `test_create_commit_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" })}); + + const newTemplate = ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "My question is: {{question}}" }), + ], { templateFormat: "mustache" }); + const commitUrl = await client.createCommit(promptName, newTemplate); + + expect(commitUrl).toBeDefined(); + expect(commitUrl).toContain(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test like and unlike prompt", async () => { + const client = new Client(); + + const promptName = `test_like_prompt_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: 
"System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" })}); + + await client.likePrompt(promptName); + let prompt = await client.getPrompt(promptName); + expect(prompt?.numLikes).toBe(1); + + await client.unlikePrompt(promptName); + prompt = await client.getPrompt(promptName); + expect(prompt?.numLikes).toBe(0); + + await client.deletePrompt(promptName); +}); + +test("Test pull prompt commit", async () => { + const client = new Client(); + + const promptName = `test_pull_commit_${uuidv4().slice(0, 8)}`; + const initialTemplate = ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" }); + await client.pushPrompt(promptName, { object: initialTemplate }); + + const promptCommit = await client.pullPromptCommit(promptName); + expect(promptCommit).toBeDefined(); + expect(promptCommit.repo).toBe(promptName); + + await client.deletePrompt(promptName); +}); + +test("Test push and pull prompt", async () => { + const client = new Client(); + + const promptName = `test_push_pull_${uuidv4().slice(0, 8)}`; + const template = ChatPromptTemplate.fromMessages([ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], { templateFormat: "mustache" }); + + await client.pushPrompt(promptName, { + object: template, + description: "Test description", + readme: "Test readme", + tags: ["test", "tag"] + }); + + const pulledPrompt = await client.pullPrompt(promptName); + expect(pulledPrompt).toBeDefined(); + + const promptInfo = await client.getPrompt(promptName); + expect(promptInfo?.description).toBe("Test description"); + expect(promptInfo?.readme).toBe("Test readme"); + expect(promptInfo?.tags).toEqual(expect.arrayContaining(["test", "tag"])); + expect(promptInfo?.isPublic).toBe(false); + + await client.deletePrompt(promptName); +}); diff --git a/js/src/tests/client.test.ts 
b/js/src/tests/client.test.ts index 000dd460b..381dc734a 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -6,6 +6,7 @@ import { getLangChainEnvVars, getLangChainEnvVarsMetadata, } from "../utils/env.js"; +import { parsePromptIdentifier } from "../utils/prompts.js"; describe("Client", () => { describe("createLLMExample", () => { @@ -175,4 +176,31 @@ describe("Client", () => { }); }); }); + + describe('parsePromptIdentifier', () => { + it('should parse valid identifiers correctly', () => { + expect(parsePromptIdentifier('name')).toEqual(['-', 'name', 'latest']); + expect(parsePromptIdentifier('owner/name')).toEqual(['owner', 'name', 'latest']); + expect(parsePromptIdentifier('owner/name:commit')).toEqual(['owner', 'name', 'commit']); + expect(parsePromptIdentifier('name:commit')).toEqual(['-', 'name', 'commit']); + }); + + it('should throw an error for invalid identifiers', () => { + const invalidIdentifiers = [ + '', + '/', + ':', + 'owner/', + '/name', + 'owner//name', + 'owner/name/', + 'owner/name/extra', + ':commit', + ]; + + invalidIdentifiers.forEach(identifier => { + expect(() => parsePromptIdentifier(identifier)).toThrowError(`Invalid identifier format: ${identifier}`); + }); + }); + }); }); diff --git a/js/src/utils/prompts.ts b/js/src/utils/prompts.ts new file mode 100644 index 000000000..01f16c29b --- /dev/null +++ b/js/src/utils/prompts.ts @@ -0,0 +1,40 @@ +import { parse as parseVersion } from 'semver'; + +export function isVersionGreaterOrEqual(current_version: string, target_version: string): boolean { + const current = parseVersion(current_version); + const target = parseVersion(target_version); + + if (!current || !target) { + throw new Error('Invalid version format.'); + } + + return current.compare(target) >= 0; +} + +export function parsePromptIdentifier(identifier: string): [string, string, string] { + if ( + !identifier || + identifier.split('/').length > 2 || + identifier.startsWith('/') || + 
identifier.endsWith('/') || + identifier.split(':').length > 2 + ) { + throw new Error(`Invalid identifier format: ${identifier}`); + } + + const [ownerNamePart, commitPart] = identifier.split(':'); + const commit = commitPart || 'latest'; + + if (ownerNamePart.includes('/')) { + const [owner, name] = ownerNamePart.split('/', 2); + if (!owner || !name) { + throw new Error(`Invalid identifier format: ${identifier}`); + } + return [owner, name, commit]; + } else { + if (!ownerNamePart) { + throw new Error(`Invalid identifier format: ${identifier}`); + } + return ['-', ownerNamePart, commit]; + } +} diff --git a/python/langsmith/client.py b/python/langsmith/client.py index be40dfb02..f23136a1e 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4920,13 +4920,22 @@ def _get_prompt_url(self, prompt_identifier: str) -> str: ) if not self._current_tenant_is_owner(owner): - return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" + if commit_hash is not 'latest': + return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" + else: + return f"{self._host_url}/hub/{owner}/{prompt_name}" settings = self._get_settings() - return ( - f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" - f"?organizationId={settings.id}" - ) + if commit_hash is not 'latest': + return ( + f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" + f"?organizationId={settings.id}" + ) + else: + return ( + f"{self._host_url}/prompts/{prompt_name}" + f"?organizationId={settings.id}" + ) def _prompt_exists(self, prompt_identifier: str) -> bool: """Check if a prompt exists. @@ -4964,6 +4973,16 @@ def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: """ return self._like_or_unlike_prompt(prompt_identifier, like=False) + def list_commits( + prompt_owner_and_name: str, + limit: Optional[int] = 1, + offset: Optional[int] = 0, + ) -> Sequence[ls_schemas.PromptCommit]: + """List commits for a prompt. 
+ """ + + return '' + def list_prompts( self, *, @@ -5110,6 +5129,7 @@ def create_commit( try: from langchain_core.load.dump import dumps + from langchain_core.load.load import loads except ImportError: raise ImportError( "The client.create_commit function requires the langchain_core" @@ -5163,14 +5183,11 @@ def update_prompt( ValueError: If the prompt_identifier is empty. HTTPError: If the server request fails. """ - settings = self._get_settings() - if is_public and not settings.tenant_handle: - raise ValueError( - "Cannot create a public prompt without first\n" - "creating a LangChain Hub handle. " - "You can add a handle by creating a public prompt at:\n" - "https://smith.langchain.com/prompts" - ) + if not self.prompt_exists(prompt_identifier): + raise ls_utils.LangSmithNotFoundError("Prompt does not exist, you must create it first.") + + if not self._current_tenant_is_owner(owner): + raise self._owner_conflict_error("update a prompt", owner) json: Dict[str, Union[str, bool, Sequence[str]]] = {} diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 80f6e5c4c..607d64fcc 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -159,6 +159,7 @@ def test_list_prompts(langsmith_client: Client): response = langsmith_client.list_prompts(limit=10, offset=0) assert isinstance(response, ls_schemas.ListPromptsResponse) assert len(response.repos) <= 10 + assert response.total >= len(response.repos) def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): From 064591238b374fa7edda0695eeec44d39bc6d364 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 23 Jul 2024 16:06:12 -0700 Subject: [PATCH 307/373] st --- js/src/client.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index f867706b1..284f8df8f 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -3388,8 
+3388,8 @@ export class Client { } ): Promise { const [owner, promptName, commitHash] = parsePromptIdentifier(promptIdentifier); - console.log('this is current version', this.serverInfo?.version); - const useOptimization = true //isVersionGreaterOrEqual(this.serverInfo?.version, '0.5.23'); + const serverInfo = await this._getServerInfo() + const useOptimization = isVersionGreaterOrEqual(serverInfo.version, '0.5.23'); let passedCommitHash = commitHash; From f96aa6d96999b78d64d7569b282b1f42fd55469f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 23 Jul 2024 16:07:10 -0700 Subject: [PATCH 308/373] version --- js/src/client.ts | 8 -------- js/src/tests/client.int.test.ts | 2 +- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index 284f8df8f..c8702bdf8 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -2959,7 +2959,6 @@ export class Client { ): Promise { const commitsResp = await this.listCommits(promptOwnerAndName, { limit: 1 }); const commits = commitsResp.commits; - console.log('commits number', commits) if (commits.length === 0) { return undefined; } @@ -2993,7 +2992,6 @@ export class Client { } protected async _getPromptUrl(promptIdentifier: string): Promise { - console.log('print ing promt id', promptIdentifier) const [owner, promptName, commitHash] = parsePromptIdentifier(promptIdentifier); if (!(await this._currentTenantIsOwner(owner))) { if (commitHash !== 'latest') { @@ -3224,7 +3222,6 @@ export class Client { ); const { repo } = await response.json(); - console.log('result right here', repo); return { owner: repo.owner, repoHandle: repo.repo_handle, @@ -3268,15 +3265,11 @@ export class Client { ? 
await this._getLatestCommitHash(`${owner}/${promptName}`) : options?.parentCommitHash; - console.log('this is resolved parent commit hash', resolvedParentCommitHash); - const payload = { manifest: JSON.parse(JSON.stringify(object)), parent_commit: resolvedParentCommitHash, }; - console.log('latest prompt anyway', await this.listCommits(`${owner}/${promptName}`)); - const response = await this.caller.call( fetch, `${this.apiUrl}/commits/${owner}/${promptName}`, @@ -3456,7 +3449,6 @@ export class Client { } ): Promise { // Create or update prompt metadata - console.log('prompt exists', await this.promptExists(promptIdentifier)); if (await this.promptExists(promptIdentifier)) { await this.updatePrompt(promptIdentifier, { description: options?.description, diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 8e4f47ff3..41622f9b2 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -870,7 +870,7 @@ test("Test like and unlike prompt", async () => { await client.deletePrompt(promptName); }); -test("Test pull prompt commit", async () => { +test.only("Test pull prompt commit", async () => { const client = new Client(); const promptName = `test_pull_commit_${uuidv4().slice(0, 8)}`; From 81e6b4ce660454a987fba5cbb27ec17e36be652e Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 23 Jul 2024 16:10:30 -0700 Subject: [PATCH 309/373] rm python changes --- python/langsmith/client.py | 43 ++++++------------- .../tests/integration_tests/test_prompts.py | 1 - 2 files changed, 13 insertions(+), 31 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index f23136a1e..be40dfb02 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -4920,22 +4920,13 @@ def _get_prompt_url(self, prompt_identifier: str) -> str: ) if not self._current_tenant_is_owner(owner): - if commit_hash is not 'latest': - return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" - else: - 
return f"{self._host_url}/hub/{owner}/{prompt_name}" + return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}" settings = self._get_settings() - if commit_hash is not 'latest': - return ( - f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" - f"?organizationId={settings.id}" - ) - else: - return ( - f"{self._host_url}/prompts/{prompt_name}" - f"?organizationId={settings.id}" - ) + return ( + f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}" + f"?organizationId={settings.id}" + ) def _prompt_exists(self, prompt_identifier: str) -> bool: """Check if a prompt exists. @@ -4973,16 +4964,6 @@ def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]: """ return self._like_or_unlike_prompt(prompt_identifier, like=False) - def list_commits( - prompt_owner_and_name: str, - limit: Optional[int] = 1, - offset: Optional[int] = 0, - ) -> Sequence[ls_schemas.PromptCommit]: - """List commits for a prompt. - """ - - return '' - def list_prompts( self, *, @@ -5129,7 +5110,6 @@ def create_commit( try: from langchain_core.load.dump import dumps - from langchain_core.load.load import loads except ImportError: raise ImportError( "The client.create_commit function requires the langchain_core" @@ -5183,11 +5163,14 @@ def update_prompt( ValueError: If the prompt_identifier is empty. HTTPError: If the server request fails. """ - if not self.prompt_exists(prompt_identifier): - raise ls_utils.LangSmithNotFoundError("Prompt does not exist, you must create it first.") - - if not self._current_tenant_is_owner(owner): - raise self._owner_conflict_error("update a prompt", owner) + settings = self._get_settings() + if is_public and not settings.tenant_handle: + raise ValueError( + "Cannot create a public prompt without first\n" + "creating a LangChain Hub handle. 
" + "You can add a handle by creating a public prompt at:\n" + "https://smith.langchain.com/prompts" + ) json: Dict[str, Union[str, bool, Sequence[str]]] = {} diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 607d64fcc..80f6e5c4c 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -159,7 +159,6 @@ def test_list_prompts(langsmith_client: Client): response = langsmith_client.list_prompts(limit=10, offset=0) assert isinstance(response, ls_schemas.ListPromptsResponse) assert len(response.repos) <= 10 - assert response.total >= len(response.repos) def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate): From c67b8fcec794f834967dee2ce55e587304253398 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 23 Jul 2024 16:11:44 -0700 Subject: [PATCH 310/373] prettier --- js/src/client.ts | 244 ++++++++++++++++++-------------- js/src/schemas.ts | 12 +- js/src/tests/client.int.test.ts | 125 ++++++++++------ js/src/tests/client.test.ts | 50 ++++--- js/src/utils/prompts.ts | 31 ++-- 5 files changed, 274 insertions(+), 188 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index c8702bdf8..a5f893272 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -49,7 +49,10 @@ import { import { __version__ } from "./index.js"; import { assertUuid } from "./utils/_uuid.js"; import { warnOnce } from "./utils/warn.js"; -import { isVersionGreaterOrEqual, parsePromptIdentifier } from "./utils/prompts.js"; +import { + isVersionGreaterOrEqual, + parsePromptIdentifier, +} from "./utils/prompts.js"; interface ClientConfig { apiUrl?: string; @@ -2944,7 +2947,8 @@ export class Client { } protected async _ownerConflictError( - action: string, owner: string + action: string, + owner: string ): Promise { const settings = await this._getSettings(); return new Error( @@ -2955,9 +2959,11 @@ export class Client { } protected async 
_getLatestCommitHash( - promptOwnerAndName: string, + promptOwnerAndName: string ): Promise { - const commitsResp = await this.listCommits(promptOwnerAndName, { limit: 1 }); + const commitsResp = await this.listCommits(promptOwnerAndName, { + limit: 1, + }); const commits = commitsResp.commits; if (commits.length === 0) { return undefined; @@ -2984,7 +2990,9 @@ export class Client { if (!response.ok) { throw new Error( - `Failed to ${like ? "like" : "unlike"} prompt: ${response.status} ${await response.text()}` + `Failed to ${like ? "like" : "unlike"} prompt: ${ + response.status + } ${await response.text()}` ); } @@ -2992,35 +3000,46 @@ export class Client { } protected async _getPromptUrl(promptIdentifier: string): Promise { - const [owner, promptName, commitHash] = parsePromptIdentifier(promptIdentifier); + const [owner, promptName, commitHash] = + parsePromptIdentifier(promptIdentifier); if (!(await this._currentTenantIsOwner(owner))) { - if (commitHash !== 'latest') { - return `${this.getHostUrl()}/hub/${owner}/${promptName}/${commitHash.substring(0, 8)}`; + if (commitHash !== "latest") { + return `${this.getHostUrl()}/hub/${owner}/${promptName}/${commitHash.substring( + 0, + 8 + )}`; } else { return `${this.getHostUrl()}/hub/${owner}/${promptName}`; } } else { const settings = await this._getSettings(); - if (commitHash !== 'latest') { - return `${this.getHostUrl()}/prompts/${promptName}/${commitHash.substring(0, 8)}?organizationId=${settings.id}`; + if (commitHash !== "latest") { + return `${this.getHostUrl()}/prompts/${promptName}/${commitHash.substring( + 0, + 8 + )}?organizationId=${settings.id}`; } else { - return `${this.getHostUrl()}/prompts/${promptName}?organizationId=${settings.id}`; + return `${this.getHostUrl()}/prompts/${promptName}?organizationId=${ + settings.id + }`; } } } - public async promptExists( - promptIdentifier: string - ): Promise { + public async promptExists(promptIdentifier: string): Promise { const prompt = await 
this.getPrompt(promptIdentifier); - return !!prompt + return !!prompt; } - public async likePrompt(promptIdentifier: string): Promise { + public async likePrompt( + promptIdentifier: string + ): Promise { return this._likeOrUnlikePrompt(promptIdentifier, true); } - public async unlikePrompt(promptIdentifier: string): Promise { + public async unlikePrompt( + promptIdentifier: string + ): Promise { return this._likeOrUnlikePrompt(promptIdentifier, false); } @@ -3029,12 +3048,12 @@ export class Client { options?: { limit?: number; offset?: number; - }, + } ) { const { limit = 100, offset = 0 } = options ?? {}; const res = await this.caller.call( fetch, - `${this.apiUrl}/commits/${promptOwnerAndName}/?limit=${limit}&offset=${offset}`, + `${this.apiUrl}/commits/${promptOwnerAndName}/?limit=${limit}&offset=${offset}`, { method: "GET", headers: this.headers, @@ -3049,7 +3068,7 @@ export class Client { ? json.detail : JSON.stringify(json.detail); const error = new Error( - `Error ${res.status}: ${res.statusText}\n${detail}`, + `Error ${res.status}: ${res.statusText}\n${detail}` ); // eslint-disable-next-line @typescript-eslint/no-explicit-any (error as any).statusCode = res.status; @@ -3058,35 +3077,33 @@ export class Client { return json; } - public async listPrompts( - options?: { - limit?: number, - offset?: number, - isPublic?: boolean, - isArchived?: boolean, - sortField?: PromptSortField, - sortDirection?: 'desc' | 'asc', - query?: string, - } - ): Promise { + public async listPrompts(options?: { + limit?: number; + offset?: number; + isPublic?: boolean; + isArchived?: boolean; + sortField?: PromptSortField; + sortDirection?: "desc" | "asc"; + query?: string; + }): Promise { const params: Record = { limit: (options?.limit ?? 100).toString(), offset: (options?.offset ?? 0).toString(), - sort_field: options?.sortField ?? 'updated_at', - sort_direction: options?.sortDirection ?? 'desc', + sort_field: options?.sortField ?? 
"updated_at", + sort_direction: options?.sortDirection ?? "desc", is_archived: (!!options?.isArchived).toString(), }; - + if (options?.isPublic !== undefined) { params.is_public = options.isPublic.toString(); } - + if (options?.query) { params.query = options.query; } - + const queryString = new URLSearchParams(params).toString(); - + const response = await this.caller.call( fetch, `${this.apiUrl}/repos/?${queryString}`, @@ -3099,7 +3116,7 @@ export class Client { ); const res = await response.json(); - + return { repos: res.repos.map((result: any) => ({ owner: result.owner, @@ -3127,7 +3144,7 @@ export class Client { })), total: res.total, }; - } + } public async getPrompt(promptIdentifier: string): Promise { const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); @@ -3180,10 +3197,10 @@ export class Client { public async createPrompt( promptIdentifier: string, options?: { - description?: string, - readme?: string, - tags?: string[], - isPublic?: boolean, + description?: string; + readme?: string; + tags?: string[]; + isPublic?: boolean; } ): Promise { const settings = await this._getSettings(); @@ -3197,10 +3214,10 @@ export class Client { } const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); - if (!await this._currentTenantIsOwner(owner)) { + if (!(await this._currentTenantIsOwner(owner))) { throw await this._ownerConflictError("create a prompt", owner); } - + const data = { repo_handle: promptName, ...(options?.description && { description: options.description }), @@ -3209,17 +3226,13 @@ export class Client { is_public: !!options?.isPublic, }; - const response = await this.caller.call( - fetch, - `${this.apiUrl}/repos/`, - { - method: "POST", - headers: { ...this.headers, "Content-Type": "application/json" }, - body: JSON.stringify(data), - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - } - ); + const response = await this.caller.call(fetch, `${this.apiUrl}/repos/`, { + method: "POST", + headers: { 
...this.headers, "Content-Type": "application/json" }, + body: JSON.stringify(data), + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + }); const { repo } = await response.json(); return { @@ -3252,16 +3265,16 @@ export class Client { promptIdentifier: string, object: any, options?: { - parentCommitHash?: string, + parentCommitHash?: string; } ): Promise { - if (!await this.promptExists(promptIdentifier)) { + if (!(await this.promptExists(promptIdentifier))) { throw new Error("Prompt does not exist, you must create it first."); } const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); const resolvedParentCommitHash = - (options?.parentCommitHash === "latest" || !options?.parentCommitHash) + options?.parentCommitHash === "latest" || !options?.parentCommitHash ? await this._getLatestCommitHash(`${owner}/${promptName}`) : options?.parentCommitHash; @@ -3289,42 +3302,48 @@ export class Client { } const result = await response.json(); - return this._getPromptUrl(`${owner}/${promptName}${result.commit_hash ? `:${result.commit_hash}` : ''}`); + return this._getPromptUrl( + `${owner}/${promptName}${ + result.commit_hash ? 
`:${result.commit_hash}` : "" + }` + ); } public async updatePrompt( promptIdentifier: string, options?: { - description?: string, - readme?: string, - tags?: string[], - isPublic?: boolean, - isArchived?: boolean, + description?: string; + readme?: string; + tags?: string[]; + isPublic?: boolean; + isArchived?: boolean; } ): Promise> { - if (!await this.promptExists(promptIdentifier)) { + if (!(await this.promptExists(promptIdentifier))) { throw new Error("Prompt does not exist, you must create it first."); } - + const [owner, promptName] = parsePromptIdentifier(promptIdentifier); - - if (!await this._currentTenantIsOwner(owner)) { + + if (!(await this._currentTenantIsOwner(owner))) { throw await this._ownerConflictError("update a prompt", owner); } - + const payload: Record = {}; - - if (options?.description !== undefined) payload.description = options.description; + + if (options?.description !== undefined) + payload.description = options.description; if (options?.readme !== undefined) payload.readme = options.readme; if (options?.tags !== undefined) payload.tags = options.tags; if (options?.isPublic !== undefined) payload.is_public = options.isPublic; - if (options?.isArchived !== undefined) payload.is_archived = options.isArchived; - + if (options?.isArchived !== undefined) + payload.is_archived = options.isArchived; + // Check if payload is empty if (Object.keys(payload).length === 0) { throw new Error("No valid update options provided"); } - + const response = await this.caller.call( fetch, `${this.apiUrl}/repos/${owner}/${promptName}`, @@ -3333,30 +3352,30 @@ export class Client { body: JSON.stringify(payload), headers: { ...this.headers, - 'Content-Type': 'application/json', + "Content-Type": "application/json", }, signal: AbortSignal.timeout(this.timeout_ms), ...this.fetchOptions, } ); - + if (!response.ok) { - throw new Error(`HTTP Error: ${response.status} - ${await response.text()}`); + throw new Error( + `HTTP Error: ${response.status} - ${await 
response.text()}` + ); } - + return response.json(); } - public async deletePrompt( - promptIdentifier: string - ): Promise { - if (!await this.promptExists(promptIdentifier)) { + public async deletePrompt(promptIdentifier: string): Promise { + if (!(await this.promptExists(promptIdentifier))) { throw new Error("Prompt does not exist, you must create it first."); } const [owner, promptName, _] = parsePromptIdentifier(promptIdentifier); - if (!await this._currentTenantIsOwner(owner)) { + if (!(await this._currentTenantIsOwner(owner))) { throw await this._ownerConflictError("delete a prompt", owner); } @@ -3377,19 +3396,25 @@ export class Client { public async pullPromptCommit( promptIdentifier: string, options?: { - includeModel?: boolean + includeModel?: boolean; } ): Promise { - const [owner, promptName, commitHash] = parsePromptIdentifier(promptIdentifier); - const serverInfo = await this._getServerInfo() - const useOptimization = isVersionGreaterOrEqual(serverInfo.version, '0.5.23'); + const [owner, promptName, commitHash] = + parsePromptIdentifier(promptIdentifier); + const serverInfo = await this._getServerInfo(); + const useOptimization = isVersionGreaterOrEqual( + serverInfo.version, + "0.5.23" + ); let passedCommitHash = commitHash; - if (!useOptimization && commitHash === 'latest') { - const latestCommitHash = await this._getLatestCommitHash(`${owner}/${promptName}`); + if (!useOptimization && commitHash === "latest") { + const latestCommitHash = await this._getLatestCommitHash( + `${owner}/${promptName}` + ); if (!latestCommitHash) { - throw new Error('No commits found'); + throw new Error("No commits found"); } else { passedCommitHash = latestCommitHash; } @@ -3397,7 +3422,9 @@ export class Client { const response = await this.caller.call( fetch, - `${this.apiUrl}/commits/${owner}/${promptName}/${passedCommitHash}${options?.includeModel ? 
'?include_model=true' : ''}`, + `${this.apiUrl}/commits/${owner}/${promptName}/${passedCommitHash}${ + options?.includeModel ? "?include_model=true" : "" + }`, { method: "GET", headers: this.headers, @@ -3422,15 +3449,15 @@ export class Client { examples: result.examples, }; } - + public async pullPrompt( promptIdentifier: string, options?: { - includeModel?: boolean, + includeModel?: boolean; } ): Promise { const promptObject = await this.pullPromptCommit(promptIdentifier, { - includeModel: options?.includeModel + includeModel: options?.includeModel, }); const prompt = JSON.stringify(promptObject.manifest); // need to add load from lc js @@ -3440,12 +3467,12 @@ export class Client { public async pushPrompt( promptIdentifier: string, options?: { - object?: any, - parentCommitHash?: string, - isPublic?: boolean, - description?: string, - readme?: string, - tags?: string[], + object?: any; + parentCommitHash?: string; + isPublic?: boolean; + description?: string; + readme?: string; + tags?: string[]; } ): Promise { // Create or update prompt metadata @@ -3457,15 +3484,12 @@ export class Client { isPublic: options?.isPublic, }); } else { - await this.createPrompt( - promptIdentifier, - { - description: options?.description, - readme: options?.readme, - tags: options?.tags, - isPublic: options?.isPublic, - } - ); + await this.createPrompt(promptIdentifier, { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + }); } if (options?.object === null) { diff --git a/js/src/schemas.ts b/js/src/schemas.ts index bbee1f9be..0e8a0bc7c 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -446,10 +446,10 @@ export interface ListPromptsResponse { } export enum PromptSortField { - NumDownloads = 'num_downloads', - NumViews = 'num_views', - UpdatedAt = 'updated_at', - NumLikes = 'num_likes', + NumDownloads = "num_downloads", + NumViews = "num_views", + UpdatedAt = "updated_at", + NumLikes = "num_likes", } export 
interface LikePromptResponse { @@ -460,5 +460,5 @@ export interface LangSmithSettings { id: string; displayName: string; createdAt: string; - tenantHandle?: string; -} \ No newline at end of file + tenantHandle?: string; +} diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 41622f9b2..4d16fc347 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -1,5 +1,9 @@ import { Dataset, Run } from "../schemas.js"; -import { FunctionMessage, HumanMessage, SystemMessage } from "@langchain/core/messages"; +import { + FunctionMessage, + HumanMessage, + SystemMessage, +} from "@langchain/core/messages"; import { Client } from "../client.js"; import { v4 as uuidv4 } from "uuid"; @@ -760,11 +764,14 @@ test("Test list prompts", async () => { test("Test get prompt", async () => { const client = new Client(); const promptName = `test_prompt_${uuidv4().slice(0, 8)}`; - const promptTemplate = ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" }); - + const promptTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); + const url = await client.pushPrompt(promptName, { object: promptTemplate }); expect(url).toBeDefined(); @@ -781,10 +788,15 @@ test("Test prompt exists", async () => { expect(await client.promptExists(nonExistentPrompt)).toBe(false); const existentPrompt = `existent_${uuidv4().slice(0, 8)}`; - await client.pushPrompt(existentPrompt, { object: ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" })}); + await client.pushPrompt(existentPrompt, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + 
new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); expect(await client.promptExists(existentPrompt)).toBe(true); await client.deletePrompt(existentPrompt); @@ -794,10 +806,15 @@ test("Test update prompt", async () => { const client = new Client(); const promptName = `test_update_prompt_${uuidv4().slice(0, 8)}`; - await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" })}); + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); const updatedData = await client.updatePrompt(promptName, { description: "Updated description", @@ -810,7 +827,9 @@ test("Test update prompt", async () => { const updatedPrompt = await client.getPrompt(promptName); expect(updatedPrompt?.description).toBe("Updated description"); expect(updatedPrompt?.isPublic).toBe(true); - expect(updatedPrompt?.tags).toEqual(expect.arrayContaining(["test", "update"])); + expect(updatedPrompt?.tags).toEqual( + expect.arrayContaining(["test", "update"]) + ); await client.deletePrompt(promptName); }); @@ -819,10 +838,15 @@ test("Test delete prompt", async () => { const client = new Client(); const promptName = `test_delete_prompt_${uuidv4().slice(0, 8)}`; - await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" })}); + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); expect(await 
client.promptExists(promptName)).toBe(true); await client.deletePrompt(promptName); @@ -833,15 +857,23 @@ test("Test create commit", async () => { const client = new Client(); const promptName = `test_create_commit_${uuidv4().slice(0, 8)}`; - await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" })}); - - const newTemplate = ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "My question is: {{question}}" }), - ], { templateFormat: "mustache" }); + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + const newTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "My question is: {{question}}" }), + ], + { templateFormat: "mustache" } + ); const commitUrl = await client.createCommit(promptName, newTemplate); expect(commitUrl).toBeDefined(); @@ -854,10 +886,15 @@ test("Test like and unlike prompt", async () => { const client = new Client(); const promptName = `test_like_prompt_${uuidv4().slice(0, 8)}`; - await client.pushPrompt(promptName, { object: ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" })}); + await client.pushPrompt(promptName, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); await client.likePrompt(promptName); let prompt = await client.getPrompt(promptName); @@ -874,10 +911,13 @@ test.only("Test pull prompt 
commit", async () => { const client = new Client(); const promptName = `test_pull_commit_${uuidv4().slice(0, 8)}`; - const initialTemplate = ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" }); + const initialTemplate = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); await client.pushPrompt(promptName, { object: initialTemplate }); const promptCommit = await client.pullPromptCommit(promptName); @@ -891,16 +931,19 @@ test("Test push and pull prompt", async () => { const client = new Client(); const promptName = `test_push_pull_${uuidv4().slice(0, 8)}`; - const template = ChatPromptTemplate.fromMessages([ - new SystemMessage({ content: "System message" }), - new HumanMessage({ content: "{{question}}" }), - ], { templateFormat: "mustache" }); + const template = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ); await client.pushPrompt(promptName, { object: template, description: "Test description", readme: "Test readme", - tags: ["test", "tag"] + tags: ["test", "tag"], }); const pulledPrompt = await client.pullPrompt(promptName); diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index 381dc734a..54a8e68a7 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -177,29 +177,43 @@ describe("Client", () => { }); }); - describe('parsePromptIdentifier', () => { - it('should parse valid identifiers correctly', () => { - expect(parsePromptIdentifier('name')).toEqual(['-', 'name', 'latest']); - expect(parsePromptIdentifier('owner/name')).toEqual(['owner', 'name', 'latest']); - expect(parsePromptIdentifier('owner/name:commit')).toEqual(['owner', 'name', 
'commit']); - expect(parsePromptIdentifier('name:commit')).toEqual(['-', 'name', 'commit']); + describe("parsePromptIdentifier", () => { + it("should parse valid identifiers correctly", () => { + expect(parsePromptIdentifier("name")).toEqual(["-", "name", "latest"]); + expect(parsePromptIdentifier("owner/name")).toEqual([ + "owner", + "name", + "latest", + ]); + expect(parsePromptIdentifier("owner/name:commit")).toEqual([ + "owner", + "name", + "commit", + ]); + expect(parsePromptIdentifier("name:commit")).toEqual([ + "-", + "name", + "commit", + ]); }); - it('should throw an error for invalid identifiers', () => { + it("should throw an error for invalid identifiers", () => { const invalidIdentifiers = [ - '', - '/', - ':', - 'owner/', - '/name', - 'owner//name', - 'owner/name/', - 'owner/name/extra', - ':commit', + "", + "/", + ":", + "owner/", + "/name", + "owner//name", + "owner/name/", + "owner/name/extra", + ":commit", ]; - invalidIdentifiers.forEach(identifier => { - expect(() => parsePromptIdentifier(identifier)).toThrowError(`Invalid identifier format: ${identifier}`); + invalidIdentifiers.forEach((identifier) => { + expect(() => parsePromptIdentifier(identifier)).toThrowError( + `Invalid identifier format: ${identifier}` + ); }); }); }); diff --git a/js/src/utils/prompts.ts b/js/src/utils/prompts.ts index 01f16c29b..53bbee3c4 100644 --- a/js/src/utils/prompts.ts +++ b/js/src/utils/prompts.ts @@ -1,32 +1,37 @@ -import { parse as parseVersion } from 'semver'; +import { parse as parseVersion } from "semver"; -export function isVersionGreaterOrEqual(current_version: string, target_version: string): boolean { +export function isVersionGreaterOrEqual( + current_version: string, + target_version: string +): boolean { const current = parseVersion(current_version); const target = parseVersion(target_version); if (!current || !target) { - throw new Error('Invalid version format.'); + throw new Error("Invalid version format."); } return current.compare(target) >= 0; 
} -export function parsePromptIdentifier(identifier: string): [string, string, string] { +export function parsePromptIdentifier( + identifier: string +): [string, string, string] { if ( !identifier || - identifier.split('/').length > 2 || - identifier.startsWith('/') || - identifier.endsWith('/') || - identifier.split(':').length > 2 + identifier.split("/").length > 2 || + identifier.startsWith("/") || + identifier.endsWith("/") || + identifier.split(":").length > 2 ) { throw new Error(`Invalid identifier format: ${identifier}`); } - const [ownerNamePart, commitPart] = identifier.split(':'); - const commit = commitPart || 'latest'; + const [ownerNamePart, commitPart] = identifier.split(":"); + const commit = commitPart || "latest"; - if (ownerNamePart.includes('/')) { - const [owner, name] = ownerNamePart.split('/', 2); + if (ownerNamePart.includes("/")) { + const [owner, name] = ownerNamePart.split("/", 2); if (!owner || !name) { throw new Error(`Invalid identifier format: ${identifier}`); } @@ -35,6 +40,6 @@ export function parsePromptIdentifier(identifier: string): [string, string, stri if (!ownerNamePart) { throw new Error(`Invalid identifier format: ${identifier}`); } - return ['-', ownerNamePart, commit]; + return ["-", ownerNamePart, commit]; } } From c15ccca44d396cbd1c3f2b6ce8083669d2539120 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 23 Jul 2024 16:13:33 -0700 Subject: [PATCH 311/373] add semver --- js/package.json | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/js/package.json b/js/package.json index 90e3086bb..a81a87b5d 100644 --- a/js/package.json +++ b/js/package.json @@ -97,13 +97,13 @@ "commander": "^10.0.1", "p-queue": "^6.6.2", "p-retry": "4", + "semver": "^7.6.3", "uuid": "^9.0.0" }, "devDependencies": { "@babel/preset-env": "^7.22.4", "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", - "langchain": "^0.2.0", "@langchain/core": "^0.2.0", "@langchain/langgraph": "^0.0.19", "@tsconfig/recommended": 
"^1.0.2", @@ -119,6 +119,7 @@ "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", + "langchain": "^0.2.0", "openai": "^4.38.5", "prettier": "^2.8.8", "ts-jest": "^29.1.0", @@ -126,9 +127,9 @@ "typescript": "^5.4.5" }, "peerDependencies": { - "openai": "*", + "@langchain/core": "*", "langchain": "*", - "@langchain/core": "*" + "openai": "*" }, "peerDependenciesMeta": { "openai": { @@ -261,4 +262,4 @@ }, "./package.json": "./package.json" } -} \ No newline at end of file +} From c70acb15d2142182fc8cc3d03723393dd4598b0f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 23 Jul 2024 16:24:14 -0700 Subject: [PATCH 312/373] add unit test --- js/src/tests/client.test.ts | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/js/src/tests/client.test.ts b/js/src/tests/client.test.ts index 54a8e68a7..694fb1e3c 100644 --- a/js/src/tests/client.test.ts +++ b/js/src/tests/client.test.ts @@ -6,7 +6,10 @@ import { getLangChainEnvVars, getLangChainEnvVarsMetadata, } from "../utils/env.js"; -import { parsePromptIdentifier } from "../utils/prompts.js"; +import { + isVersionGreaterOrEqual, + parsePromptIdentifier, +} from "../utils/prompts.js"; describe("Client", () => { describe("createLLMExample", () => { @@ -177,6 +180,23 @@ describe("Client", () => { }); }); + describe("isVersionGreaterOrEqual", () => { + it("should return true if the version is greater or equal", () => { + // Test versions equal to 0.5.23 + expect(isVersionGreaterOrEqual("0.5.23", "0.5.23")).toBe(true); + + // Test versions greater than 0.5.23 + expect(isVersionGreaterOrEqual("0.5.24", "0.5.23")); + expect(isVersionGreaterOrEqual("0.6.0", "0.5.23")); + expect(isVersionGreaterOrEqual("1.0.0", "0.5.23")); + + // Test versions less than 0.5.23 + expect(isVersionGreaterOrEqual("0.5.22", "0.5.23")).toBe(false); + expect(isVersionGreaterOrEqual("0.5.0", "0.5.23")).toBe(false); + expect(isVersionGreaterOrEqual("0.4.99", 
"0.5.23")).toBe(false); + }); + }); + describe("parsePromptIdentifier", () => { it("should parse valid identifiers correctly", () => { expect(parsePromptIdentifier("name")).toEqual(["-", "name", "latest"]); From 9a305a2b8ca9fc737cbad14f532c5bdf6a2b3acf Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 25 Jul 2024 16:36:27 -0700 Subject: [PATCH 313/373] store settings as promise --- js/src/client.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index a5f893272..aede56745 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -428,7 +428,7 @@ export class Client { private fetchOptions: RequestInit; - private settings: LangSmithSettings; + private settings: Promise | null; constructor(config: ClientConfig = {}) { const defaultConfig = Client.getDefaultClientConfig(); @@ -760,9 +760,10 @@ export class Client { protected async _getSettings() { if (!this.settings) { - this.settings = await this._get("/settings"); + this.settings = this._get("/settings"); } - return this.settings; + + return await this.settings; } public async createRun(run: CreateRunParams): Promise { @@ -3103,7 +3104,6 @@ export class Client { } const queryString = new URLSearchParams(params).toString(); - const response = await this.caller.call( fetch, `${this.apiUrl}/repos/?${queryString}`, From eb8a79f0c2eab20161a862c6ecd70466a3812ca7 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 25 Jul 2024 17:17:58 -0700 Subject: [PATCH 314/373] fixes --- js/src/client.ts | 251 ++++++++++++++------------------ js/src/schemas.ts | 47 +++--- js/src/tests/client.int.test.ts | 21 +-- 3 files changed, 144 insertions(+), 175 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index aede56745..3914a2a7f 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -18,6 +18,7 @@ import { LangChainBaseMessage, LangSmithSettings, LikePromptResponse, + ListCommitsResponse, ListPromptsResponse, Prompt, PromptCommit, @@ -581,9 +582,10 @@ export 
class Client { const response = await this._getResponse(path, queryParams); return response.json() as T; } - private async *_getPaginated( + private async *_getPaginated( path: string, - queryParams: URLSearchParams = new URLSearchParams() + queryParams: URLSearchParams = new URLSearchParams(), + transform?: (data: TResponse) => T[] ): AsyncIterable { let offset = Number(queryParams.get("offset")) || 0; const limit = Number(queryParams.get("limit")) || 100; @@ -603,7 +605,8 @@ export class Client { `Failed to fetch ${path}: ${response.status} ${response.statusText}` ); } - const items: T[] = await response.json(); + + const items: T[] =transform ? transform(await response.json()) : await response.json(); if (items.length === 0) { break; @@ -2944,7 +2947,7 @@ export class Client { protected async _currentTenantIsOwner(owner: string): Promise { const settings = await this._getSettings(); - return owner == "-" || settings.tenantHandle === owner; + return owner == "-" || settings.tenant_handle === owner; } protected async _ownerConflictError( @@ -2954,7 +2957,7 @@ export class Client { const settings = await this._getSettings(); return new Error( `Cannot ${action} for another tenant.\n - Current tenant: ${settings.tenantHandle}\n + Current tenant: ${settings.tenant_handle}\n Requested tenant: ${owner}` ); } @@ -2962,14 +2965,36 @@ export class Client { protected async _getLatestCommitHash( promptOwnerAndName: string ): Promise { - const commitsResp = await this.listCommits(promptOwnerAndName, { - limit: 1, - }); - const commits = commitsResp.commits; - if (commits.length === 0) { + const res = await this.caller.call( + fetch, + `${this.apiUrl}/commits/${promptOwnerAndName}/?limit=${1}&offset=${0}`, + { + method: "GET", + headers: this.headers, + signal: AbortSignal.timeout(this.timeout_ms), + ...this.fetchOptions, + } + ); + + const json = await res.json(); + if (!res.ok) { + const detail = + typeof json.detail === "string" + ? 
json.detail + : JSON.stringify(json.detail); + const error = new Error( + `Error ${res.status}: ${res.statusText}\n${detail}` + ); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (error as any).statusCode = res.status; + throw error; + } + + if (json.commits.length === 0) { return undefined; } - return commits[0].commit_hash; + + return json.commits[0].commit_hash; } protected async _likeOrUnlikePrompt( @@ -3044,106 +3069,89 @@ export class Client { return this._likeOrUnlikePrompt(promptIdentifier, false); } - public async listCommits( - promptOwnerAndName: string, - options?: { - limit?: number; - offset?: number; + public async *listCommits(promptOwnerAndName: string): AsyncIterableIterator { + for await (const commits of this._getPaginated( + `/commits/${promptOwnerAndName}/`, + {} as URLSearchParams, + (res) => res.commits, + )) { + yield* commits; } - ) { - const { limit = 100, offset = 0 } = options ?? {}; - const res = await this.caller.call( - fetch, - `${this.apiUrl}/commits/${promptOwnerAndName}/?limit=${limit}&offset=${offset}`, - { - method: "GET", - headers: this.headers, - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, + } + + public async *listProjects2({ + projectIds, + name, + nameContains, + referenceDatasetId, + referenceDatasetName, + referenceFree, + }: { + projectIds?: string[]; + name?: string; + nameContains?: string; + referenceDatasetId?: string; + referenceDatasetName?: string; + referenceFree?: boolean; + } = {}): AsyncIterable { + const params = new URLSearchParams(); + if (projectIds !== undefined) { + for (const projectId of projectIds) { + params.append("id", projectId); } - ); - const json = await res.json(); - if (!res.ok) { - const detail = - typeof json.detail === "string" - ? 
json.detail - : JSON.stringify(json.detail); - const error = new Error( - `Error ${res.status}: ${res.statusText}\n${detail}` - ); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (error as any).statusCode = res.status; - throw error; } - return json; + if (name !== undefined) { + params.append("name", name); + } + if (nameContains !== undefined) { + params.append("name_contains", nameContains); + } + if (referenceDatasetId !== undefined) { + params.append("reference_dataset", referenceDatasetId); + } else if (referenceDatasetName !== undefined) { + const dataset = await this.readDataset({ + datasetName: referenceDatasetName, + }); + params.append("reference_dataset", dataset.id); + } + if (referenceFree !== undefined) { + params.append("reference_free", referenceFree.toString()); + } + for await (const projects of this._getPaginated( + "/sessions", + params + )) { + yield* projects; + } } - public async listPrompts(options?: { - limit?: number; - offset?: number; + public async *listPrompts(options?: { isPublic?: boolean; isArchived?: boolean; sortField?: PromptSortField; sortDirection?: "desc" | "asc"; query?: string; - }): Promise { - const params: Record = { - limit: (options?.limit ?? 100).toString(), - offset: (options?.offset ?? 0).toString(), - sort_field: options?.sortField ?? "updated_at", - sort_direction: options?.sortDirection ?? "desc", - is_archived: (!!options?.isArchived).toString(), - }; + }): AsyncIterableIterator { + const params = new URLSearchParams(); + params.append("sort_field", options?.sortField ?? "updated_at"); + params.append("sort_direction", options?.sortDirection ?? 
"desc"); + params.append("is_archived", (!!options?.isArchived).toString()); if (options?.isPublic !== undefined) { - params.is_public = options.isPublic.toString(); + params.append("is_public", options.isPublic.toString()); } if (options?.query) { - params.query = options.query; + params.append("query", options.query); } - const queryString = new URLSearchParams(params).toString(); - const response = await this.caller.call( - fetch, - `${this.apiUrl}/repos/?${queryString}`, - { - method: "GET", - headers: this.headers, - signal: AbortSignal.timeout(this.timeout_ms), - ...this.fetchOptions, - } - ); - - const res = await response.json(); - - return { - repos: res.repos.map((result: any) => ({ - owner: result.owner, - repoHandle: result.repo_handle, - description: result.description, - id: result.id, - readme: result.readme, - tenantId: result.tenant_id, - tags: result.tags, - isPublic: result.is_public, - isArchived: result.is_archived, - createdAt: result.created_at, - updatedAt: result.updated_at, - originalRepoId: result.original_repo_id, - upstreamRepoId: result.upstream_repo_id, - fullName: result.full_name, - numLikes: result.num_likes, - numDownloads: result.num_downloads, - numViews: result.num_views, - likedByAuthUser: result.liked_by_auth_user, - lastCommitHash: result.last_commit_hash, - numCommits: result.num_commits, - originalRepoFullName: result.original_repo_full_name, - upstreamRepoFullName: result.upstream_repo_full_name, - })), - total: res.total, - }; + for await (const prompts of this._getPaginated( + "/repos", + params, + (res) => res.repos, + )) { + yield* prompts; + } } public async getPrompt(promptIdentifier: string): Promise { @@ -3165,30 +3173,7 @@ export class Client { const result = await response.json(); if (result.repo) { - return { - owner: result.repo.owner, - repoHandle: result.repo.repo_handle, - description: result.repo.description, - id: result.repo.id, - readme: result.repo.readme, - tenantId: result.repo.tenant_id, - tags: 
result.repo.tags, - isPublic: result.repo.is_public, - isArchived: result.repo.is_archived, - createdAt: result.repo.created_at, - updatedAt: result.repo.updated_at, - originalRepoId: result.repo.original_repo_id, - upstreamRepoId: result.repo.upstream_repo_id, - fullName: result.repo.full_name, - numLikes: result.repo.num_likes, - numDownloads: result.repo.num_downloads, - numViews: result.repo.num_views, - likedByAuthUser: result.repo.liked_by_auth_user, - lastCommitHash: result.repo.last_commit_hash, - numCommits: result.repo.num_commits, - originalRepoFullName: result.repo.original_repo_full_name, - upstreamRepoFullName: result.repo.upstream_repo_full_name, - }; + return result.repo as Prompt; } else { return null; } @@ -3204,7 +3189,7 @@ export class Client { } ): Promise { const settings = await this._getSettings(); - if (options?.isPublic && !settings.tenantHandle) { + if (options?.isPublic && !settings.tenant_handle) { throw new Error( `Cannot create a public prompt without first\n creating a LangChain Hub handle. 
@@ -3235,30 +3220,7 @@ export class Client { }); const { repo } = await response.json(); - return { - owner: repo.owner, - repoHandle: repo.repo_handle, - description: repo.description, - id: repo.id, - readme: repo.readme, - tenantId: repo.tenant_id, - tags: repo.tags, - isPublic: repo.is_public, - isArchived: repo.is_archived, - createdAt: repo.created_at, - updatedAt: repo.updated_at, - originalRepoId: repo.original_repo_id, - upstreamRepoId: repo.upstream_repo_id, - fullName: repo.full_name, - numLikes: repo.num_likes, - numDownloads: repo.num_downloads, - numViews: repo.num_views, - likedByAuthUser: repo.liked_by_auth_user, - lastCommitHash: repo.last_commit_hash, - numCommits: repo.num_commits, - originalRepoFullName: repo.original_repo_full_name, - upstreamRepoFullName: repo.upstream_repo_full_name, - }; + return repo as Prompt; } public async createCommit( @@ -3444,7 +3406,7 @@ export class Client { return { owner, repo: promptName, - commitHash: result.commit_hash, + commit_hash: result.commit_hash, manifest: result.manifest, examples: result.examples, }; @@ -3460,7 +3422,6 @@ export class Client { includeModel: options?.includeModel, }); const prompt = JSON.stringify(promptObject.manifest); - // need to add load from lc js return prompt; } diff --git a/js/src/schemas.ts b/js/src/schemas.ts index 0e8a0bc7c..350f114ba 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -410,34 +410,34 @@ export interface InvocationParamsSchema { export interface PromptCommit { owner: string; repo: string; - commitHash: string; + commit_hash: string; manifest: Record; examples: Array>; } export interface Prompt { - repoHandle: string; + repo_handle: string; description?: string; readme?: string; id: string; - tenantId: string; - createdAt: string; - updatedAt: string; - isPublic: boolean; - isArchived: boolean; + tenant_id: string; + created_at: string; + updated_at: string; + is_public: boolean; + is_archived: boolean; tags: string[]; - originalRepoId?: string; - 
upstreamRepoId?: string; + original_repo_id?: string; + upstream_repo_id?: string; owner?: string; - fullName: string; - numLikes: number; - numDownloads: number; - numViews: number; - likedByAuthUser: boolean; - lastCommitHash?: string; - numCommits: number; - originalRepoFullName?: string; - upstreamRepoFullName?: string; + full_name: string; + num_likes: number; + num_downloads: number; + num_views: number; + liked_by_auth_user: boolean; + last_commit_hash?: string; + num_commits: number; + original_repo_full_name?: string; + upstream_repo_full_name?: string; } export interface ListPromptsResponse { @@ -445,6 +445,11 @@ export interface ListPromptsResponse { total: number; } +export interface ListCommitsResponse { + commits: PromptCommit[]; + total: number; +} + export enum PromptSortField { NumDownloads = "num_downloads", NumViews = "num_views", @@ -458,7 +463,7 @@ export interface LikePromptResponse { export interface LangSmithSettings { id: string; - displayName: string; - createdAt: string; - tenantHandle?: string; + display_name: string; + created_at: string; + tenant_handle?: string; } diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 4d16fc347..824e47125 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -756,9 +756,12 @@ test.concurrent("Test run stats", async () => { test("Test list prompts", async () => { const client = new Client(); - const response = await client.listPrompts({ limit: 10, offset: 0 }); - expect(response.repos.length).toBeLessThanOrEqual(10); - expect(response.total).toBeGreaterThanOrEqual(response.repos.length); + const response = await client.listPrompts({ isPublic: true }); + expect(response).toBeDefined(); + for await (const prompt of response) { + console.log("this is what prompt looks like", prompt); + expect(prompt).toBeDefined(); + } }); test("Test get prompt", async () => { @@ -777,7 +780,7 @@ test("Test get prompt", async () => { const prompt = await 
client.getPrompt(promptName); expect(prompt).toBeDefined(); - expect(prompt?.repoHandle).toBe(promptName); + expect(prompt?.repo_handle).toBe(promptName); await client.deletePrompt(promptName); }); @@ -826,7 +829,7 @@ test("Test update prompt", async () => { const updatedPrompt = await client.getPrompt(promptName); expect(updatedPrompt?.description).toBe("Updated description"); - expect(updatedPrompt?.isPublic).toBe(true); + expect(updatedPrompt?.is_public).toBe(true); expect(updatedPrompt?.tags).toEqual( expect.arrayContaining(["test", "update"]) ); @@ -898,16 +901,16 @@ test("Test like and unlike prompt", async () => { await client.likePrompt(promptName); let prompt = await client.getPrompt(promptName); - expect(prompt?.numLikes).toBe(1); + expect(prompt?.num_likes).toBe(1); await client.unlikePrompt(promptName); prompt = await client.getPrompt(promptName); - expect(prompt?.numLikes).toBe(0); + expect(prompt?.num_likes).toBe(0); await client.deletePrompt(promptName); }); -test.only("Test pull prompt commit", async () => { +test("Test pull prompt commit", async () => { const client = new Client(); const promptName = `test_pull_commit_${uuidv4().slice(0, 8)}`; @@ -953,7 +956,7 @@ test("Test push and pull prompt", async () => { expect(promptInfo?.description).toBe("Test description"); expect(promptInfo?.readme).toBe("Test readme"); expect(promptInfo?.tags).toEqual(expect.arrayContaining(["test", "tag"])); - expect(promptInfo?.isPublic).toBe(false); + expect(promptInfo?.is_public).toBe(false); await client.deletePrompt(promptName); }); From 61211ab4957c34fd5f8bdbca0ba758d7b564402b Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 25 Jul 2024 18:50:21 -0700 Subject: [PATCH 315/373] tests --- js/src/client.ts | 22 +++++---- js/src/schemas.ts | 7 +-- js/src/tests/client.int.test.ts | 83 +++++++++++++++++++++++++++++++-- 3 files changed, 95 insertions(+), 17 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index 3914a2a7f..57acdc2ec 100644 --- 
a/js/src/client.ts +++ b/js/src/client.ts @@ -606,7 +606,9 @@ export class Client { ); } - const items: T[] =transform ? transform(await response.json()) : await response.json(); + const items: T[] = transform + ? transform(await response.json()) + : await response.json(); if (items.length === 0) { break; @@ -3069,11 +3071,16 @@ export class Client { return this._likeOrUnlikePrompt(promptIdentifier, false); } - public async *listCommits(promptOwnerAndName: string): AsyncIterableIterator { - for await (const commits of this._getPaginated( + public async *listCommits( + promptOwnerAndName: string + ): AsyncIterableIterator { + for await (const commits of this._getPaginated< + PromptCommit, + ListCommitsResponse + >( `/commits/${promptOwnerAndName}/`, {} as URLSearchParams, - (res) => res.commits, + (res) => res.commits )) { yield* commits; } @@ -3129,12 +3136,11 @@ export class Client { isPublic?: boolean; isArchived?: boolean; sortField?: PromptSortField; - sortDirection?: "desc" | "asc"; query?: string; }): AsyncIterableIterator { const params = new URLSearchParams(); params.append("sort_field", options?.sortField ?? "updated_at"); - params.append("sort_direction", options?.sortDirection ?? 
"desc"); + params.append("sort_direction", "desc"); params.append("is_archived", (!!options?.isArchived).toString()); if (options?.isPublic !== undefined) { @@ -3148,7 +3154,7 @@ export class Client { for await (const prompts of this._getPaginated( "/repos", params, - (res) => res.repos, + (res) => res.repos )) { yield* prompts; } @@ -3412,7 +3418,7 @@ export class Client { }; } - public async pullPrompt( + public async _pullPrompt( promptIdentifier: string, options?: { includeModel?: boolean; diff --git a/js/src/schemas.ts b/js/src/schemas.ts index 350f114ba..99fdd1056 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -450,12 +450,7 @@ export interface ListCommitsResponse { total: number; } -export enum PromptSortField { - NumDownloads = "num_downloads", - NumViews = "num_views", - UpdatedAt = "updated_at", - NumLikes = "num_likes", -} +export type PromptSortField = "num_downloads" | "num_views" | "updated_at" | "num_likes" export interface LikePromptResponse { likes: number; diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 824e47125..b7c0e5316 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -14,7 +14,10 @@ import { toArray, waitUntil, } from "./utils.js"; -import { ChatPromptTemplate } from "@langchain/core/prompts"; +import { ChatPromptTemplate, PromptTemplate } from "@langchain/core/prompts"; +import { ChatOpenAI } from "@langchain/openai"; +import { RunnableSequence } from "@langchain/core/runnables"; +import { load } from "langchain/load"; type CheckOutputsType = boolean | ((run: Run) => boolean); async function waitUntilRunFound( @@ -756,12 +759,65 @@ test.concurrent("Test run stats", async () => { test("Test list prompts", async () => { const client = new Client(); + // push 3 prompts + const promptName1 = `test_prompt_${uuidv4().slice(0, 8)}`; + const promptName2 = `test_prompt_${uuidv4().slice(0, 8)}`; + const promptName3 = `test_prompt_${uuidv4().slice(0, 8)}`; + + await 
client.pushPrompt(promptName1, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + isPublic: true, + }); + await client.pushPrompt(promptName2, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + await client.pushPrompt(promptName3, { + object: ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "{{question}}" }), + ], + { templateFormat: "mustache" } + ), + }); + + // expect at least one of the prompts to have promptName1 const response = await client.listPrompts({ isPublic: true }); + let found = false; expect(response).toBeDefined(); for await (const prompt of response) { - console.log("this is what prompt looks like", prompt); expect(prompt).toBeDefined(); + if (prompt.repo_handle === promptName1) { + found = true; + } + } + expect(found).toBe(true); + + // expect the prompts to be sorted by updated_at + const response2 = client.listPrompts({ sortField: "updated_at" }); + expect(response2).toBeDefined(); + let lastUpdatedAt: number | undefined; + for await (const prompt of response2) { + expect(prompt.updated_at).toBeDefined(); + const currentUpdatedAt = new Date(prompt.updated_at).getTime(); + if (lastUpdatedAt !== undefined) { + expect(currentUpdatedAt).toBeLessThanOrEqual(lastUpdatedAt); + } + lastUpdatedAt = currentUpdatedAt; } + expect(lastUpdatedAt).toBeDefined(); }); test("Test get prompt", async () => { @@ -949,7 +1005,7 @@ test("Test push and pull prompt", async () => { tags: ["test", "tag"], }); - const pulledPrompt = await client.pullPrompt(promptName); + const pulledPrompt = await client._pullPrompt(promptName); expect(pulledPrompt).toBeDefined(); const promptInfo = await 
client.getPrompt(promptName); @@ -960,3 +1016,24 @@ test("Test push and pull prompt", async () => { await client.deletePrompt(promptName); }); + +test("Test pull prompt include model", async () => { + const client = new Client(); + const model = new ChatOpenAI({}); + const promptTemplate = PromptTemplate.fromTemplate( + "Tell me a joke about {topic}" + ); + const promptWithModel = promptTemplate.pipe(model); + + const promptName = `test_prompt_with_model_${uuidv4().slice(0, 8)}`; + await client.pushPrompt(promptName, { object: promptWithModel }); + + const pulledPrompt = await client._pullPrompt(promptName, { + includeModel: true, + }); + const rs: RunnableSequence = await load(pulledPrompt); + expect(rs).toBeDefined(); + expect(rs).toBeInstanceOf(RunnableSequence); + + await client.deletePrompt(promptName); +}); From b9c511d8bbd5a8adc3674e902800a13f4eb39292 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 25 Jul 2024 18:56:26 -0700 Subject: [PATCH 316/373] add langchain/openai --- js/package.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/js/package.json b/js/package.json index a81a87b5d..b0234873a 100644 --- a/js/package.json +++ b/js/package.json @@ -66,7 +66,7 @@ "build:esm": "rm -f src/package.json && tsc --outDir dist/ && rm -rf dist/tests dist/**/tests", "build:cjs": "echo '{}' > src/package.json && tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rm -r dist-cjs src/package.json", "test": "cross-env NODE_OPTIONS=--experimental-vm-modules jest --passWithNoTests --testPathIgnorePatterns='\\.int\\.test.[tj]s' --testTimeout 30000", - "test:integration": "cross-env NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000", + "test:integration": "cross-env NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=client\\.int\\.test.ts --testTimeout 100000", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs 
--testTimeout 100000", "watch:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --watch --config jest.config.cjs --testTimeout 100000", "lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", @@ -106,6 +106,7 @@ "@jest/globals": "^29.5.0", "@langchain/core": "^0.2.0", "@langchain/langgraph": "^0.0.19", + "@langchain/openai": "^0.2.5", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.59.8", From 459e7bf493bc4119d9d5ae827d9dd1b48620df4c Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 25 Jul 2024 18:57:37 -0700 Subject: [PATCH 317/373] prettier --- js/src/schemas.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/js/src/schemas.ts b/js/src/schemas.ts index 99fdd1056..5692b8a86 100644 --- a/js/src/schemas.ts +++ b/js/src/schemas.ts @@ -450,7 +450,11 @@ export interface ListCommitsResponse { total: number; } -export type PromptSortField = "num_downloads" | "num_views" | "updated_at" | "num_likes" +export type PromptSortField = + | "num_downloads" + | "num_views" + | "updated_at" + | "num_likes"; export interface LikePromptResponse { likes: number; From d901757d62ca9de0dbfe4e96ea0e0c976b912f86 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 25 Jul 2024 19:01:03 -0700 Subject: [PATCH 318/373] rm test path --- js/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/package.json b/js/package.json index b0234873a..8dab2593f 100644 --- a/js/package.json +++ b/js/package.json @@ -66,7 +66,7 @@ "build:esm": "rm -f src/package.json && tsc --outDir dist/ && rm -rf dist/tests dist/**/tests", "build:cjs": "echo '{}' > src/package.json && tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rm -r dist-cjs src/package.json", "test": "cross-env NODE_OPTIONS=--experimental-vm-modules jest --passWithNoTests --testPathIgnorePatterns='\\.int\\.test.[tj]s' --testTimeout 30000", - "test:integration": 
"cross-env NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=client\\.int\\.test.ts --testTimeout 100000", + "test:integration": "cross-env NODE_OPTIONS=--experimental-vm-modules jest --testPathPattern=\\.int\\.test.ts --testTimeout 100000", "test:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --config jest.config.cjs --testTimeout 100000", "watch:single": "NODE_OPTIONS=--experimental-vm-modules yarn run jest --watch --config jest.config.cjs --testTimeout 100000", "lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint --cache --ext .ts,.js src/", From 952db7eef648ffd5a0575d16e79d19064393e5d8 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 26 Jul 2024 10:39:40 -0700 Subject: [PATCH 319/373] rm --- js/src/client.ts | 46 ---------------------------------------------- 1 file changed, 46 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index 57acdc2ec..bbf114346 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -3086,52 +3086,6 @@ export class Client { } } - public async *listProjects2({ - projectIds, - name, - nameContains, - referenceDatasetId, - referenceDatasetName, - referenceFree, - }: { - projectIds?: string[]; - name?: string; - nameContains?: string; - referenceDatasetId?: string; - referenceDatasetName?: string; - referenceFree?: boolean; - } = {}): AsyncIterable { - const params = new URLSearchParams(); - if (projectIds !== undefined) { - for (const projectId of projectIds) { - params.append("id", projectId); - } - } - if (name !== undefined) { - params.append("name", name); - } - if (nameContains !== undefined) { - params.append("name_contains", nameContains); - } - if (referenceDatasetId !== undefined) { - params.append("reference_dataset", referenceDatasetId); - } else if (referenceDatasetName !== undefined) { - const dataset = await this.readDataset({ - datasetName: referenceDatasetName, - }); - params.append("reference_dataset", dataset.id); - } - if (referenceFree !== undefined) { - 
params.append("reference_free", referenceFree.toString()); - } - for await (const projects of this._getPaginated( - "/sessions", - params - )) { - yield* projects; - } - } - public async *listPrompts(options?: { isPublic?: boolean; isArchived?: boolean; From 65c20cf8b414435e175588e4f45c875d40e92c6a Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 26 Jul 2024 10:55:21 -0700 Subject: [PATCH 320/373] nits --- js/src/client.ts | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/js/src/client.ts b/js/src/client.ts index bbf114346..36ded0d64 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -3131,6 +3131,12 @@ export class Client { return null; } + if (!response.ok) { + throw new Error( + `Failed to get prompt: ${response.status} ${await response.text()}` + ); + } + const result = await response.json(); if (result.repo) { return result.repo as Prompt; @@ -3179,6 +3185,12 @@ export class Client { ...this.fetchOptions, }); + if (!response.ok) { + throw new Error( + `Failed to create prompt: ${response.status} ${await response.text()}` + ); + } + const { repo } = await response.json(); return repo as Prompt; } @@ -3372,6 +3384,13 @@ export class Client { }; } + /** + * + * This method should not be used directly, use `import { pull } from "langchain/hub"` instead. + * Using this method directly returns the JSON string of the prompt rather than a LangChain object. 
+ * @private + * + */ public async _pullPrompt( promptIdentifier: string, options?: { @@ -3413,7 +3432,7 @@ export class Client { }); } - if (options?.object === null) { + if (!options?.object) { return await this._getPromptUrl(promptIdentifier); } From a12638946923ca90d2e725b386bf07f1ed92f4ae Mon Sep 17 00:00:00 2001 From: Tat Dat Duong Date: Fri, 26 Jul 2024 12:18:42 -0700 Subject: [PATCH 321/373] Code review --- js/src/wrappers/openai.ts | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index ba898f131..23ff7d77e 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -286,12 +286,11 @@ const _wrapClient = ( get(target, propKey, receiver) { const originalValue = target[propKey as keyof T]; if (typeof originalValue === "function") { - return traceable( - originalValue.bind(target), - Object.assign({ run_type: "llm" }, options, { - name: [runName, propKey.toString()].join("."), - }) - ); + return traceable(originalValue.bind(target), { + run_type: "llm", + ...options, + name: [runName, propKey.toString()].join("."), + }); } else if ( originalValue != null && !Array.isArray(originalValue) && From 2a81d658e6bab9cafe1728de3349c2b42625c2fe Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 26 Jul 2024 12:44:02 -0700 Subject: [PATCH 322/373] add openai --- js/package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/js/package.json b/js/package.json index caef7b910..8a1598cd6 100644 --- a/js/package.json +++ b/js/package.json @@ -107,6 +107,7 @@ "langchain": "^0.2.10", "@langchain/core": "^0.2.17", "@langchain/langgraph": "^0.0.29", + "@langchain/openai": "^0.2.5", "@tsconfig/recommended": "^1.0.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.59.8", From 855a1867d484c4b7d24114a84e1f2e29b7c6bbe7 Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Sat, 27 Jul 2024 14:24:06 -0700 Subject: [PATCH 323/373] Add wrapAISDKModel method for Vercel's AI 
SDK --- js/package.json | 7 +- js/scripts/create-entrypoints.js | 1 + js/src/tests/wrapped_ai_sdk.int.test.ts | 50 ++++++ js/src/traceable.ts | 93 +++++++++++- js/src/wrappers/generic.ts | 72 +++++++++ js/src/wrappers/index.ts | 1 + js/src/wrappers/openai.ts | 70 --------- js/src/wrappers/vercel.ts | 79 ++++++++++ js/yarn.lock | 193 ++++++++++++++++++++++++ 9 files changed, 493 insertions(+), 73 deletions(-) create mode 100644 js/src/tests/wrapped_ai_sdk.int.test.ts create mode 100644 js/src/wrappers/generic.ts create mode 100644 js/src/wrappers/vercel.ts diff --git a/js/package.json b/js/package.json index 8a1598cd6..606c2d3e2 100644 --- a/js/package.json +++ b/js/package.json @@ -101,10 +101,10 @@ "uuid": "^9.0.0" }, "devDependencies": { + "@ai-sdk/anthropic": "^0.0.33", "@babel/preset-env": "^7.22.4", "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", - "langchain": "^0.2.10", "@langchain/core": "^0.2.17", "@langchain/langgraph": "^0.0.29", "@langchain/openai": "^0.2.5", @@ -112,6 +112,7 @@ "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.59.8", "@typescript-eslint/parser": "^5.59.8", + "ai": "^3.2.37", "babel-jest": "^29.5.0", "cross-env": "^7.0.3", "dotenv": "^16.1.3", @@ -121,11 +122,13 @@ "eslint-plugin-no-instanceof": "^1.0.1", "eslint-plugin-prettier": "^4.2.1", "jest": "^29.5.0", + "langchain": "^0.2.10", "openai": "^4.38.5", "prettier": "^2.8.8", "ts-jest": "^29.1.0", "ts-node": "^10.9.1", - "typescript": "^5.4.5" + "typescript": "^5.4.5", + "zod": "^3.23.8" }, "peerDependencies": { "@langchain/core": "*", diff --git a/js/scripts/create-entrypoints.js b/js/scripts/create-entrypoints.js index 61d7341ab..a3487f756 100644 --- a/js/scripts/create-entrypoints.js +++ b/js/scripts/create-entrypoints.js @@ -17,6 +17,7 @@ const entrypoints = { wrappers: "wrappers/index", anonymizer: "anonymizer/index", "wrappers/openai": "wrappers/openai", + "wrappers/vercel": "wrappers/vercel", "singletons/traceable": "singletons/traceable", }; diff --git 
a/js/src/tests/wrapped_ai_sdk.int.test.ts b/js/src/tests/wrapped_ai_sdk.int.test.ts new file mode 100644 index 000000000..80556b61e --- /dev/null +++ b/js/src/tests/wrapped_ai_sdk.int.test.ts @@ -0,0 +1,50 @@ +import { anthropic } from "@ai-sdk/anthropic"; +import { generateObject, generateText, streamObject, streamText } from "ai"; +import { z } from "zod"; +import { wrapAISDKModel } from "../wrappers/vercel.js"; + +test("AI SDK generateText", async () => { + const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const { text } = await generateText({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + }); + console.log(text); +}); + +test("AI SDK generateObject", async () => { + const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const { object } = await generateObject({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + schema: z.object({ + ingredients: z.array(z.string()), + }), + }); + console.log(object); +}); + +test("AI SDK streamText", async () => { + const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const { textStream } = await streamText({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + }); + for await (const chunk of textStream) { + console.log(chunk); + } +}); + +test("AI SDK streamObject", async () => { + const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const { partialObjectStream } = await streamObject({ + model: modelWithTracing, + prompt: "Write a vegetarian lasagna recipe for 4 people.", + schema: z.object({ + ingredients: z.array(z.string()), + }), + }); + for await (const chunk of partialObjectStream) { + console.log(chunk); + } +}); diff --git a/js/src/traceable.ts b/js/src/traceable.ts index 1f009c680..f663a6de2 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ -279,6 +279,7 @@ export function 
traceable any>( // eslint-disable-next-line @typescript-eslint/no-explicit-any aggregator?: (args: any[]) => any; argsConfigPath?: [number] | [number, string]; + __finalTracedIteratorKey?: string; /** * Extract invocation parameters from the arguments of the traced function. @@ -294,7 +295,12 @@ export function traceable any>( } ) { type Inputs = Parameters; - const { aggregator, argsConfigPath, ...runTreeConfig } = config ?? {}; + const { + aggregator, + __finalTracedIteratorKey, + argsConfigPath, + ...runTreeConfig + } = config ?? {}; const traceableFunc = ( ...args: Inputs | [RunTree, ...Inputs] | [RunnableConfigLike, ...Inputs] @@ -434,6 +440,47 @@ export function traceable any>( return chunks; } + function tapReadableStreamForTracing( + stream: ReadableStream, + snapshot: ReturnType | undefined + ) { + const reader = stream.getReader(); + let finished = false; + const chunks: unknown[] = []; + + const tappedStream = new ReadableStream({ + async start(controller) { + // eslint-disable-next-line no-constant-condition + while (true) { + const result = await (snapshot + ? 
snapshot(() => reader.read()) + : reader.read()); + if (result.done) { + finished = true; + await currentRunTree?.end( + handleRunOutputs(await handleChunks(chunks)) + ); + await handleEnd(); + controller.close(); + break; + } + chunks.push(result.value); + controller.enqueue(result.value); + } + }, + async cancel(reason) { + if (!finished) await currentRunTree?.end(undefined, "Cancelled"); + await currentRunTree?.end( + handleRunOutputs(await handleChunks(chunks)) + ); + await handleEnd(); + return reader.cancel(reason); + }, + }); + + return tappedStream; + } + async function* wrapAsyncIteratorForTracing( iterator: AsyncIterator, snapshot: ReturnType | undefined @@ -463,10 +510,14 @@ export function traceable any>( await handleEnd(); } } + function wrapAsyncGeneratorForTracing( iterable: AsyncIterable, snapshot: ReturnType | undefined ) { + if (isReadableStream(iterable)) { + return tapReadableStreamForTracing(iterable, snapshot); + } const iterator = iterable[Symbol.asyncIterator](); const wrappedIterator = wrapAsyncIteratorForTracing(iterator, snapshot); iterable[Symbol.asyncIterator] = () => wrappedIterator; @@ -512,6 +563,25 @@ export function traceable any>( return wrapAsyncGeneratorForTracing(returnValue, snapshot); } + if ( + !Array.isArray(returnValue) && + typeof returnValue === "object" && + returnValue != null && + __finalTracedIteratorKey !== undefined && + isAsyncIterable( + (returnValue as Record)[__finalTracedIteratorKey] + ) + ) { + const snapshot = AsyncLocalStorage.snapshot(); + return { + ...returnValue, + [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing( + (returnValue as Record)[__finalTracedIteratorKey], + snapshot + ), + }; + } + const tracedPromise = new Promise((resolve, reject) => { Promise.resolve(returnValue) .then( @@ -523,6 +593,27 @@ export function traceable any>( ); } + if ( + !Array.isArray(rawOutput) && + typeof rawOutput === "object" && + rawOutput != null && + __finalTracedIteratorKey !== undefined && + 
isAsyncIterable( + (rawOutput as Record)[__finalTracedIteratorKey] + ) + ) { + const snapshot = AsyncLocalStorage.snapshot(); + return { + ...rawOutput, + [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing( + (rawOutput as Record)[ + __finalTracedIteratorKey + ], + snapshot + ), + }; + } + if (isGenerator(wrappedFunc) && isIteratorLike(rawOutput)) { const chunks = gatherAll(rawOutput); diff --git a/js/src/wrappers/generic.ts b/js/src/wrappers/generic.ts new file mode 100644 index 000000000..3b62bc0f8 --- /dev/null +++ b/js/src/wrappers/generic.ts @@ -0,0 +1,72 @@ +import type { RunTreeConfig } from "../index.js"; +import { traceable } from "../traceable.js"; + +export const _wrapClient = ( + sdk: T, + runName: string, + options?: Omit +): T => { + return new Proxy(sdk, { + get(target, propKey, receiver) { + const originalValue = target[propKey as keyof T]; + if (typeof originalValue === "function") { + return traceable(originalValue.bind(target), { + run_type: "llm", + ...options, + name: [runName, propKey.toString()].join("."), + }); + } else if ( + originalValue != null && + !Array.isArray(originalValue) && + // eslint-disable-next-line no-instanceof/no-instanceof + !(originalValue instanceof Date) && + typeof originalValue === "object" + ) { + return _wrapClient( + originalValue, + [runName, propKey.toString()].join("."), + options + ); + } else { + return Reflect.get(target, propKey, receiver); + } + }, + }); +}; + +type WrapSDKOptions = Partial< + RunTreeConfig & { + /** + * @deprecated Use `name` instead. + */ + runName: string; + } +>; + +/** + * Wrap an arbitrary SDK, enabling automatic LangSmith tracing. + * Method signatures are unchanged. + * + * Note that this will wrap and trace ALL SDK methods, not just + * LLM completion methods. If the passed SDK contains other methods, + * we recommend using the wrapped instance for LLM calls only. + * @param sdk An arbitrary SDK instance. + * @param options LangSmith options. 
+ * @returns + */ +export const wrapSDK = ( + sdk: T, + options?: WrapSDKOptions +): T => { + const traceableOptions = options ? { ...options } : undefined; + if (traceableOptions != null) { + delete traceableOptions.runName; + delete traceableOptions.name; + } + + return _wrapClient( + sdk, + options?.name ?? options?.runName ?? sdk.constructor?.name, + traceableOptions + ); +}; diff --git a/js/src/wrappers/index.ts b/js/src/wrappers/index.ts index e8f265647..6ff1385b0 100644 --- a/js/src/wrappers/index.ts +++ b/js/src/wrappers/index.ts @@ -1 +1,2 @@ export * from "./openai.js"; +export { wrapSDK } from "./generic.js"; diff --git a/js/src/wrappers/openai.ts b/js/src/wrappers/openai.ts index 23ff7d77e..05fae4d5d 100644 --- a/js/src/wrappers/openai.ts +++ b/js/src/wrappers/openai.ts @@ -276,73 +276,3 @@ export const wrapOpenAI = ( return openai as PatchedOpenAIClient; }; - -const _wrapClient = ( - sdk: T, - runName: string, - options?: Omit -): T => { - return new Proxy(sdk, { - get(target, propKey, receiver) { - const originalValue = target[propKey as keyof T]; - if (typeof originalValue === "function") { - return traceable(originalValue.bind(target), { - run_type: "llm", - ...options, - name: [runName, propKey.toString()].join("."), - }); - } else if ( - originalValue != null && - !Array.isArray(originalValue) && - // eslint-disable-next-line no-instanceof/no-instanceof - !(originalValue instanceof Date) && - typeof originalValue === "object" - ) { - return _wrapClient( - originalValue, - [runName, propKey.toString()].join("."), - options - ); - } else { - return Reflect.get(target, propKey, receiver); - } - }, - }); -}; - -type WrapSDKOptions = Partial< - RunTreeConfig & { - /** - * @deprecated Use `name` instead. - */ - runName: string; - } ->; - -/** - * Wrap an arbitrary SDK, enabling automatic LangSmith tracing. - * Method signatures are unchanged. - * - * Note that this will wrap and trace ALL SDK methods, not just - * LLM completion methods. 
If the passed SDK contains other methods, - * we recommend using the wrapped instance for LLM calls only. - * @param sdk An arbitrary SDK instance. - * @param options LangSmith options. - * @returns - */ -export const wrapSDK = ( - sdk: T, - options?: WrapSDKOptions -): T => { - const traceableOptions = options ? { ...options } : undefined; - if (traceableOptions != null) { - delete traceableOptions.runName; - delete traceableOptions.name; - } - - return _wrapClient( - sdk, - options?.name ?? options?.runName ?? sdk.constructor?.name, - traceableOptions - ); -}; diff --git a/js/src/wrappers/vercel.ts b/js/src/wrappers/vercel.ts new file mode 100644 index 000000000..0264f112f --- /dev/null +++ b/js/src/wrappers/vercel.ts @@ -0,0 +1,79 @@ +import type { RunTreeConfig } from "../index.js"; +import { traceable } from "../traceable.js"; +import { _wrapClient } from "./generic.js"; + +/** + * Wrap a Vercel AI SDK model, enabling automatic LangSmith tracing. + * After wrapping a model, you can use it with the Vercel AI SDK Core + * methods as normal. + * + * @example + * ```ts + * import { anthropic } from "@ai-sdk/anthropic"; + * import { streamText } from "ai"; + * import { wrapAISDKModel } from "langsmith/wrappers/vercel"; + * + * const anthropicModel = anthropic("claude-3-haiku-20240307"); + * + * const modelWithTracing = wrapAISDKModel(anthropicModel); + * + * const { textStream } = await streamText({ + * model: modelWithTracing, + * prompt: "Write a vegetarian lasagna recipe for 4 people.", + * }); + * + * for await (const chunk of textStream) { + * console.log(chunk); + * } + * ``` + * @param model An AI SDK model instance. + * @param options LangSmith options. + * @returns + */ +export const wrapAISDKModel = ( + model: T, + options?: Partial +): T => { + if ( + !("doStream" in model) || + typeof model.doStream !== "function" || + !("doGenerate" in model) || + typeof model.doGenerate !== "function" + ) { + throw new Error( + `Received invalid input. 
This version of wrapAISDKModel only supports Vercel LanguageModelV1 instances.` + ); + } + const runName = options?.name ?? model.constructor?.name; + return new Proxy(model, { + get(target, propKey, receiver) { + const originalValue = target[propKey as keyof T]; + if (typeof originalValue === "function") { + let __finalTracedIteratorKey; + if (propKey === "doStream") { + __finalTracedIteratorKey = "stream"; + } + return traceable(originalValue.bind(target), { + run_type: "llm", + name: runName, + ...options, + __finalTracedIteratorKey, + }); + } else if ( + originalValue != null && + !Array.isArray(originalValue) && + // eslint-disable-next-line no-instanceof/no-instanceof + !(originalValue instanceof Date) && + typeof originalValue === "object" + ) { + return _wrapClient( + originalValue, + [runName, propKey.toString()].join("."), + options + ); + } else { + return Reflect.get(target, propKey, receiver); + } + }, + }); +}; diff --git a/js/yarn.lock b/js/yarn.lock index cf459cb03..4f652fb3a 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -2,6 +2,74 @@ # yarn lockfile v1 +"@ai-sdk/anthropic@^0.0.33": + version "0.0.33" + resolved "https://registry.yarnpkg.com/@ai-sdk/anthropic/-/anthropic-0.0.33.tgz#ab0d690e844965e0f54e6bbc85b91f0a90a4153d" + integrity sha512-xCgerb04tpVOYLL3CmaXUWXa+U8Dt8vflkat4m/0PKQdYGq06JLx/+vaRO8dEz+zU12sQl+3HTPrX53v/wVSxQ== + dependencies: + "@ai-sdk/provider" "0.0.14" + "@ai-sdk/provider-utils" "1.0.5" + +"@ai-sdk/provider-utils@1.0.5": + version "1.0.5" + resolved "https://registry.yarnpkg.com/@ai-sdk/provider-utils/-/provider-utils-1.0.5.tgz#765c60871019ded104d79b4cea0805ba563bb5aa" + integrity sha512-XfOawxk95X3S43arn2iQIFyWGMi0DTxsf9ETc6t7bh91RPWOOPYN1tsmS5MTKD33OGJeaDQ/gnVRzXUCRBrckQ== + dependencies: + "@ai-sdk/provider" "0.0.14" + eventsource-parser "1.1.2" + nanoid "3.3.6" + secure-json-parse "2.7.0" + +"@ai-sdk/provider@0.0.14": + version "0.0.14" + resolved 
"https://registry.yarnpkg.com/@ai-sdk/provider/-/provider-0.0.14.tgz#a07569c39a8828aa8312cf1ac6f35ce6ee1b2fce" + integrity sha512-gaQ5Y033nro9iX1YUjEDFDRhmMcEiCk56LJdIUbX5ozEiCNCfpiBpEqrjSp/Gp5RzBS2W0BVxfG7UGW6Ezcrzg== + dependencies: + json-schema "0.4.0" + +"@ai-sdk/react@0.0.30": + version "0.0.30" + resolved "https://registry.yarnpkg.com/@ai-sdk/react/-/react-0.0.30.tgz#51d586141a81d7f9b76798922b206e8c6faf04dc" + integrity sha512-VnHYRzwhiM4bZdL9DXwJltN8Qnz1MkFdRTa1y7KdmHSJ18ebCNWmPO5XJhnZiQdEXHYmrzZ3WiVt2X6pxK07FA== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + swr "2.2.5" + +"@ai-sdk/solid@0.0.23": + version "0.0.23" + resolved "https://registry.yarnpkg.com/@ai-sdk/solid/-/solid-0.0.23.tgz#712cf1a02bfc337806c5c1b486d16252bec57a15" + integrity sha512-GMojG2PsqwnOGfx7C1MyQPzPBIlC44qn3ykjp9OVnN2Fu47mcFp3QM6gwWoHwNqi7FQDjRy+s/p+8EqYIQcAwg== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + +"@ai-sdk/svelte@0.0.24": + version "0.0.24" + resolved "https://registry.yarnpkg.com/@ai-sdk/svelte/-/svelte-0.0.24.tgz#2519b84a0c104c82d5e48d3b8e9350e9dd4af6cf" + integrity sha512-ZjzzvfYLE01VTO0rOZf6z9sTGhJhe6IYZMxQiM3P+zemufRYe57NDcLYEb6h+2qhvU6Z+k/Q+Nh/spAt0JzGUg== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + sswr "2.1.0" + +"@ai-sdk/ui-utils@0.0.20": + version "0.0.20" + resolved "https://registry.yarnpkg.com/@ai-sdk/ui-utils/-/ui-utils-0.0.20.tgz#c68968185a7cc33f7d98d13999731e1c7b672cbb" + integrity sha512-6MRWigzXfuxUcAYEFMLP6cLbALJkg12Iz1Sl+wuPMpB6aw7di2ePiTuNakFUYjgP7TNsW4UxzpypBqqJ1KNB0A== + dependencies: + "@ai-sdk/provider-utils" "1.0.5" + secure-json-parse "2.7.0" + +"@ai-sdk/vue@0.0.24": + version "0.0.24" + resolved "https://registry.yarnpkg.com/@ai-sdk/vue/-/vue-0.0.24.tgz#2e72f7e755850ed51540f9a7b25dc6b228a8647a" + integrity sha512-0S+2dVSui6LFgaWoFx+3h5R7GIP9MxdJo63tFuLvgyKr2jmpo5S5kGcWl95vNdzKDqaesAXfOnky+tn5A2d49A== + dependencies: + 
"@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/ui-utils" "0.0.20" + swrv "1.0.4" + "@ampproject/remapping@^2.2.0": version "2.2.1" resolved "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz" @@ -1338,6 +1406,17 @@ zod "^3.22.4" zod-to-json-schema "^3.22.3" +"@langchain/openai@^0.2.5": + version "0.2.5" + resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.2.5.tgz#e85b983986a7415ea743d4c854bb0674134334d4" + integrity sha512-gQXS5VBFyAco0jgSnUVan6fYVSIxlffmDaeDGpXrAmz2nQPgiN/h24KYOt2NOZ1zRheRzRuO/CfRagMhyVUaFA== + dependencies: + "@langchain/core" ">=0.2.16 <0.3.0" + js-tiktoken "^1.0.12" + openai "^4.49.1" + zod "^3.22.4" + zod-to-json-schema "^3.22.3" + "@langchain/textsplitters@~0.0.0": version "0.0.2" resolved "https://registry.yarnpkg.com/@langchain/textsplitters/-/textsplitters-0.0.2.tgz#500baa8341fb7fc86fca531a4192665a319504a3" @@ -1367,6 +1446,11 @@ "@nodelib/fs.scandir" "2.1.5" fastq "^1.6.0" +"@opentelemetry/api@1.9.0": + version "1.9.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.9.0.tgz#d03eba68273dc0f7509e2a3d5cba21eae10379fe" + integrity sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg== + "@sinclair/typebox@^0.25.16": version "0.25.24" resolved "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz" @@ -1444,6 +1528,11 @@ dependencies: "@babel/types" "^7.20.7" +"@types/diff-match-patch@^1.0.36": + version "1.0.36" + resolved "https://registry.yarnpkg.com/@types/diff-match-patch/-/diff-match-patch-1.0.36.tgz#dcef10a69d357fe9d43ac4ff2eca6b85dbf466af" + integrity sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg== + "@types/graceful-fs@^4.1.3": version "4.1.6" resolved "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz" @@ -1658,6 +1747,26 @@ agentkeepalive@^4.2.1: dependencies: humanize-ms "^1.2.1" +ai@^3.2.37: + version "3.2.37" + resolved 
"https://registry.yarnpkg.com/ai/-/ai-3.2.37.tgz#148ed3124e6b0a01c703597471718520ef1c498d" + integrity sha512-waqKYZOE1zJwKEHx69R4v/xNG0a1o0He8TDgX29hUu36Zk0yrBJoVSlXbC9KoFuxW4eRpt+gZv1kqd1nVc1CGg== + dependencies: + "@ai-sdk/provider" "0.0.14" + "@ai-sdk/provider-utils" "1.0.5" + "@ai-sdk/react" "0.0.30" + "@ai-sdk/solid" "0.0.23" + "@ai-sdk/svelte" "0.0.24" + "@ai-sdk/ui-utils" "0.0.20" + "@ai-sdk/vue" "0.0.24" + "@opentelemetry/api" "1.9.0" + eventsource-parser "1.1.2" + json-schema "0.4.0" + jsondiffpatch "0.6.0" + nanoid "3.3.6" + secure-json-parse "2.7.0" + zod-to-json-schema "3.22.5" + ajv@^6.10.0, ajv@^6.12.4: version "6.12.6" resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz" @@ -1971,6 +2080,11 @@ chalk@^4.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@^5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.3.0.tgz#67c20a7ebef70e7f3970a01f90fa210cb6860385" + integrity sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w== + char-regex@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz" @@ -1986,6 +2100,11 @@ cjs-module-lexer@^1.0.0: resolved "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz" integrity sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA== +client-only@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/client-only/-/client-only-0.0.1.tgz#38bba5d403c41ab150bff64a95c85013cf73bca1" + integrity sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA== + cliui@^8.0.1: version "8.0.1" resolved "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz" @@ -2136,6 +2255,11 @@ detect-newline@^3.0.0: resolved "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz" integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== +diff-match-patch@^1.0.5: + 
version "1.0.5" + resolved "https://registry.yarnpkg.com/diff-match-patch/-/diff-match-patch-1.0.5.tgz#abb584d5f10cd1196dfc55aa03701592ae3f7b37" + integrity sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw== + diff-sequences@^29.4.3: version "29.4.3" resolved "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz" @@ -2452,6 +2576,11 @@ eventemitter3@^4.0.4: resolved "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz" integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== +eventsource-parser@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/eventsource-parser/-/eventsource-parser-1.1.2.tgz#ed6154a4e3dbe7cda9278e5e35d2ffc58b309f89" + integrity sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA== + execa@^5.0.0: version "5.1.1" resolved "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz" @@ -3462,6 +3591,11 @@ json-schema-traverse@^0.4.1: resolved "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz" integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== +json-schema@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" + integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== + json-stable-stringify-without-jsonify@^1.0.1: version "1.0.1" resolved "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz" @@ -3479,6 +3613,15 @@ json5@^2.2.2, json5@^2.2.3: resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz" integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== +jsondiffpatch@0.6.0: + version "0.6.0" + resolved 
"https://registry.yarnpkg.com/jsondiffpatch/-/jsondiffpatch-0.6.0.tgz#daa6a25bedf0830974c81545568d5f671c82551f" + integrity sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ== + dependencies: + "@types/diff-match-patch" "^1.0.36" + chalk "^5.3.0" + diff-match-patch "^1.0.5" + jsonpointer@^5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-5.0.1.tgz#2110e0af0900fd37467b5907ecd13a7884a1b559" @@ -3705,6 +3848,11 @@ mustache@^4.2.0: resolved "https://registry.yarnpkg.com/mustache/-/mustache-4.2.0.tgz#e5892324d60a12ec9c2a73359edca52972bf6f64" integrity sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ== +nanoid@3.3.6: + version "3.3.6" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.6.tgz#443380c856d6e9f9824267d960b4236ad583ea4c" + integrity sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA== + natural-compare-lite@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz" @@ -4128,6 +4276,11 @@ safe-regex-test@^1.0.0: get-intrinsic "^1.1.3" is-regex "^1.1.4" +secure-json-parse@2.7.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/secure-json-parse/-/secure-json-parse-2.7.0.tgz#5a5f9cd6ae47df23dba3151edd06855d47e09862" + integrity sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw== + semver@7.x, semver@^7.3.5, semver@^7.3.7: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" @@ -4140,6 +4293,11 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== +semver@^7.6.3: + version "7.6.3" + resolved 
"https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz" @@ -4194,6 +4352,13 @@ sprintf-js@~1.0.2: resolved "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz" integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== +sswr@2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/sswr/-/sswr-2.1.0.tgz#1eb64cd647cc9e11f871e7f43554abd8c64e1103" + integrity sha512-Cqc355SYlTAaUt8iDPaC/4DPPXK925PePLMxyBKuWd5kKc5mwsG3nT9+Mq2tyguL5s7b4Jg+IRMpTRsNTAfpSQ== + dependencies: + swrev "^4.0.0" + stack-utils@^2.0.3: version "2.0.6" resolved "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz" @@ -4298,6 +4463,24 @@ supports-preserve-symlinks-flag@^1.0.0: resolved "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz" integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== +swr@2.2.5: + version "2.2.5" + resolved "https://registry.yarnpkg.com/swr/-/swr-2.2.5.tgz#063eea0e9939f947227d5ca760cc53696f46446b" + integrity sha512-QtxqyclFeAsxEUeZIYmsaQ0UjimSq1RZ9Un7I68/0ClKK/U3LoyQunwkQfJZr2fc22DfIXLNDc2wFyTEikCUpg== + dependencies: + client-only "^0.0.1" + use-sync-external-store "^1.2.0" + +swrev@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/swrev/-/swrev-4.0.0.tgz#83da6983c7ef9d71ac984a9b169fc197cbf18ff8" + integrity sha512-LqVcOHSB4cPGgitD1riJ1Hh4vdmITOp+BkmfmXRh4hSF/t7EnS4iD+SOTmq7w5pPm/SiPeto4ADbKS6dHUDWFA== + +swrv@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/swrv/-/swrv-1.0.4.tgz#278b4811ed4acbb1ae46654972a482fd1847e480" + integrity 
sha512-zjEkcP8Ywmj+xOJW3lIT65ciY/4AL4e/Or7Gj0MzU3zBJNMdJiT8geVZhINavnlHRMMCcJLHhraLTAiDOTmQ9g== + test-exclude@^6.0.0: version "6.0.0" resolved "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz" @@ -4478,6 +4661,11 @@ uri-js@^4.2.2: dependencies: punycode "^2.1.0" +use-sync-external-store@^1.2.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz#c3b6390f3a30eba13200d2302dcdf1e7b57b2ef9" + integrity sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw== + uuid@^10.0.0: version "10.0.0" resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294" @@ -4637,6 +4825,11 @@ yocto-queue@^0.1.0: resolved "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== +zod-to-json-schema@3.22.5: + version "3.22.5" + resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.5.tgz#3646e81cfc318dbad2a22519e5ce661615418673" + integrity sha512-+akaPo6a0zpVCCseDed504KBJUQpEW5QZw7RMneNmKw+fGaML1Z9tUNLnHHAC8x6dzVRO1eB2oEMyZRnuBZg7Q== + zod-to-json-schema@^3.22.3: version "3.22.4" resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.22.4.tgz#f8cc691f6043e9084375e85fb1f76ebafe253d70" From 0fe267804ba0ac2d153a2f02db8d91f1570fb240 Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Sat, 27 Jul 2024 14:40:05 -0700 Subject: [PATCH 324/373] Use OpenAI in int test for CI --- js/.gitignore | 4 ++++ js/package.json | 15 ++++++++++++++- js/src/tests/wrapped_ai_sdk.int.test.ts | 10 +++++----- js/tsconfig.json | 1 + js/yarn.lock | 8 ++++---- 5 files changed, 28 insertions(+), 10 deletions(-) diff --git a/js/.gitignore b/js/.gitignore index 902b3f759..e758389d2 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -71,6 +71,10 @@ Chinook_Sqlite.sql /wrappers/openai.js 
/wrappers/openai.d.ts /wrappers/openai.d.cts +/wrappers/vercel.cjs +/wrappers/vercel.js +/wrappers/vercel.d.ts +/wrappers/vercel.d.cts /singletons/traceable.cjs /singletons/traceable.js /singletons/traceable.d.ts diff --git a/js/package.json b/js/package.json index 606c2d3e2..279a507ca 100644 --- a/js/package.json +++ b/js/package.json @@ -45,6 +45,10 @@ "wrappers/openai.js", "wrappers/openai.d.ts", "wrappers/openai.d.cts", + "wrappers/vercel.cjs", + "wrappers/vercel.js", + "wrappers/vercel.d.ts", + "wrappers/vercel.d.cts", "singletons/traceable.cjs", "singletons/traceable.js", "singletons/traceable.d.ts", @@ -101,7 +105,7 @@ "uuid": "^9.0.0" }, "devDependencies": { - "@ai-sdk/anthropic": "^0.0.33", + "@ai-sdk/openai": "^0.0.40", "@babel/preset-env": "^7.22.4", "@faker-js/faker": "^8.4.1", "@jest/globals": "^29.5.0", @@ -252,6 +256,15 @@ "import": "./wrappers/openai.js", "require": "./wrappers/openai.cjs" }, + "./wrappers/vercel": { + "types": { + "import": "./wrappers/vercel.d.ts", + "require": "./wrappers/vercel.d.cts", + "default": "./wrappers/vercel.d.ts" + }, + "import": "./wrappers/vercel.js", + "require": "./wrappers/vercel.cjs" + }, "./singletons/traceable": { "types": { "import": "./singletons/traceable.d.ts", diff --git a/js/src/tests/wrapped_ai_sdk.int.test.ts b/js/src/tests/wrapped_ai_sdk.int.test.ts index 80556b61e..7553423db 100644 --- a/js/src/tests/wrapped_ai_sdk.int.test.ts +++ b/js/src/tests/wrapped_ai_sdk.int.test.ts @@ -1,10 +1,10 @@ -import { anthropic } from "@ai-sdk/anthropic"; +import { openai } from "@ai-sdk/openai"; import { generateObject, generateText, streamObject, streamText } from "ai"; import { z } from "zod"; import { wrapAISDKModel } from "../wrappers/vercel.js"; test("AI SDK generateText", async () => { - const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); const { text } = await generateText({ model: modelWithTracing, prompt: "Write a 
vegetarian lasagna recipe for 4 people.", @@ -13,7 +13,7 @@ test("AI SDK generateText", async () => { }); test("AI SDK generateObject", async () => { - const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); const { object } = await generateObject({ model: modelWithTracing, prompt: "Write a vegetarian lasagna recipe for 4 people.", @@ -25,7 +25,7 @@ test("AI SDK generateObject", async () => { }); test("AI SDK streamText", async () => { - const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); const { textStream } = await streamText({ model: modelWithTracing, prompt: "Write a vegetarian lasagna recipe for 4 people.", @@ -36,7 +36,7 @@ test("AI SDK streamText", async () => { }); test("AI SDK streamObject", async () => { - const modelWithTracing = wrapAISDKModel(anthropic("claude-3-haiku-20240307")); + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); const { partialObjectStream } = await streamObject({ model: modelWithTracing, prompt: "Write a vegetarian lasagna recipe for 4 people.", diff --git a/js/tsconfig.json b/js/tsconfig.json index 92b1a3026..ab24d6247 100644 --- a/js/tsconfig.json +++ b/js/tsconfig.json @@ -42,6 +42,7 @@ "src/wrappers/index.ts", "src/anonymizer/index.ts", "src/wrappers/openai.ts", + "src/wrappers/vercel.ts", "src/singletons/traceable.ts" ] } diff --git a/js/yarn.lock b/js/yarn.lock index 4f652fb3a..28195c859 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -2,10 +2,10 @@ # yarn lockfile v1 -"@ai-sdk/anthropic@^0.0.33": - version "0.0.33" - resolved "https://registry.yarnpkg.com/@ai-sdk/anthropic/-/anthropic-0.0.33.tgz#ab0d690e844965e0f54e6bbc85b91f0a90a4153d" - integrity sha512-xCgerb04tpVOYLL3CmaXUWXa+U8Dt8vflkat4m/0PKQdYGq06JLx/+vaRO8dEz+zU12sQl+3HTPrX53v/wVSxQ== +"@ai-sdk/openai@^0.0.40": + version "0.0.40" + resolved 
"https://registry.yarnpkg.com/@ai-sdk/openai/-/openai-0.0.40.tgz#227df69c8edf8b26b17f78ae55daa03e58a58870" + integrity sha512-9Iq1UaBHA5ZzNv6j3govuKGXrbrjuWvZIgWNJv4xzXlDMHu9P9hnqlBr/Aiay54WwCuTVNhTzAUTfFgnTs2kbQ== dependencies: "@ai-sdk/provider" "0.0.14" "@ai-sdk/provider-utils" "1.0.5" From 546a36f4ae283d43df4ca7b0590eda5ba12206b0 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Tue, 30 Jul 2024 05:09:53 -0700 Subject: [PATCH 325/373] [Python] Fix nesting in async trace context manager (#895) We were previously setting the context vars in a separate context and then letting it be gc'd. Fixes https://github.com/langchain-ai/langsmith-sdk/issues/892 --- python/langsmith/_internal/_aiter.py | 6 ++-- python/langsmith/run_helpers.py | 14 ++++++++-- python/pyproject.toml | 2 +- python/tests/unit_tests/test_run_helpers.py | 31 +++++++++++++++++++-- 4 files changed, 44 insertions(+), 9 deletions(-) diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py index a2f0701a1..7ae217f68 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -310,7 +310,9 @@ def accepts_context(callable: Callable[..., Any]) -> bool: # Ported from Python 3.9+ to support Python 3.8 -async def aio_to_thread(func, /, *args, **kwargs): +async def aio_to_thread( + func, /, *args, __ctx: Optional[contextvars.Context] = None, **kwargs +): """Asynchronously run function *func* in a separate thread. Any *args and **kwargs supplied for this function are directly passed @@ -321,7 +323,7 @@ async def aio_to_thread(func, /, *args, **kwargs): Return a coroutine that can be awaited to get the eventual result of *func*. 
""" loop = asyncio.get_running_loop() - ctx = contextvars.copy_context() + ctx = __ctx or contextvars.copy_context() func_call = functools.partial(ctx.run, func, *args, **kwargs) return await loop.run_in_executor(None, func_call) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 1131400bd..41885796c 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -926,7 +926,11 @@ async def __aenter__(self) -> run_trees.RunTree: Returns: run_trees.RunTree: The newly created run. """ - return await aitertools.aio_to_thread(self._setup) + ctx = copy_context() + result = await aitertools.aio_to_thread(self._setup, __ctx=ctx) + # Set the context for the current thread + _set_tracing_context(get_tracing_context(ctx)) + return result async def __aexit__( self, @@ -941,14 +945,18 @@ async def __aexit__( exc_value: The exception instance that occurred, if any. traceback: The traceback object associated with the exception, if any. """ + ctx = copy_context() if exc_type is not None: await asyncio.shield( - aitertools.aio_to_thread(self._teardown, exc_type, exc_value, traceback) + aitertools.aio_to_thread( + self._teardown, exc_type, exc_value, traceback, __ctx=ctx + ) ) else: await aitertools.aio_to_thread( - self._teardown, exc_type, exc_value, traceback + self._teardown, exc_type, exc_value, traceback, __ctx=ctx ) + _set_tracing_context(get_tracing_context(ctx)) def _get_project_name(project_name: Optional[str]) -> Optional[str]: diff --git a/python/pyproject.toml b/python/pyproject.toml index f6f9fa609..dd9143861 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.93" +version = "0.1.94" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 4bbc182c9..d5be6c1dd 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -962,12 +962,25 @@ def _get_run(r: RunTree) -> None: async def test_traceable_to_atrace(): + @traceable + async def great_grandchild_fn(a: int, b: int) -> int: + return a + b + @traceable async def parent_fn(a: int, b: int) -> int: async with langsmith.trace( name="child_fn", inputs={"a": a, "b": b} ) as run_tree: - result = a + b + async with langsmith.trace( + "grandchild_fn", inputs={"a": a, "b": b, "c": "oh my"} + ) as run_tree_gc: + try: + async with langsmith.trace("expect_error", inputs={}): + raise ValueError("oh no") + except ValueError: + pass + result = await great_grandchild_fn(a, b) + run_tree_gc.end(outputs={"result": result}) run_tree.end(outputs={"result": result}) return result @@ -991,8 +1004,20 @@ def _get_run(r: RunTree) -> None: child_runs = run.child_runs assert child_runs assert len(child_runs) == 1 - assert child_runs[0].name == "child_fn" - assert child_runs[0].inputs == {"a": 1, "b": 2} + child = child_runs[0] + assert child.name == "child_fn" + assert child.inputs == {"a": 1, "b": 2} + assert len(child.child_runs) == 1 + grandchild = child.child_runs[0] + assert grandchild.name == "grandchild_fn" + assert grandchild.inputs == {"a": 1, "b": 2, "c": "oh my"} + assert len(grandchild.child_runs) == 2 + ggcerror = grandchild.child_runs[0] + assert ggcerror.name == "expect_error" + assert "oh no" in str(ggcerror.error) + ggc = grandchild.child_runs[1] + assert ggc.name == "great_grandchild_fn" + assert ggc.inputs == {"a": 1, "b": 2} def test_trace_to_traceable(): From c60b0ce993aad2e1264c725f672df942f28241ca Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Tue, 30 Jul 2024 12:05:48 -0700 Subject: [PATCH 326/373] Add aggregation --- 
js/src/tests/wrapped_ai_sdk.int.test.ts | 29 ++++++++++++++++++++++- js/src/wrappers/vercel.ts | 31 +++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/js/src/tests/wrapped_ai_sdk.int.test.ts b/js/src/tests/wrapped_ai_sdk.int.test.ts index 7553423db..ddc221741 100644 --- a/js/src/tests/wrapped_ai_sdk.int.test.ts +++ b/js/src/tests/wrapped_ai_sdk.int.test.ts @@ -1,5 +1,11 @@ import { openai } from "@ai-sdk/openai"; -import { generateObject, generateText, streamObject, streamText } from "ai"; +import { + generateObject, + generateText, + streamObject, + streamText, + tool, +} from "ai"; import { z } from "zod"; import { wrapAISDKModel } from "../wrappers/vercel.js"; @@ -12,6 +18,27 @@ test("AI SDK generateText", async () => { console.log(text); }); +test("AI SDK generateText with a tool", async () => { + const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); + const { text } = await generateText({ + model: modelWithTracing, + prompt: + "Write a vegetarian lasagna recipe for 4 people. 
Get ingredients first.", + tools: { + getIngredients: tool({ + description: "get a list of ingredients", + parameters: z.object({ + ingredients: z.array(z.string()), + }), + execute: async () => + JSON.stringify(["pasta", "tomato", "cheese", "onions"]), + }), + }, + maxToolRoundtrips: 2, + }); + console.log(text); +}); + test("AI SDK generateObject", async () => { const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini")); const { object } = await generateObject({ diff --git a/js/src/wrappers/vercel.ts b/js/src/wrappers/vercel.ts index 0264f112f..1cd706d4e 100644 --- a/js/src/wrappers/vercel.ts +++ b/js/src/wrappers/vercel.ts @@ -50,14 +50,45 @@ export const wrapAISDKModel = ( const originalValue = target[propKey as keyof T]; if (typeof originalValue === "function") { let __finalTracedIteratorKey; + let aggregator; if (propKey === "doStream") { __finalTracedIteratorKey = "stream"; + aggregator = (chunks: any[]) => { + return chunks.reduce( + (aggregated, chunk) => { + console.log(chunk); + if (chunk.type === "text-delta") { + return { + ...aggregated, + text: aggregated.text + chunk.textDelta, + }; + } else if (chunk.type === "tool-call") { + return { + ...aggregated, + ...chunk, + }; + } else if (chunk.type === "finish") { + return { + ...aggregated, + usage: chunk.usage, + finishReason: chunk.finishReason, + }; + } else { + return aggregated; + } + }, + { + text: "", + } + ); + }; } return traceable(originalValue.bind(target), { run_type: "llm", name: runName, ...options, __finalTracedIteratorKey, + aggregator, }); } else if ( originalValue != null && From 31cb73b9c481cdd483e8bf99ee004a63c0d1affb Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Tue, 30 Jul 2024 12:09:06 -0700 Subject: [PATCH 327/373] Nits --- js/src/traceable.ts | 2 +- js/src/wrappers/vercel.ts | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/js/src/traceable.ts b/js/src/traceable.ts index f663a6de2..dc43af0d3 100644 --- a/js/src/traceable.ts +++ b/js/src/traceable.ts @@ 
-297,8 +297,8 @@ export function traceable any>( type Inputs = Parameters; const { aggregator, - __finalTracedIteratorKey, argsConfigPath, + __finalTracedIteratorKey, ...runTreeConfig } = config ?? {}; diff --git a/js/src/wrappers/vercel.ts b/js/src/wrappers/vercel.ts index 1cd706d4e..dc022d7c8 100644 --- a/js/src/wrappers/vercel.ts +++ b/js/src/wrappers/vercel.ts @@ -56,7 +56,6 @@ export const wrapAISDKModel = ( aggregator = (chunks: any[]) => { return chunks.reduce( (aggregated, chunk) => { - console.log(chunk); if (chunk.type === "text-delta") { return { ...aggregated, From 616fec3f48828845cdbd465e6e018a588bf847d0 Mon Sep 17 00:00:00 2001 From: jacoblee93 Date: Tue, 30 Jul 2024 12:19:01 -0700 Subject: [PATCH 328/373] js[patch]: Release 0.1.40 --- js/package.json | 4 ++-- js/src/index.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index 279a507ca..f67db8f95 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.39", + "version": "0.1.40", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ @@ -276,4 +276,4 @@ }, "./package.json": "./package.json" } -} +} \ No newline at end of file diff --git a/js/src/index.ts b/js/src/index.ts index 73f1007da..b2e3baf5e 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.39"; +export const __version__ = "0.1.40"; From 64824d3446d97189210649116898f6a87261a108 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Wed, 31 Jul 2024 14:45:14 -0700 Subject: [PATCH 329/373] Respect env var in langsmith.trace (#901) It's hard to use in production if it has a different switch than the rest of the tracing code. 
--- python/langsmith/run_helpers.py | 12 +++---- python/langsmith/utils.py | 4 +-- python/tests/unit_tests/test_run_helpers.py | 37 +++++++++++++++++---- 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 41885796c..90301c03d 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -804,7 +804,8 @@ def _setup(self) -> run_trees.RunTree: run_trees.RunTree: The newly created run. """ self.old_ctx = get_tracing_context() - is_disabled = self.old_ctx.get("enabled", True) is False + enabled = utils.tracing_is_enabled(self.old_ctx) + outer_tags = _TAGS.get() outer_metadata = _METADATA.get() parent_run_ = _get_parent_run( @@ -827,7 +828,7 @@ def _setup(self) -> run_trees.RunTree: project_name_ = _get_project_name(self.project_name) - if parent_run_ is not None and not is_disabled: + if parent_run_ is not None and enabled: self.new_run = parent_run_.create_child( name=self.name, run_id=self.run_id, @@ -851,7 +852,7 @@ def _setup(self) -> run_trees.RunTree: client=self.client, # type: ignore[arg-type] ) - if not is_disabled: + if enabled: self.new_run.post() _TAGS.set(tags_) _METADATA.set(metadata) @@ -877,7 +878,6 @@ def _teardown( traceback: The traceback object associated with the exception, if any. 
""" if self.new_run is None: - warnings.warn("Tracing context was not set up properly.", RuntimeWarning) return if exc_type is not None: if self.exceptions_to_handle and issubclass( @@ -889,8 +889,8 @@ def _teardown( tb = f"{exc_type.__name__}: {exc_value}\n\n{tb}" self.new_run.end(error=tb) if self.old_ctx is not None: - is_disabled = self.old_ctx.get("enabled", True) is False - if not is_disabled: + enabled = utils.tracing_is_enabled(self.old_ctx) + if enabled: self.new_run.patch() _set_tracing_context(self.old_ctx) diff --git a/python/langsmith/utils.py b/python/langsmith/utils.py index 2456af92a..0d3552dcc 100644 --- a/python/langsmith/utils.py +++ b/python/langsmith/utils.py @@ -73,11 +73,11 @@ class LangSmithConnectionError(LangSmithError): """Couldn't connect to the LangSmith API.""" -def tracing_is_enabled() -> bool: +def tracing_is_enabled(ctx: Optional[dict] = None) -> bool: """Return True if tracing is enabled.""" from langsmith.run_helpers import get_current_run_tree, get_tracing_context - tc = get_tracing_context() + tc = ctx or get_tracing_context() # You can manually override the environment using context vars. # Check that first. 
# Doing this before checking the run tree lets us diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index d5be6c1dd..4c960ddf8 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -2,6 +2,7 @@ import functools import inspect import json +import os import sys import time import uuid @@ -1071,11 +1072,11 @@ def test_client_passed_when_trace_parent(): mock_client = _get_mock_client() rt = RunTree(name="foo", client=mock_client) headers = rt.to_headers() - - with trace( - name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client - ) as rt: - rt.outputs["bar"] = "baz" + with tracing_context(enabled=True): + with trace( + name="foo", inputs={"foo": "bar"}, parent=headers, client=mock_client + ) as rt: + rt.outputs["bar"] = "baz" calls = _get_calls(mock_client) assert len(calls) == 1 call = calls[0] @@ -1281,7 +1282,7 @@ async def my_function(a: int) -> int: mock_calls = _get_calls( mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls ) - assert len(mock_calls) == num_calls + assert len(mock_calls) >= num_calls @pytest.mark.parametrize("auto_batch_tracing", [True, False]) @@ -1316,3 +1317,27 @@ async def my_function(a: int) -> AsyncGenerator[int, None]: mock_client, verbs={"POST", "PATCH", "GET"}, minimum=num_calls ) assert len(mock_calls) == num_calls + + +@pytest.mark.parametrize("env_var", [True, False]) +@pytest.mark.parametrize("context", [True, False, None]) +async def test_trace_respects_env_var(env_var: bool, context: Optional[bool]): + mock_client = _get_mock_client() + with patch.dict(os.environ, {"LANGSMITH_TRACING": "true" if env_var else "false "}): + with tracing_context(enabled=context): + with trace(name="foo", inputs={"a": 1}, client=mock_client) as run: + assert run.name == "foo" + pass + async with trace(name="bar", inputs={"b": 2}, client=mock_client) as run2: + assert run2.name == "bar" + pass + + mock_calls = 
_get_calls(mock_client) + if context is None: + expect = env_var + else: + expect = context + if expect: + assert len(mock_calls) >= 1 + else: + assert not mock_calls From e0047ad00a5e22c2e4e9143b114f3e2e069d444d Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 1 Aug 2024 14:30:08 -0700 Subject: [PATCH 330/373] patch: push existing prompt without specifying any other options --- js/src/client.ts | 14 ++++++++------ js/src/tests/client.int.test.ts | 12 ++++++++++++ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/js/src/client.ts b/js/src/client.ts index 36ded0d64..aabcccb0f 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -3417,12 +3417,14 @@ export class Client { ): Promise { // Create or update prompt metadata if (await this.promptExists(promptIdentifier)) { - await this.updatePrompt(promptIdentifier, { - description: options?.description, - readme: options?.readme, - tags: options?.tags, - isPublic: options?.isPublic, - }); + if (options && Object.keys(options).some((key) => key !== "object")) { + await this.updatePrompt(promptIdentifier, { + description: options?.description, + readme: options?.readme, + tags: options?.tags, + isPublic: options?.isPublic, + }); + } } else { await this.createPrompt(promptIdentifier, { description: options?.description, diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index b7c0e5316..9fabd6819 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -997,6 +997,13 @@ test("Test push and pull prompt", async () => { ], { templateFormat: "mustache" } ); + const template2 = ChatPromptTemplate.fromMessages( + [ + new SystemMessage({ content: "System message" }), + new HumanMessage({ content: "My question is: {{question}}" }), + ], + { templateFormat: "mustache" } + ); await client.pushPrompt(promptName, { object: template, @@ -1005,6 +1012,11 @@ test("Test push and pull prompt", async () => { tags: ["test", "tag"], }); + // test you can push an 
updated manifest + await client.pushPrompt(promptName, { + object: template2, + }); + const pulledPrompt = await client._pullPrompt(promptName); expect(pulledPrompt).toBeDefined(); From 95d3bada2d7baa375d9e2da8b9d39371aa5f912f Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 1 Aug 2024 14:32:19 -0700 Subject: [PATCH 331/373] comment --- js/src/tests/client.int.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 9fabd6819..3dfea306f 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -1012,7 +1012,7 @@ test("Test push and pull prompt", async () => { tags: ["test", "tag"], }); - // test you can push an updated manifest + // test you can push an updated manifest without any other options await client.pushPrompt(promptName, { object: template2, }); From 459ba740dc8e01010299ec60559d3dfd4a48588e Mon Sep 17 00:00:00 2001 From: bvs-langchain <166456249+bvs-langchain@users.noreply.github.com> Date: Thu, 1 Aug 2024 18:44:22 -0400 Subject: [PATCH 332/373] chore: add environment variables for basic auth to docker-compose (#903) Co-authored-by: William FH <13333726+hinthornw@users.noreply.github.com> --- python/langsmith/cli/docker-compose.yaml | 8 ++++++++ python/pyproject.toml | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/python/langsmith/cli/docker-compose.yaml b/python/langsmith/cli/docker-compose.yaml index 87130aa13..172cc6e5d 100644 --- a/python/langsmith/cli/docker-compose.yaml +++ b/python/langsmith/cli/docker-compose.yaml @@ -37,6 +37,10 @@ services: - CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123} - CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false} - FF_ORG_CREATION_DISABLED=${ORG_CREATION_DISABLED:-false} + - BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false} + - INITIAL_ORG_ADMIN_EMAIL=${INITIAL_ORG_ADMIN_EMAIL} + - INITIAL_ORG_ADMIN_PASSWORD=${INITIAL_ORG_ADMIN_PASSWORD} + - BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET} ports: - 
1984:1984 depends_on: @@ -63,6 +67,10 @@ services: - API_KEY_SALT=${API_KEY_SALT} - POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres} - REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379} + - BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false} + - INITIAL_ORG_ADMIN_EMAIL=${INITIAL_ORG_ADMIN_EMAIL} + - INITIAL_ORG_ADMIN_PASSWORD=${INITIAL_ORG_ADMIN_PASSWORD} + - BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET} ports: - 1986:1986 depends_on: diff --git a/python/pyproject.toml b/python/pyproject.toml index dd9143861..cda98a8c7 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.94" +version = "0.1.95" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From ee4a1be42d47fddb34fc18f7f31752390daa0875 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Thu, 1 Aug 2024 18:05:30 -0700 Subject: [PATCH 333/373] List and read shared runs (#905) - Fixes list endpoint to point to cursor paginated endpoint - Add GET endpoint - Permit passing in url directly for the share token --- python/langsmith/client.py | 36 +++- python/langsmith/wrappers/_openai.py | 10 +- python/poetry.lock | 292 ++++++++++++++------------- python/pyproject.toml | 2 +- 4 files changed, 179 insertions(+), 161 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index be40dfb02..2c18e05fa 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -88,7 +88,10 @@ def _is_localhost(url: str) -> bool: def _parse_token_or_url( - url_or_token: Union[str, uuid.UUID], api_url: str, num_parts: int = 2 + url_or_token: Union[str, uuid.UUID], + api_url: str, + num_parts: int = 2, + kind: str = "dataset", ) -> Tuple[str, str]: """Parse a public dataset URL or share token.""" try: @@ -104,7 +107,7 @@ def 
_parse_token_or_url( if len(path_parts) >= num_parts: token_uuid = path_parts[-num_parts] else: - raise ls_utils.LangSmithUserError(f"Invalid public dataset URL: {url_or_token}") + raise ls_utils.LangSmithUserError(f"Invalid public {kind} URL: {url_or_token}") return api_url, token_uuid @@ -1949,21 +1952,32 @@ def run_is_shared(self, run_id: ID_TYPE) -> bool: link = self.read_run_shared_link(_as_uuid(run_id, "run_id")) return link is not None - def list_shared_runs( - self, share_token: ID_TYPE, run_ids: Optional[List[str]] = None - ) -> List[ls_schemas.Run]: + def read_shared_run( + self, share_token: Union[ID_TYPE, str], run_id: Optional[ID_TYPE] = None + ) -> ls_schemas.Run: """Get shared runs.""" - params = {"id": run_ids, "share_token": str(share_token)} + _, token_uuid = _parse_token_or_url(share_token, "", kind="run") + path = f"/public/{token_uuid}/run" + if run_id is not None: + path += f"/{_as_uuid(run_id, 'run_id')}" response = self.request_with_retries( "GET", - f"/public/{_as_uuid(share_token, 'share_token')}/runs", + path, headers=self._headers, - params=params, ) ls_utils.raise_for_status_with_text(response) - return [ - ls_schemas.Run(**run, _host_url=self._host_url) for run in response.json() - ] + return ls_schemas.Run(**response.json(), _host_url=self._host_url) + + def list_shared_runs( + self, share_token: Union[ID_TYPE, str], run_ids: Optional[List[str]] = None + ) -> Iterator[ls_schemas.Run]: + """Get shared runs.""" + body = {"id": run_ids} if run_ids else {} + _, token_uuid = _parse_token_or_url(share_token, "", kind="run") + for run in self._get_cursor_paginated_list( + f"/public/{token_uuid}/runs/query", body=body + ): + yield ls_schemas.Run(**run, _host_url=self._host_url) def read_dataset_shared_schema( self, diff --git a/python/langsmith/wrappers/_openai.py b/python/langsmith/wrappers/_openai.py index 5b6798e8d..07b317324 100644 --- a/python/langsmith/wrappers/_openai.py +++ b/python/langsmith/wrappers/_openai.py @@ -114,13 +114,11 @@ 
def _reduce_choices(choices: List[Choice]) -> dict: "arguments": "", } if chunk.function.name: - message["tool_calls"][index]["function"][ - "name" - ] += chunk.function.name + fn_ = message["tool_calls"][index]["function"] + fn_["name"] += chunk.function.name if chunk.function.arguments: - message["tool_calls"][index]["function"][ - "arguments" - ] += chunk.function.arguments + fn_ = message["tool_calls"][index]["function"] + fn_["arguments"] += chunk.function.arguments return { "index": choices[0].index, "finish_reason": next( diff --git a/python/poetry.lock b/python/poetry.lock index d347f4525..efcd045b3 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -700,13 +700,13 @@ files = [ [[package]] name = "openai" -version = "1.35.7" +version = "1.35.10" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.35.7-py3-none-any.whl", hash = "sha256:3d1e0b0aac9b0db69a972d36dc7efa7563f8e8d65550b27a48f2a0c2ec207e80"}, - {file = "openai-1.35.7.tar.gz", hash = "sha256:009bfa1504c9c7ef64d87be55936d142325656bbc6d98c68b669d6472e4beb09"}, + {file = "openai-1.35.10-py3-none-any.whl", hash = "sha256:962cb5c23224b5cbd16078308dabab97a08b0a5ad736a4fdb3dc2ffc44ac974f"}, + {file = "openai-1.35.10.tar.gz", hash = "sha256:85966949f4f960f3e4b239a659f9fd64d3a97ecc43c44dc0a044b5c7f11cccc6"}, ] [package.dependencies] @@ -723,57 +723,62 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.5" +version = "3.10.6" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63"}, - {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c"}, - {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96"}, - {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b"}, - {file = "orjson-3.10.5-cp310-none-win32.whl", hash = "sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2"}, - {file = "orjson-3.10.5-cp310-none-win_amd64.whl", hash = "sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228"}, - {file = "orjson-3.10.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1"}, - {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5"}, - {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f"}, - {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa"}, - {file = "orjson-3.10.5-cp311-none-win32.whl", hash = "sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04"}, - {file = "orjson-3.10.5-cp311-none-win_amd64.whl", hash = "sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c"}, - {file = "orjson-3.10.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969"}, - {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b"}, - {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211"}, - {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3"}, - {file = "orjson-3.10.5-cp312-none-win32.whl", hash = "sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2"}, - {file = "orjson-3.10.5-cp312-none-win_amd64.whl", hash = 
"sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5"}, - {file = "orjson-3.10.5-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01"}, - {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26"}, - {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6"}, - {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d"}, - {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e"}, - {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9"}, - {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b"}, - {file = "orjson-3.10.5-cp38-none-win32.whl", hash = "sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4"}, - {file = "orjson-3.10.5-cp38-none-win_amd64.whl", hash = "sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09"}, - {file = "orjson-3.10.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139"}, - {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214"}, - {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595"}, - {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86"}, - {file = "orjson-3.10.5-cp39-none-win32.whl", hash = "sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47"}, - {file = "orjson-3.10.5-cp39-none-win_amd64.whl", hash = "sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7"}, - {file = "orjson-3.10.5.tar.gz", hash = "sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d"}, + {file = "orjson-3.10.6-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:fb0ee33124db6eaa517d00890fc1a55c3bfe1cf78ba4a8899d71a06f2d6ff5c7"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c1c4b53b24a4c06547ce43e5fee6ec4e0d8fe2d597f4647fc033fd205707365"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eadc8fd310edb4bdbd333374f2c8fec6794bbbae99b592f448d8214a5e4050c0"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61272a5aec2b2661f4fa2b37c907ce9701e821b2c1285d5c3ab0207ebd358d38"}, + {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57985ee7e91d6214c837936dc1608f40f330a6b88bb13f5a57ce5257807da143"}, + {file = 
"orjson-3.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:633a3b31d9d7c9f02d49c4ab4d0a86065c4a6f6adc297d63d272e043472acab5"}, + {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1c680b269d33ec444afe2bdc647c9eb73166fa47a16d9a75ee56a374f4a45f43"}, + {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f759503a97a6ace19e55461395ab0d618b5a117e8d0fbb20e70cfd68a47327f2"}, + {file = "orjson-3.10.6-cp310-none-win32.whl", hash = "sha256:95a0cce17f969fb5391762e5719575217bd10ac5a189d1979442ee54456393f3"}, + {file = "orjson-3.10.6-cp310-none-win_amd64.whl", hash = "sha256:df25d9271270ba2133cc88ee83c318372bdc0f2cd6f32e7a450809a111efc45c"}, + {file = "orjson-3.10.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b1ec490e10d2a77c345def52599311849fc063ae0e67cf4f84528073152bb2ba"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d43d3feb8f19d07e9f01e5b9be4f28801cf7c60d0fa0d279951b18fae1932b"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3045267e98fe749408eee1593a142e02357c5c99be0802185ef2170086a863"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c27bc6a28ae95923350ab382c57113abd38f3928af3c80be6f2ba7eb8d8db0b0"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d27456491ca79532d11e507cadca37fb8c9324a3976294f68fb1eff2dc6ced5a"}, + {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05ac3d3916023745aa3b3b388e91b9166be1ca02b7c7e41045da6d12985685f0"}, + {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1335d4ef59ab85cab66fe73fd7a4e881c298ee7f63ede918b7faa1b27cbe5212"}, + {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:4bbc6d0af24c1575edc79994c20e1b29e6fb3c6a570371306db0993ecf144dc5"}, + {file = "orjson-3.10.6-cp311-none-win32.whl", hash = "sha256:450e39ab1f7694465060a0550b3f6d328d20297bf2e06aa947b97c21e5241fbd"}, + {file = "orjson-3.10.6-cp311-none-win_amd64.whl", hash = "sha256:227df19441372610b20e05bdb906e1742ec2ad7a66ac8350dcfd29a63014a83b"}, + {file = "orjson-3.10.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ea2977b21f8d5d9b758bb3f344a75e55ca78e3ff85595d248eee813ae23ecdfb"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6f3d167d13a16ed263b52dbfedff52c962bfd3d270b46b7518365bcc2121eed"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f710f346e4c44a4e8bdf23daa974faede58f83334289df80bc9cd12fe82573c7"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7275664f84e027dcb1ad5200b8b18373e9c669b2a9ec33d410c40f5ccf4b257e"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0943e4c701196b23c240b3d10ed8ecd674f03089198cf503105b474a4f77f21f"}, + {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:446dee5a491b5bc7d8f825d80d9637e7af43f86a331207b9c9610e2f93fee22a"}, + {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:64c81456d2a050d380786413786b057983892db105516639cb5d3ee3c7fd5148"}, + {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"}, + {file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"}, + {file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"}, + {file = 
"orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2c116072a8533f2fec435fde4d134610f806bdac20188c7bd2081f3e9e0133f"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6eeb13218c8cf34c61912e9df2de2853f1d009de0e46ea09ccdf3d757896af0a"}, + {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965a916373382674e323c957d560b953d81d7a8603fbeee26f7b8248638bd48b"}, + {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03c95484d53ed8e479cade8628c9cea00fd9d67f5554764a1110e0d5aa2de96e"}, + {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e060748a04cccf1e0a6f2358dffea9c080b849a4a68c28b1b907f272b5127e9b"}, + {file = "orjson-3.10.6-cp38-none-win32.whl", hash = "sha256:738dbe3ef909c4b019d69afc19caf6b5ed0e2f1c786b5d6215fbb7539246e4c6"}, + {file = "orjson-3.10.6-cp38-none-win_amd64.whl", hash = "sha256:d40f839dddf6a7d77114fe6b8a70218556408c71d4d6e29413bb5f150a692ff7"}, + {file = "orjson-3.10.6-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:697a35a083c4f834807a6232b3e62c8b280f7a44ad0b759fd4dce748951e70db"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd502f96bf5ea9a61cbc0b2b5900d0dd68aa0da197179042bdd2be67e51a1e4b"}, + {file = 
"orjson-3.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f215789fb1667cdc874c1b8af6a84dc939fd802bf293a8334fce185c79cd359b"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2debd8ddce948a8c0938c8c93ade191d2f4ba4649a54302a7da905a81f00b56"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5410111d7b6681d4b0d65e0f58a13be588d01b473822483f77f513c7f93bd3b2"}, + {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb1f28a137337fdc18384079fa5726810681055b32b92253fa15ae5656e1dddb"}, + {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bf2fbbce5fe7cd1aa177ea3eab2b8e6a6bc6e8592e4279ed3db2d62e57c0e1b2"}, + {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:79b9b9e33bd4c517445a62b90ca0cc279b0f1f3970655c3df9e608bc3f91741a"}, + {file = "orjson-3.10.6-cp39-none-win32.whl", hash = "sha256:30b0a09a2014e621b1adf66a4f705f0809358350a757508ee80209b2d8dae219"}, + {file = "orjson-3.10.6-cp39-none-win_amd64.whl", hash = "sha256:49e3bc615652617d463069f91b867a4458114c5b104e13b7ae6872e5f79d0844"}, + {file = "orjson-3.10.6.tar.gz", hash = "sha256:e54b63d0a7c6c54a5f5f726bc93a2078111ef060fec4ecbf34c5db800ca3b3a7"}, ] [[package]] @@ -874,18 +879,18 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pydantic" -version = "2.8.0" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.0-py3-none-any.whl", hash = "sha256:ead4f3a1e92386a734ca1411cb25d94147cf8778ed5be6b56749047676d6364e"}, - {file = "pydantic-2.8.0.tar.gz", hash = "sha256:d970ffb9d030b710795878940bd0489842c638e7252fc4a19c3ae2f7da4d6141"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = 
"pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.20.0" +pydantic-core = "2.20.1" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -896,99 +901,100 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.20.0" +version = "2.20.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e9dcd7fb34f7bfb239b5fa420033642fff0ad676b765559c3737b91f664d4fa9"}, - {file = "pydantic_core-2.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:649a764d9b0da29816889424697b2a3746963ad36d3e0968784ceed6e40c6355"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7701df088d0b05f3460f7ba15aec81ac8b0fb5690367dfd072a6c38cf5b7fdb5"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab760f17c3e792225cdaef31ca23c0aea45c14ce80d8eff62503f86a5ab76bff"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1ad5b4d73cde784cf64580166568074f5ccd2548d765e690546cff3d80937d"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b81ec2efc04fc1dbf400647d4357d64fb25543bae38d2d19787d69360aad21c9"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4a9732a5cad764ba37f3aa873dccb41b584f69c347a57323eda0930deec8e10"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dc85b9e10cc21d9c1055f15684f76fa4facadddcb6cd63abab702eb93c98943"}, - {file = 
"pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:21d9f7e24f63fdc7118e6cc49defaab8c1d27570782f7e5256169d77498cf7c7"}, - {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b315685832ab9287e6124b5d74fc12dda31e6421d7f6b08525791452844bc2d"}, - {file = "pydantic_core-2.20.0-cp310-none-win32.whl", hash = "sha256:c3dc8ec8b87c7ad534c75b8855168a08a7036fdb9deeeed5705ba9410721c84d"}, - {file = "pydantic_core-2.20.0-cp310-none-win_amd64.whl", hash = "sha256:85770b4b37bb36ef93a6122601795231225641003e0318d23c6233c59b424279"}, - {file = "pydantic_core-2.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:58e251bb5a5998f7226dc90b0b753eeffa720bd66664eba51927c2a7a2d5f32c"}, - {file = "pydantic_core-2.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78d584caac52c24240ef9ecd75de64c760bbd0e20dbf6973631815e3ef16ef8b"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5084ec9721f82bef5ff7c4d1ee65e1626783abb585f8c0993833490b63fe1792"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d0f52684868db7c218437d260e14d37948b094493f2646f22d3dda7229bbe3f"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1def125d59a87fe451212a72ab9ed34c118ff771e5473fef4f2f95d8ede26d75"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b34480fd6778ab356abf1e9086a4ced95002a1e195e8d2fd182b0def9d944d11"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42669d319db366cb567c3b444f43caa7ffb779bf9530692c6f244fc635a41eb"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53b06aea7a48919a254b32107647be9128c066aaa6ee6d5d08222325f25ef175"}, - {file = 
"pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1f038156b696a1c39d763b2080aeefa87ddb4162c10aa9fabfefffc3dd8180fa"}, - {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3f0f3a4a23717280a5ee3ac4fb1f81d6fde604c9ec5100f7f6f987716bb8c137"}, - {file = "pydantic_core-2.20.0-cp311-none-win32.whl", hash = "sha256:316fe7c3fec017affd916a0c83d6f1ec697cbbbdf1124769fa73328e7907cc2e"}, - {file = "pydantic_core-2.20.0-cp311-none-win_amd64.whl", hash = "sha256:2d06a7fa437f93782e3f32d739c3ec189f82fca74336c08255f9e20cea1ed378"}, - {file = "pydantic_core-2.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d6f8c49657f3eb7720ed4c9b26624063da14937fc94d1812f1e04a2204db3e17"}, - {file = "pydantic_core-2.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad1bd2f377f56fec11d5cfd0977c30061cd19f4fa199bf138b200ec0d5e27eeb"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed741183719a5271f97d93bbcc45ed64619fa38068aaa6e90027d1d17e30dc8d"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d82e5ed3a05f2dcb89c6ead2fd0dbff7ac09bc02c1b4028ece2d3a3854d049ce"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2ba34a099576234671f2e4274e5bc6813b22e28778c216d680eabd0db3f7dad"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:879ae6bb08a063b3e1b7ac8c860096d8fd6b48dd9b2690b7f2738b8c835e744b"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0eefc7633a04c0694340aad91fbfd1986fe1a1e0c63a22793ba40a18fcbdc8"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73deadd6fd8a23e2f40b412b3ac617a112143c8989a4fe265050fd91ba5c0608"}, - {file = 
"pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:35681445dc85446fb105943d81ae7569aa7e89de80d1ca4ac3229e05c311bdb1"}, - {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0f6dd3612a3b9f91f2e63924ea18a4476656c6d01843ca20a4c09e00422195af"}, - {file = "pydantic_core-2.20.0-cp312-none-win32.whl", hash = "sha256:7e37b6bb6e90c2b8412b06373c6978d9d81e7199a40e24a6ef480e8acdeaf918"}, - {file = "pydantic_core-2.20.0-cp312-none-win_amd64.whl", hash = "sha256:7d4df13d1c55e84351fab51383520b84f490740a9f1fec905362aa64590b7a5d"}, - {file = "pydantic_core-2.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d43e7ab3b65e4dc35a7612cfff7b0fd62dce5bc11a7cd198310b57f39847fd6c"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6a24d7b5893392f2b8e3b7a0031ae3b14c6c1942a4615f0d8794fdeeefb08b"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2f13c3e955a087c3ec86f97661d9f72a76e221281b2262956af381224cfc243"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72432fd6e868c8d0a6849869e004b8bcae233a3c56383954c228316694920b38"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d70a8ff2d4953afb4cbe6211f17268ad29c0b47e73d3372f40e7775904bc28fc"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e49524917b8d3c2f42cd0d2df61178e08e50f5f029f9af1f402b3ee64574392"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4f0f71653b1c1bad0350bc0b4cc057ab87b438ff18fa6392533811ebd01439c"}, - {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:16197e6f4fdecb9892ed2436e507e44f0a1aa2cff3b9306d1c879ea2f9200997"}, - {file = 
"pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:763602504bf640b3ded3bba3f8ed8a1cc2fc6a87b8d55c1c5689f428c49c947e"}, - {file = "pydantic_core-2.20.0-cp313-none-win32.whl", hash = "sha256:a3f243f318bd9523277fa123b3163f4c005a3e8619d4b867064de02f287a564d"}, - {file = "pydantic_core-2.20.0-cp313-none-win_amd64.whl", hash = "sha256:03aceaf6a5adaad3bec2233edc5a7905026553916615888e53154807e404545c"}, - {file = "pydantic_core-2.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d6f2d8b8da1f03f577243b07bbdd3412eee3d37d1f2fd71d1513cbc76a8c1239"}, - {file = "pydantic_core-2.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a272785a226869416c6b3c1b7e450506152d3844207331f02f27173562c917e0"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efbb412d55a4ffe73963fed95c09ccb83647ec63b711c4b3752be10a56f0090b"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e4f46189d8740561b43655263a41aac75ff0388febcb2c9ec4f1b60a0ec12f3"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3df115f4a3c8c5e4d5acf067d399c6466d7e604fc9ee9acbe6f0c88a0c3cf"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a340d2bdebe819d08f605e9705ed551c3feb97e4fd71822d7147c1e4bdbb9508"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:616b9c2f882393d422ba11b40e72382fe975e806ad693095e9a3b67c59ea6150"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25c46bb2ff6084859bbcfdf4f1a63004b98e88b6d04053e8bf324e115398e9e7"}, - {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23425eccef8f2c342f78d3a238c824623836c6c874d93c726673dbf7e56c78c0"}, - {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:52527e8f223ba29608d999d65b204676398009725007c9336651c2ec2d93cffc"}, - {file = "pydantic_core-2.20.0-cp38-none-win32.whl", hash = "sha256:1c3c5b7f70dd19a6845292b0775295ea81c61540f68671ae06bfe4421b3222c2"}, - {file = "pydantic_core-2.20.0-cp38-none-win_amd64.whl", hash = "sha256:8093473d7b9e908af1cef30025609afc8f5fd2a16ff07f97440fd911421e4432"}, - {file = "pydantic_core-2.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ee7785938e407418795e4399b2bf5b5f3cf6cf728077a7f26973220d58d885cf"}, - {file = "pydantic_core-2.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e75794883d635071cf6b4ed2a5d7a1e50672ab7a051454c76446ef1ebcdcc91"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:344e352c96e53b4f56b53d24728217c69399b8129c16789f70236083c6ceb2ac"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:978d4123ad1e605daf1ba5e01d4f235bcf7b6e340ef07e7122e8e9cfe3eb61ab"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c05eaf6c863781eb834ab41f5963604ab92855822a2062897958089d1335dad"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc7e43b4a528ffca8c9151b6a2ca34482c2fdc05e6aa24a84b7f475c896fc51d"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658287a29351166510ebbe0a75c373600cc4367a3d9337b964dada8d38bcc0f4"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1dacf660d6de692fe351e8c806e7efccf09ee5184865893afbe8e59be4920b4a"}, - {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e147fc6e27b9a487320d78515c5f29798b539179f7777018cedf51b7749e4f4"}, - {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c867230d715a3dd1d962c8d9bef0d3168994ed663e21bf748b6e3a529a129aab"}, - 
{file = "pydantic_core-2.20.0-cp39-none-win32.whl", hash = "sha256:22b813baf0dbf612752d8143a2dbf8e33ccb850656b7850e009bad2e101fc377"}, - {file = "pydantic_core-2.20.0-cp39-none-win_amd64.whl", hash = "sha256:3a7235b46c1bbe201f09b6f0f5e6c36b16bad3d0532a10493742f91fbdc8035f"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cafde15a6f7feaec2f570646e2ffc5b73412295d29134a29067e70740ec6ee20"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2aec8eeea0b08fd6bc2213d8e86811a07491849fd3d79955b62d83e32fa2ad5f"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840200827984f1c4e114008abc2f5ede362d6e11ed0b5931681884dd41852ff1"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ea1d8b7df522e5ced34993c423c3bf3735c53df8b2a15688a2f03a7d678800"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5b8376a867047bf08910573deb95d3c8dfb976eb014ee24f3b5a61ccc5bee1b"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d08264b4460326cefacc179fc1411304d5af388a79910832835e6f641512358b"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a3639011c2e8a9628466f616ed7fb413f30032b891898e10895a0a8b5857d6c"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:05e83ce2f7eba29e627dd8066aa6c4c0269b2d4f889c0eba157233a353053cea"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:603a843fea76a595c8f661cd4da4d2281dff1e38c4a836a928eac1a2f8fe88e4"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac76f30d5d3454f4c28826d891fe74d25121a346c69523c9810ebba43f3b1cec"}, - {file = 
"pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e3b1d4b1b3f6082849f9b28427ef147a5b46a6132a3dbaf9ca1baa40c88609"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2761f71faed820e25ec62eacba670d1b5c2709bb131a19fcdbfbb09884593e5a"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0586cddbf4380e24569b8a05f234e7305717cc8323f50114dfb2051fcbce2a3"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b8c46a8cf53e849eea7090f331ae2202cd0f1ceb090b00f5902c423bd1e11805"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b4a085bd04af7245e140d1b95619fe8abb445a3d7fdf219b3f80c940853268ef"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:116b326ac82c8b315e7348390f6d30bcfe6e688a7d3f1de50ff7bcc2042a23c2"}, - {file = "pydantic_core-2.20.0.tar.gz", hash = "sha256:366be8e64e0cb63d87cf79b4e1765c0703dd6313c729b22e7b9e378db6b96877"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = 
"pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = 
"pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = 
"pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = 
"pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = 
"pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] diff --git a/python/pyproject.toml b/python/pyproject.toml index cda98a8c7..54af5b124 100644 --- a/python/pyproject.toml +++ 
b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.95" +version = "0.1.96" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From cc736d769b5582356f356e1ab4d05413fb27bb18 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 1 Aug 2024 18:07:11 -0700 Subject: [PATCH 334/373] fix: pushing new manifest without metadata update --- python/langsmith/client.py | 19 ++++++++++--------- .../tests/integration_tests/test_prompts.py | 9 ++++++++- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index be40dfb02..a5585288f 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5036,7 +5036,7 @@ def create_prompt( description: Optional[str] = None, readme: Optional[str] = None, tags: Optional[Sequence[str]] = None, - is_public: bool = False, + is_public: Optional[bool] = False, ) -> ls_schemas.Prompt: """Create a new prompt. 
@@ -5074,7 +5074,7 @@ def create_prompt( "description": description or "", "readme": readme or "", "tags": tags or [], - "is_public": is_public, + "is_public": is_public or False, } response = self.request_with_retries("POST", "/repos/", json=json) @@ -5350,13 +5350,14 @@ def push_prompt( """ # Create or update prompt metadata if self._prompt_exists(prompt_identifier): - self.update_prompt( - prompt_identifier, - description=description, - readme=readme, - tags=tags, - is_public=is_public, - ) + if not (parent_commit_hash is None and is_public is None and description is None and readme is None and tags is None): + self.update_prompt( + prompt_identifier, + description=description, + readme=readme, + tags=tags, + is_public=is_public, + ) else: self.create_prompt( prompt_identifier, diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index 80f6e5c4c..f5b7bdcaf 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -411,7 +411,7 @@ def test_create_commit( langsmith_client.delete_prompt(prompt_name) -def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate): +def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate, prompt_template_2: ChatPromptTemplate): prompt_name = f"test_push_new_{uuid4().hex[:8]}" url = langsmith_client.push_prompt( prompt_name, @@ -444,6 +444,13 @@ def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate assert updated_prompt.description == "Updated prompt" assert not updated_prompt.is_public assert updated_prompt.num_commits == 1 + + # test updating prompt manifest but not metadata + url = langsmith_client.push_prompt( + prompt_name, + object=prompt_template_2, + ) + assert isinstance(url, str) langsmith_client.delete_prompt(prompt_name) From b60542760965c980f351ac94920ef47a88164414 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Thu, 1 Aug 2024 18:19:20 
-0700 Subject: [PATCH 335/373] lint --- python/langsmith/client.py | 8 +++++++- python/tests/integration_tests/test_prompts.py | 6 +++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index a5585288f..52b532a0b 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5350,7 +5350,13 @@ def push_prompt( """ # Create or update prompt metadata if self._prompt_exists(prompt_identifier): - if not (parent_commit_hash is None and is_public is None and description is None and readme is None and tags is None): + if not ( + parent_commit_hash is None + and is_public is None + and description is None + and readme is None + and tags is None + ): self.update_prompt( prompt_identifier, description=description, diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index f5b7bdcaf..bf248a989 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -411,7 +411,11 @@ def test_create_commit( langsmith_client.delete_prompt(prompt_name) -def test_push_prompt(langsmith_client: Client, prompt_template_3: PromptTemplate, prompt_template_2: ChatPromptTemplate): +def test_push_prompt( + langsmith_client: Client, + prompt_template_3: PromptTemplate, + prompt_template_2: ChatPromptTemplate +): prompt_name = f"test_push_new_{uuid4().hex[:8]}" url = langsmith_client.push_prompt( prompt_name, From d5fa3b43bb988d7ad2b72b9f803d3dbe35b44793 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 2 Aug 2024 09:35:44 -0700 Subject: [PATCH 336/373] make bool non optional --- python/langsmith/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 52b532a0b..5738278a8 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5036,7 +5036,7 @@ def create_prompt( description: Optional[str] = None, 
readme: Optional[str] = None, tags: Optional[Sequence[str]] = None, - is_public: Optional[bool] = False, + is_public: bool = False, ) -> ls_schemas.Prompt: """Create a new prompt. From d995041c7f3205289e6bf5ba3e4a5f012c23fe74 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Fri, 2 Aug 2024 18:01:20 -0700 Subject: [PATCH 337/373] limt --- python/tests/integration_tests/test_prompts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tests/integration_tests/test_prompts.py b/python/tests/integration_tests/test_prompts.py index bf248a989..0bef1ba57 100644 --- a/python/tests/integration_tests/test_prompts.py +++ b/python/tests/integration_tests/test_prompts.py @@ -414,7 +414,7 @@ def test_create_commit( def test_push_prompt( langsmith_client: Client, prompt_template_3: PromptTemplate, - prompt_template_2: ChatPromptTemplate + prompt_template_2: ChatPromptTemplate, ): prompt_name = f"test_push_new_{uuid4().hex[:8]}" url = langsmith_client.push_prompt( @@ -448,7 +448,7 @@ def test_push_prompt( assert updated_prompt.description == "Updated prompt" assert not updated_prompt.is_public assert updated_prompt.num_commits == 1 - + # test updating prompt manifest but not metadata url = langsmith_client.push_prompt( prompt_name, From 16738205830264ecda46e604c628c36461af15d9 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Sat, 3 Aug 2024 11:58:51 -0700 Subject: [PATCH 338/373] allow filtering projects by metadata --- python/langsmith/client.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 2c18e05fa..9c460c3c1 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -2438,6 +2438,7 @@ def list_projects( reference_dataset_name: Optional[str] = None, reference_free: Optional[bool] = None, limit: Optional[int] = None, + metadata: Optional[Dict[str, Any]] = None, ) -> Iterator[ls_schemas.TracerSession]: """List projects from the 
LangSmith API. @@ -2457,6 +2458,8 @@ def list_projects( Whether to filter for only projects not associated with a dataset. limit : Optional[int], optional The maximum number of projects to return, by default None + metadata: Optional[Dict[str, Any]], optional + Metadata to filter by. Yields: ------ @@ -2486,6 +2489,8 @@ def list_projects( params["reference_dataset"] = reference_dataset_id if reference_free is not None: params["reference_free"] = reference_free + if metadata is not None: + params["metadata"] = json.dumps(metadata) for i, project in enumerate( self._get_paginated_list("/sessions", params=params) ): From 33082f928bc0e9c10b446c7316f243ce53ba5dba Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Sat, 3 Aug 2024 12:34:50 -0700 Subject: [PATCH 339/373] add for js --- js/src/client.ts | 5 +++++ js/src/tests/client.int.test.ts | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/js/src/client.ts b/js/src/client.ts index aabcccb0f..86d8de7b5 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -1758,6 +1758,7 @@ export class Client { referenceDatasetId, referenceDatasetName, referenceFree, + metadata, }: { projectIds?: string[]; name?: string; @@ -1765,6 +1766,7 @@ export class Client { referenceDatasetId?: string; referenceDatasetName?: string; referenceFree?: boolean; + metadata?: RecordStringAny; } = {}): AsyncIterable { const params = new URLSearchParams(); if (projectIds !== undefined) { @@ -1789,6 +1791,9 @@ export class Client { if (referenceFree !== undefined) { params.append("reference_free", referenceFree.toString()); } + if (metadata !== undefined) { + params.append("metadata", JSON.stringify(metadata)); + } for await (const projects of this._getPaginated( "/sessions", params diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 3dfea306f..6952c0fc8 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -912,6 +912,25 @@ test("Test 
delete prompt", async () => { expect(await client.promptExists(promptName)).toBe(false); }); +test("test listing projects by metadata", async () => { + const client = new Client(); + await client.createProject({ + projectName: "my_metadata_project", + metadata: { + "foo": "bar", + "baz": "qux", + } + }); + + const projects = await client.listProjects({ metadata: { "foo": "bar" } }); + + let myProject: TracerSession | null = null; + for await (const project of projects) { + myProject = project; + } + expect(myProject?.name).toEqual("my_metadata_project"); +}); + test("Test create commit", async () => { const client = new Client(); From 48bc82715168e022d12b7ccef70fbfdd3ff9cd66 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Sat, 3 Aug 2024 12:35:12 -0700 Subject: [PATCH 340/373] Update client.int.test.ts --- js/src/tests/client.int.test.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 6952c0fc8..9729a41ff 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -917,12 +917,12 @@ test("test listing projects by metadata", async () => { await client.createProject({ projectName: "my_metadata_project", metadata: { - "foo": "bar", - "baz": "qux", - } + foo: "bar", + baz: "qux", + }, }); - const projects = await client.listProjects({ metadata: { "foo": "bar" } }); + const projects = await client.listProjects({ metadata: { foo: "bar" } }); let myProject: TracerSession | null = null; for await (const project of projects) { From 99712f21a50c26a376cce91d6cde9c4f74b78094 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Sat, 3 Aug 2024 12:41:08 -0700 Subject: [PATCH 341/373] Update client.int.test.ts --- js/src/tests/client.int.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 
9729a41ff..50b9643a5 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -1,4 +1,4 @@ -import { Dataset, Run } from "../schemas.js"; +import { Dataset, Run, TracerSession } from "../schemas.js"; import { FunctionMessage, HumanMessage, From 283bb97f04c472fcef2fd77c126a1c8b4ba92ce8 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Sat, 3 Aug 2024 13:01:16 -0700 Subject: [PATCH 342/373] fix test --- js/src/tests/client.int.test.ts | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 50b9643a5..04b26e3f4 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -917,18 +917,20 @@ test("test listing projects by metadata", async () => { await client.createProject({ projectName: "my_metadata_project", metadata: { - foo: "bar", - baz: "qux", + foobar: "bar", + baz: "barfooqux", }, }); - const projects = await client.listProjects({ metadata: { foo: "bar" } }); + const projects = await client.listProjects({ metadata: { foobar: "bar" } }); let myProject: TracerSession | null = null; for await (const project of projects) { myProject = project; } expect(myProject?.name).toEqual("my_metadata_project"); + + await client.deleteProject({ projectName: "my_metadata_project" }); }); test("Test create commit", async () => { From 88c72745c878c58de425e07fd1069adf97210fd2 Mon Sep 17 00:00:00 2001 From: SN <6432132+samnoyes@users.noreply.github.com> Date: Mon, 5 Aug 2024 18:00:23 -0700 Subject: [PATCH 343/373] bump --- js/package.json | 2 +- js/src/index.ts | 2 +- python/pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/js/package.json b/js/package.json index f67db8f95..05b2d7e86 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.40", + "version": "0.1.41", "description": "Client library to connect to the LangSmith 
LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/index.ts b/js/src/index.ts index b2e3baf5e..faac74776 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.40"; +export const __version__ = "0.1.41"; diff --git a/python/pyproject.toml b/python/pyproject.toml index 54af5b124..ff387d75d 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.96" +version = "0.1.97" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From 01d5622b88d16104d28de6908e6d93e65d392bd3 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 5 Aug 2024 22:34:00 -0700 Subject: [PATCH 344/373] Process outputs (#911) --- python/langsmith/run_helpers.py | 40 ++++++-- python/pyproject.toml | 2 +- python/tests/integration_tests/test_runs.py | 6 +- python/tests/unit_tests/test_run_helpers.py | 107 ++++++++++++++++++++ 4 files changed, 140 insertions(+), 15 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 90301c03d..55d9d1ad0 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -25,6 +25,7 @@ Mapping, Optional, Protocol, + Sequence, Tuple, Type, TypedDict, @@ -242,9 +243,10 @@ def traceable( metadata: Optional[Mapping[str, Any]] = None, tags: Optional[List[str]] = None, client: Optional[ls_client.Client] = None, - reduce_fn: Optional[Callable] = None, + reduce_fn: Optional[Callable[[Sequence], dict]] = None, project_name: Optional[str] = None, process_inputs: Optional[Callable[[dict], dict]] = None, + process_outputs: Optional[Callable[..., dict]] = None, _invocation_params_fn: Optional[Callable[[dict], dict]] = 
None, ) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: ... @@ -270,7 +272,11 @@ def traceable( called, and the run itself will be stuck in a pending state. project_name: The name of the project to log the run to. Defaults to None, which will use the default project. - process_inputs: A function to filter the inputs to the run. Defaults to None. + process_inputs: Custom serialization / processing function for inputs. + Defaults to None. + process_outputs: Custom serialization / processing function for outputs. + Defaults to None. + Returns: @@ -415,6 +421,18 @@ def manual_extra_function(x): process_inputs=kwargs.pop("process_inputs", None), invocation_params_fn=kwargs.pop("_invocation_params_fn", None), ) + outputs_processor = kwargs.pop("process_outputs", None) + + def _on_run_end( + container: _TraceableContainer, + outputs: Optional[Any] = None, + error: Optional[BaseException] = None, + ) -> None: + """Handle the end of run.""" + if outputs and outputs_processor is not None: + outputs = outputs_processor(outputs) + _container_end(container, outputs=outputs, error=error) + if kwargs: warnings.warn( f"The following keyword arguments are not recognized and will be ignored: " @@ -463,11 +481,11 @@ async def async_wrapper( except BaseException as e: # shield from cancellation, given we're catching all exceptions await asyncio.shield( - aitertools.aio_to_thread(_container_end, run_container, error=e) + aitertools.aio_to_thread(_on_run_end, run_container, error=e) ) raise e await aitertools.aio_to_thread( - _container_end, run_container, outputs=function_result + _on_run_end, run_container, outputs=function_result ) return function_result @@ -536,7 +554,7 @@ async def async_generator_wrapper( pass except BaseException as e: await asyncio.shield( - aitertools.aio_to_thread(_container_end, run_container, error=e) + aitertools.aio_to_thread(_on_run_end, run_container, error=e) ) raise e if results: @@ -551,7 +569,7 @@ async def async_generator_wrapper( else: 
function_result = None await aitertools.aio_to_thread( - _container_end, run_container, outputs=function_result + _on_run_end, run_container, outputs=function_result ) @functools.wraps(func) @@ -578,9 +596,9 @@ def wrapper( kwargs.pop("config", None) function_result = run_container["context"].run(func, *args, **kwargs) except BaseException as e: - _container_end(run_container, error=e) + _on_run_end(run_container, error=e) raise e - _container_end(run_container, outputs=function_result) + _on_run_end(run_container, outputs=function_result) return function_result @functools.wraps(func) @@ -630,7 +648,7 @@ def generator_wrapper( pass except BaseException as e: - _container_end(run_container, error=e) + _on_run_end(run_container, error=e) raise e if results: if reduce_fn: @@ -643,7 +661,7 @@ def generator_wrapper( function_result = results else: function_result = None - _container_end(run_container, outputs=function_result) + _on_run_end(run_container, outputs=function_result) if inspect.isasyncgenfunction(func): selected_wrapper: Callable = async_generator_wrapper @@ -1131,7 +1149,7 @@ def _container_end( container: _TraceableContainer, outputs: Optional[Any] = None, error: Optional[BaseException] = None, -): +) -> None: """End the run.""" run_tree = container.get("new_run") if run_tree is None: diff --git a/python/pyproject.toml b/python/pyproject.toml index ff387d75d..27d00542f 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.97" +version = "0.1.98" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
authors = ["LangChain "] license = "MIT" diff --git a/python/tests/integration_tests/test_runs.py b/python/tests/integration_tests/test_runs.py index fbf87ea92..c9b62661e 100644 --- a/python/tests/integration_tests/test_runs.py +++ b/python/tests/integration_tests/test_runs.py @@ -3,7 +3,7 @@ import uuid from collections import defaultdict from concurrent.futures import ThreadPoolExecutor -from typing import AsyncGenerator, Generator, Optional +from typing import AsyncGenerator, Generator, Optional, Sequence import pytest # type: ignore @@ -330,7 +330,7 @@ def test_sync_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_sync_generator_reduce_fn" run_meta = uuid.uuid4().hex - def reduce_fn(outputs: list) -> dict: + def reduce_fn(outputs: Sequence) -> dict: return {"my_output": " ".join(outputs)} @traceable(run_type="chain", reduce_fn=reduce_fn) @@ -411,7 +411,7 @@ async def test_async_generator_reduce_fn(langchain_client: Client): project_name = "__My Tracer Project - test_async_generator_reduce_fn" run_meta = uuid.uuid4().hex - def reduce_fn(outputs: list) -> dict: + def reduce_fn(outputs: Sequence) -> dict: return {"my_output": " ".join(outputs)} @traceable(run_type="chain", reduce_fn=reduce_fn) diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index 4c960ddf8..f749dc17a 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -1341,3 +1341,110 @@ async def test_trace_respects_env_var(env_var: bool, context: Optional[bool]): assert len(mock_calls) >= 1 else: assert not mock_calls + + +async def test_process_inputs_outputs(): + mock_client = _get_mock_client() + in_s = "what's life's meaning" + + def process_inputs(inputs: dict) -> dict: + assert inputs == {"val": in_s, "ooblek": "nada"} + inputs["val2"] = "this is mutated" + return {"serialized_in": "what's the meaning of life?"} + + def process_outputs(outputs: int) -> 
dict: + assert outputs == 42 + return {"serialized_out": 24} + + @traceable(process_inputs=process_inputs, process_outputs=process_outputs) + def my_function(val: str, **kwargs: Any) -> int: + assert not kwargs.get("val2") + return 42 + + with tracing_context(enabled=True): + my_function( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + + def _check_client(client: Client) -> None: + mock_calls = _get_calls(client) + assert len(mock_calls) == 1 + call = mock_calls[0] + assert call.args[0] == "POST" + assert call.args[1].startswith("https://api.smith.langchain.com") + body = json.loads(call.kwargs["data"]) + assert body["post"] + assert body["post"][0]["inputs"] == { + "serialized_in": "what's the meaning of life?" + } + assert body["post"][0]["outputs"] == {"serialized_out": 24} + + _check_client(mock_client) + + @traceable(process_inputs=process_inputs, process_outputs=process_outputs) + async def amy_function(val: str, **kwargs: Any) -> int: + assert not kwargs.get("val2") + return 42 + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + await amy_function( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + + _check_client(mock_client) + + # Do generator + + def reducer(outputs: list) -> dict: + return {"reduced": outputs[0]} + + def process_reduced_outputs(outputs: dict) -> dict: + assert outputs == {"reduced": 42} + return {"serialized_out": 24} + + @traceable( + process_inputs=process_inputs, + process_outputs=process_reduced_outputs, + reduce_fn=reducer, + ) + def my_gen(val: str, **kwargs: Any) -> Generator[int, None, None]: + assert not kwargs.get("val2") + yield 42 + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + result = list( + my_gen( + in_s, + ooblek="nada", + langsmith_extra={"client": mock_client}, + ) + ) + assert result == [42] + + _check_client(mock_client) + + @traceable( + process_inputs=process_inputs, + process_outputs=process_reduced_outputs, + 
reduce_fn=reducer, + ) + async def amy_gen(val: str, **kwargs: Any) -> AsyncGenerator[int, None]: + assert not kwargs.get("val2") + yield 42 + + mock_client = _get_mock_client() + with tracing_context(enabled=True): + result = [ + i + async for i in amy_gen( + in_s, ooblek="nada", langsmith_extra={"client": mock_client} + ) + ] + assert result == [42] + _check_client(mock_client) From ce0c8f46ed0e2820990ad465c86cfc71d4ac8fb4 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 6 Aug 2024 09:34:07 -0700 Subject: [PATCH 345/373] just false --- python/langsmith/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index a89b21551..191d41c87 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5093,7 +5093,7 @@ def create_prompt( "description": description or "", "readme": readme or "", "tags": tags or [], - "is_public": is_public or False, + "is_public": is_public, } response = self.request_with_retries("POST", "/repos/", json=json) From 248bb5646198426f38ce7a14dbff1461bcd56411 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 6 Aug 2024 09:41:35 -0700 Subject: [PATCH 346/373] change negation to any --- python/langsmith/client.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 191d41c87..645377941 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -5369,12 +5369,9 @@ def push_prompt( """ # Create or update prompt metadata if self._prompt_exists(prompt_identifier): - if not ( - parent_commit_hash is None - and is_public is None - and description is None - and readme is None - and tags is None + if any( + param is not None + for param in [parent_commit_hash, is_public, description, readme, tags] ): self.update_prompt( prompt_identifier, From 281043df4b4b610af1f6c1e0a2ad8d71911fde49 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 6 Aug 2024 
10:24:24 -0700 Subject: [PATCH 347/373] test --- js/src/tests/client.int.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 04b26e3f4..517061c09 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -794,7 +794,7 @@ test("Test list prompts", async () => { }); // expect at least one of the prompts to have promptName1 - const response = await client.listPrompts({ isPublic: true }); + const response = await client.listPrompts({ isPublic: true, query: 'test_prompt' }); let found = false; expect(response).toBeDefined(); for await (const prompt of response) { From 6e09e0967217d8822bd758b365a4cbd32dd303cc Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Tue, 6 Aug 2024 10:26:50 -0700 Subject: [PATCH 348/373] prettier --- js/src/tests/client.int.test.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 517061c09..001efb014 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -794,7 +794,10 @@ test("Test list prompts", async () => { }); // expect at least one of the prompts to have promptName1 - const response = await client.listPrompts({ isPublic: true, query: 'test_prompt' }); + const response = await client.listPrompts({ + isPublic: true, + query: "test_prompt", + }); let found = false; expect(response).toBeDefined(); for await (const prompt of response) { From 50d3d92239d88e9021078792d92243bb7825aaa1 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Thu, 8 Aug 2024 10:59:38 -0700 Subject: [PATCH 349/373] Accept EvaluationResults with dict (#915) Previously required {"results": [EvaluationResult]} Now can be: {"results": [EvaluationResultLike]} --- js/src/tests/client.int.test.ts | 24 ++++++++------- python/langsmith/client.py | 38 +++++++++++++++++------- 
python/tests/unit_tests/test_client.py | 41 +++++++++++++++++++++++++- 3 files changed, 81 insertions(+), 22 deletions(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 04b26e3f4..ee940c6fe 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -759,10 +759,11 @@ test.concurrent("Test run stats", async () => { test("Test list prompts", async () => { const client = new Client(); + const uid = uuidv4(); // push 3 prompts - const promptName1 = `test_prompt_${uuidv4().slice(0, 8)}`; - const promptName2 = `test_prompt_${uuidv4().slice(0, 8)}`; - const promptName3 = `test_prompt_${uuidv4().slice(0, 8)}`; + const promptName1 = `test_prompt_${uid}__0`; + const promptName2 = `test_prompt_${uid}__1`; + const promptName3 = `test_prompt_${uid}__2`; await client.pushPrompt(promptName1, { object: ChatPromptTemplate.fromMessages( @@ -794,7 +795,7 @@ test("Test list prompts", async () => { }); // expect at least one of the prompts to have promptName1 - const response = await client.listPrompts({ isPublic: true }); + const response = client.listPrompts({ isPublic: true, query: uid }); let found = false; expect(response).toBeDefined(); for await (const prompt of response) { @@ -806,7 +807,7 @@ test("Test list prompts", async () => { expect(found).toBe(true); // expect the prompts to be sorted by updated_at - const response2 = client.listPrompts({ sortField: "updated_at" }); + const response2 = client.listPrompts({ sortField: "updated_at", query: uid }); expect(response2).toBeDefined(); let lastUpdatedAt: number | undefined; for await (const prompt of response2) { @@ -914,23 +915,26 @@ test("Test delete prompt", async () => { test("test listing projects by metadata", async () => { const client = new Client(); + const uid = uuidv4(); + const projectName = `my_metadata_project_${uid}`; + await client.createProject({ - projectName: "my_metadata_project", + projectName: projectName, metadata: { - foobar: "bar", + 
foobar: uid, baz: "barfooqux", }, }); - const projects = await client.listProjects({ metadata: { foobar: "bar" } }); + const projects = await client.listProjects({ metadata: { foobar: uid } }); let myProject: TracerSession | null = null; for await (const project of projects) { myProject = project; } - expect(myProject?.name).toEqual("my_metadata_project"); + expect(myProject?.name).toEqual(projectName); - await client.deleteProject({ projectName: "my_metadata_project" }); + await client.deleteProject({ projectName: projectName }); }); test("Test create commit", async () => { diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 9c460c3c1..49213d6e8 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -48,6 +48,7 @@ import orjson import requests from requests import adapters as requests_adapters +from typing_extensions import TypeGuard from urllib3.util import Retry import langsmith @@ -3673,25 +3674,40 @@ def _resolve_example_id( def _select_eval_results( self, - results: Union[ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults], + results: Union[ + ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict + ], *, fn_name: Optional[str] = None, ) -> List[ls_evaluator.EvaluationResult]: from langsmith.evaluation import evaluator as ls_evaluator # noqa: F811 + def _cast_result( + single_result: Union[ls_evaluator.EvaluationResult, dict], + ) -> ls_evaluator.EvaluationResult: + if isinstance(single_result, dict): + return ls_evaluator.EvaluationResult( + **{ + "key": fn_name, + "comment": single_result.get("reasoning"), + **single_result, + } + ) + return single_result + + def _is_eval_results(results: Any) -> TypeGuard[ls_evaluator.EvaluationResults]: + return isinstance(results, dict) and "results" in results + if isinstance(results, ls_evaluator.EvaluationResult): results_ = [results] + elif _is_eval_results(results): + results_ = [_cast_result(r) for r in results["results"]] elif isinstance(results, 
dict): - if "results" in results: - results_ = cast(List[ls_evaluator.EvaluationResult], results["results"]) - else: - results_ = [ - ls_evaluator.EvaluationResult(**{"key": fn_name, **results}) # type: ignore[arg-type] - ] + results_ = [_cast_result(cast(dict, results))] else: - raise TypeError( - f"Invalid evaluation result type {type(results)}." - " Expected EvaluationResult or EvaluationResults." + raise ValueError( + f"Invalid evaluation results type: {type(results)}." + " Must be EvaluationResult, EvaluationResults." ) return results_ @@ -3745,7 +3761,7 @@ def evaluate_run( def _log_evaluation_feedback( self, evaluator_response: Union[ - ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults + ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict ], run: Optional[ls_schemas.Run] = None, source_info: Optional[Dict[str, Any]] = None, diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index 0d247d836..c60afa702 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -28,7 +28,7 @@ import langsmith.env as ls_env import langsmith.utils as ls_utils -from langsmith import run_trees +from langsmith import EvaluationResult, run_trees from langsmith import schemas as ls_schemas from langsmith.client import ( Client, @@ -1077,3 +1077,42 @@ def test_batch_ingest_run_splits_large_batches(payload_size: int): # Check that no duplicate run_ids are present in the request bodies assert len(request_bodies) == len(set([body["id"] for body in request_bodies])) + + +def test_select_eval_results(): + expected = EvaluationResult( + key="foo", + value="bar", + score=7899082, + metadata={"a": "b"}, + comment="hi", + feedback_config={"c": "d"}, + ) + client = Client(api_key="test") + for count, input_ in [ + (1, expected), + (1, expected.dict()), + (1, {"results": [expected]}), + (1, {"results": [expected.dict()]}), + (2, {"results": [expected.dict(), expected.dict()]}), + (2, 
{"results": [expected, expected]}), + ]: + op = client._select_eval_results(input_) + assert len(op) == count + assert op == [expected] * count + + expected2 = EvaluationResult( + key="foo", + metadata={"a": "b"}, + comment="this is a comment", + feedback_config={"c": "d"}, + ) + + as_reasoning = { + "reasoning": expected2.comment, + **expected2.dict(exclude={"comment"}), + } + for input_ in [as_reasoning, {"results": [as_reasoning]}, {"results": [expected2]}]: + assert client._select_eval_results(input_) == [ + expected2, + ] From 0b26e82ce9eaeb0243b1ef415a812bfafc3cca52 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Sun, 11 Aug 2024 10:16:13 -0700 Subject: [PATCH 350/373] merge --- js/src/tests/client.int.test.ts | 27 +++++++++-------- python/langsmith/client.py | 38 +++++++++++++++++------- python/tests/unit_tests/test_client.py | 41 +++++++++++++++++++++++++- 3 files changed, 81 insertions(+), 25 deletions(-) diff --git a/js/src/tests/client.int.test.ts b/js/src/tests/client.int.test.ts index 001efb014..ee940c6fe 100644 --- a/js/src/tests/client.int.test.ts +++ b/js/src/tests/client.int.test.ts @@ -759,10 +759,11 @@ test.concurrent("Test run stats", async () => { test("Test list prompts", async () => { const client = new Client(); + const uid = uuidv4(); // push 3 prompts - const promptName1 = `test_prompt_${uuidv4().slice(0, 8)}`; - const promptName2 = `test_prompt_${uuidv4().slice(0, 8)}`; - const promptName3 = `test_prompt_${uuidv4().slice(0, 8)}`; + const promptName1 = `test_prompt_${uid}__0`; + const promptName2 = `test_prompt_${uid}__1`; + const promptName3 = `test_prompt_${uid}__2`; await client.pushPrompt(promptName1, { object: ChatPromptTemplate.fromMessages( @@ -794,10 +795,7 @@ test("Test list prompts", async () => { }); // expect at least one of the prompts to have promptName1 - const response = await client.listPrompts({ - isPublic: true, - query: "test_prompt", - }); + const response = client.listPrompts({ isPublic: true, query: uid }); let 
found = false; expect(response).toBeDefined(); for await (const prompt of response) { @@ -809,7 +807,7 @@ test("Test list prompts", async () => { expect(found).toBe(true); // expect the prompts to be sorted by updated_at - const response2 = client.listPrompts({ sortField: "updated_at" }); + const response2 = client.listPrompts({ sortField: "updated_at", query: uid }); expect(response2).toBeDefined(); let lastUpdatedAt: number | undefined; for await (const prompt of response2) { @@ -917,23 +915,26 @@ test("Test delete prompt", async () => { test("test listing projects by metadata", async () => { const client = new Client(); + const uid = uuidv4(); + const projectName = `my_metadata_project_${uid}`; + await client.createProject({ - projectName: "my_metadata_project", + projectName: projectName, metadata: { - foobar: "bar", + foobar: uid, baz: "barfooqux", }, }); - const projects = await client.listProjects({ metadata: { foobar: "bar" } }); + const projects = await client.listProjects({ metadata: { foobar: uid } }); let myProject: TracerSession | null = null; for await (const project of projects) { myProject = project; } - expect(myProject?.name).toEqual("my_metadata_project"); + expect(myProject?.name).toEqual(projectName); - await client.deleteProject({ projectName: "my_metadata_project" }); + await client.deleteProject({ projectName: projectName }); }); test("Test create commit", async () => { diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 645377941..cc3805695 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -48,6 +48,7 @@ import orjson import requests from requests import adapters as requests_adapters +from typing_extensions import TypeGuard from urllib3.util import Retry import langsmith @@ -3673,25 +3674,40 @@ def _resolve_example_id( def _select_eval_results( self, - results: Union[ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults], + results: Union[ + ls_evaluator.EvaluationResult, 
ls_evaluator.EvaluationResults, dict + ], *, fn_name: Optional[str] = None, ) -> List[ls_evaluator.EvaluationResult]: from langsmith.evaluation import evaluator as ls_evaluator # noqa: F811 + def _cast_result( + single_result: Union[ls_evaluator.EvaluationResult, dict], + ) -> ls_evaluator.EvaluationResult: + if isinstance(single_result, dict): + return ls_evaluator.EvaluationResult( + **{ + "key": fn_name, + "comment": single_result.get("reasoning"), + **single_result, + } + ) + return single_result + + def _is_eval_results(results: Any) -> TypeGuard[ls_evaluator.EvaluationResults]: + return isinstance(results, dict) and "results" in results + if isinstance(results, ls_evaluator.EvaluationResult): results_ = [results] + elif _is_eval_results(results): + results_ = [_cast_result(r) for r in results["results"]] elif isinstance(results, dict): - if "results" in results: - results_ = cast(List[ls_evaluator.EvaluationResult], results["results"]) - else: - results_ = [ - ls_evaluator.EvaluationResult(**{"key": fn_name, **results}) # type: ignore[arg-type] - ] + results_ = [_cast_result(cast(dict, results))] else: - raise TypeError( - f"Invalid evaluation result type {type(results)}." - " Expected EvaluationResult or EvaluationResults." + raise ValueError( + f"Invalid evaluation results type: {type(results)}." + " Must be EvaluationResult, EvaluationResults." 
) return results_ @@ -3745,7 +3761,7 @@ def evaluate_run( def _log_evaluation_feedback( self, evaluator_response: Union[ - ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults + ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict ], run: Optional[ls_schemas.Run] = None, source_info: Optional[Dict[str, Any]] = None, diff --git a/python/tests/unit_tests/test_client.py b/python/tests/unit_tests/test_client.py index 0d247d836..c60afa702 100644 --- a/python/tests/unit_tests/test_client.py +++ b/python/tests/unit_tests/test_client.py @@ -28,7 +28,7 @@ import langsmith.env as ls_env import langsmith.utils as ls_utils -from langsmith import run_trees +from langsmith import EvaluationResult, run_trees from langsmith import schemas as ls_schemas from langsmith.client import ( Client, @@ -1077,3 +1077,42 @@ def test_batch_ingest_run_splits_large_batches(payload_size: int): # Check that no duplicate run_ids are present in the request bodies assert len(request_bodies) == len(set([body["id"] for body in request_bodies])) + + +def test_select_eval_results(): + expected = EvaluationResult( + key="foo", + value="bar", + score=7899082, + metadata={"a": "b"}, + comment="hi", + feedback_config={"c": "d"}, + ) + client = Client(api_key="test") + for count, input_ in [ + (1, expected), + (1, expected.dict()), + (1, {"results": [expected]}), + (1, {"results": [expected.dict()]}), + (2, {"results": [expected.dict(), expected.dict()]}), + (2, {"results": [expected, expected]}), + ]: + op = client._select_eval_results(input_) + assert len(op) == count + assert op == [expected] * count + + expected2 = EvaluationResult( + key="foo", + metadata={"a": "b"}, + comment="this is a comment", + feedback_config={"c": "d"}, + ) + + as_reasoning = { + "reasoning": expected2.comment, + **expected2.dict(exclude={"comment"}), + } + for input_ in [as_reasoning, {"results": [as_reasoning]}, {"results": [expected2]}]: + assert client._select_eval_results(input_) == [ + expected2, + ] 
From 77904d2946e32ba849e098b592408603d03e46d5 Mon Sep 17 00:00:00 2001 From: Maddy Adams Date: Sun, 11 Aug 2024 12:09:09 -0700 Subject: [PATCH 351/373] chore: bump version --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index 27d00542f..7c7c95888 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langsmith" -version = "0.1.98" +version = "0.1.99" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." authors = ["LangChain "] license = "MIT" From a73ca190f9a044443ed5095f35162ffce0ec4b17 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 12 Aug 2024 10:06:18 -0700 Subject: [PATCH 352/373] [Python] fix: handle vals that can't have truthiness checks (#921) --- python/langsmith/run_helpers.py | 9 +- python/poetry.lock | 730 +++++++++++--------- python/tests/unit_tests/test_run_helpers.py | 16 +- 3 files changed, 436 insertions(+), 319 deletions(-) diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 55d9d1ad0..05f2534fe 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -429,9 +429,12 @@ def _on_run_end( error: Optional[BaseException] = None, ) -> None: """Handle the end of run.""" - if outputs and outputs_processor is not None: - outputs = outputs_processor(outputs) - _container_end(container, outputs=outputs, error=error) + try: + if outputs_processor is not None: + outputs = outputs_processor(outputs) + _container_end(container, outputs=outputs, error=error) + except BaseException as e: + LOGGER.warning(f"Unable to process trace outputs: {repr(e)}") if kwargs: warnings.warn( diff --git a/python/poetry.lock b/python/poetry.lock index efcd045b3..3d4d1374c 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -38,52 +38,52 @@ trio = ["trio (>=0.23)"] [[package]] name = 
"attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "black" -version = "24.4.2" +version = "24.8.0" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.8" files = [ - {file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"}, - {file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"}, - {file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"}, - {file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"}, - {file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"}, - {file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"}, - {file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"}, - {file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"}, - {file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"}, - {file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"}, - {file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"}, - {file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"}, - {file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"}, - {file = 
"black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"}, - {file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"}, - {file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"}, - {file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"}, - {file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"}, - {file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"}, - {file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"}, - {file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"}, - {file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"}, + {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, + {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, + {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, + {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, + {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, + {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, + {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, + {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, + {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, + {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, + {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, + {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, + {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, + {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, + {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, + {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, + {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, + {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, + {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, + {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, + {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, + {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, ] [package.dependencies] @@ -238,63 +238,83 @@ files = [ [[package]] name = "coverage" -version = "7.5.4" +version = "7.6.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cfb5a4f556bb51aba274588200a46e4dd6b505fb1a5f8c5ae408222eb416f99"}, - {file = "coverage-7.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2174e7c23e0a454ffe12267a10732c273243b4f2d50d07544a91198f05c48f47"}, - {file = "coverage-7.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2214ee920787d85db1b6a0bd9da5f8503ccc8fcd5814d90796c2f2493a2f4d2e"}, - {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1137f46adb28e3813dec8c01fefadcb8c614f33576f672962e323b5128d9a68d"}, - {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b385d49609f8e9efc885790a5a0e89f2e3ae042cdf12958b6034cc442de428d3"}, - {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4a474f799456e0eb46d78ab07303286a84a3140e9700b9e154cfebc8f527016"}, - {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:5cd64adedf3be66f8ccee418473c2916492d53cbafbfcff851cbec5a8454b136"}, - {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e564c2cf45d2f44a9da56f4e3a26b2236504a496eb4cb0ca7221cd4cc7a9aca9"}, - {file = "coverage-7.5.4-cp310-cp310-win32.whl", hash = "sha256:7076b4b3a5f6d2b5d7f1185fde25b1e54eb66e647a1dfef0e2c2bfaf9b4c88c8"}, - {file = "coverage-7.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:018a12985185038a5b2bcafab04ab833a9a0f2c59995b3cec07e10074c78635f"}, - {file = "coverage-7.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db14f552ac38f10758ad14dd7b983dbab424e731588d300c7db25b6f89e335b5"}, - {file = "coverage-7.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3257fdd8e574805f27bb5342b77bc65578e98cbc004a92232106344053f319ba"}, - {file = "coverage-7.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a6612c99081d8d6134005b1354191e103ec9705d7ba2754e848211ac8cacc6b"}, - {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d45d3cbd94159c468b9b8c5a556e3f6b81a8d1af2a92b77320e887c3e7a5d080"}, - {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed550e7442f278af76d9d65af48069f1fb84c9f745ae249c1a183c1e9d1b025c"}, - {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a892be37ca35eb5019ec85402c3371b0f7cda5ab5056023a7f13da0961e60da"}, - {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8192794d120167e2a64721d88dbd688584675e86e15d0569599257566dec9bf0"}, - {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:820bc841faa502e727a48311948e0461132a9c8baa42f6b2b84a29ced24cc078"}, - {file = "coverage-7.5.4-cp311-cp311-win32.whl", hash = "sha256:6aae5cce399a0f065da65c7bb1e8abd5c7a3043da9dceb429ebe1b289bc07806"}, - {file = "coverage-7.5.4-cp311-cp311-win_amd64.whl", 
hash = "sha256:d2e344d6adc8ef81c5a233d3a57b3c7d5181f40e79e05e1c143da143ccb6377d"}, - {file = "coverage-7.5.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:54317c2b806354cbb2dc7ac27e2b93f97096912cc16b18289c5d4e44fc663233"}, - {file = "coverage-7.5.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:042183de01f8b6d531e10c197f7f0315a61e8d805ab29c5f7b51a01d62782747"}, - {file = "coverage-7.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6bb74ed465d5fb204b2ec41d79bcd28afccf817de721e8a807d5141c3426638"}, - {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3d45ff86efb129c599a3b287ae2e44c1e281ae0f9a9bad0edc202179bcc3a2e"}, - {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5013ed890dc917cef2c9f765c4c6a8ae9df983cd60dbb635df8ed9f4ebc9f555"}, - {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1014fbf665fef86cdfd6cb5b7371496ce35e4d2a00cda501cf9f5b9e6fced69f"}, - {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3684bc2ff328f935981847082ba4fdc950d58906a40eafa93510d1b54c08a66c"}, - {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:581ea96f92bf71a5ec0974001f900db495488434a6928a2ca7f01eee20c23805"}, - {file = "coverage-7.5.4-cp312-cp312-win32.whl", hash = "sha256:73ca8fbc5bc622e54627314c1a6f1dfdd8db69788f3443e752c215f29fa87a0b"}, - {file = "coverage-7.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:cef4649ec906ea7ea5e9e796e68b987f83fa9a718514fe147f538cfeda76d7a7"}, - {file = "coverage-7.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdd31315fc20868c194130de9ee6bfd99755cc9565edff98ecc12585b90be882"}, - {file = "coverage-7.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:02ff6e898197cc1e9fa375581382b72498eb2e6d5fc0b53f03e496cfee3fac6d"}, - {file = 
"coverage-7.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05c16cf4b4c2fc880cb12ba4c9b526e9e5d5bb1d81313d4d732a5b9fe2b9d53"}, - {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5986ee7ea0795a4095ac4d113cbb3448601efca7f158ec7f7087a6c705304e4"}, - {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df54843b88901fdc2f598ac06737f03d71168fd1175728054c8f5a2739ac3e4"}, - {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab73b35e8d109bffbda9a3e91c64e29fe26e03e49addf5b43d85fc426dde11f9"}, - {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:aea072a941b033813f5e4814541fc265a5c12ed9720daef11ca516aeacd3bd7f"}, - {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:16852febd96acd953b0d55fc842ce2dac1710f26729b31c80b940b9afcd9896f"}, - {file = "coverage-7.5.4-cp38-cp38-win32.whl", hash = "sha256:8f894208794b164e6bd4bba61fc98bf6b06be4d390cf2daacfa6eca0a6d2bb4f"}, - {file = "coverage-7.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:e2afe743289273209c992075a5a4913e8d007d569a406ffed0bd080ea02b0633"}, - {file = "coverage-7.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b95c3a8cb0463ba9f77383d0fa8c9194cf91f64445a63fc26fb2327e1e1eb088"}, - {file = "coverage-7.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7564cc09dd91b5a6001754a5b3c6ecc4aba6323baf33a12bd751036c998be4"}, - {file = "coverage-7.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44da56a2589b684813f86d07597fdf8a9c6ce77f58976727329272f5a01f99f7"}, - {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e16f3d6b491c48c5ae726308e6ab1e18ee830b4cdd6913f2d7f77354b33f91c8"}, - {file = 
"coverage-7.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbc5958cb471e5a5af41b0ddaea96a37e74ed289535e8deca404811f6cb0bc3d"}, - {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a04e990a2a41740b02d6182b498ee9796cf60eefe40cf859b016650147908029"}, - {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ddbd2f9713a79e8e7242d7c51f1929611e991d855f414ca9996c20e44a895f7c"}, - {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b1ccf5e728ccf83acd313c89f07c22d70d6c375a9c6f339233dcf792094bcbf7"}, - {file = "coverage-7.5.4-cp39-cp39-win32.whl", hash = "sha256:56b4eafa21c6c175b3ede004ca12c653a88b6f922494b023aeb1e836df953ace"}, - {file = "coverage-7.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:65e528e2e921ba8fd67d9055e6b9f9e34b21ebd6768ae1c1723f4ea6ace1234d"}, - {file = "coverage-7.5.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:79b356f3dd5b26f3ad23b35c75dbdaf1f9e2450b6bcefc6d0825ea0aa3f86ca5"}, - {file = "coverage-7.5.4.tar.gz", hash = "sha256:a44963520b069e12789d0faea4e9fdb1e410cdc4aab89d94f7f55cbb7fef0353"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = 
"coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + 
{file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", 
hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -331,13 +351,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.1" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, - {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] @@ -468,6 +488,76 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "jiter" +version = "0.5.0" +description = "Fast iterable JSON parser." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"}, + {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc0d5b8b3dd12e91dd184b87273f864b363dfabc90ef29a1092d269f18c7e28"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c22541f0b672f4d741382a97c65609332a783501551445ab2df137ada01e019e"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63314832e302cc10d8dfbda0333a384bf4bcfce80d65fe99b0f3c0da8945a91a"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a25fbd8a5a58061e433d6fae6d5298777c0814a8bcefa1e5ecfff20c594bd749"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503b2c27d87dfff5ab717a8200fbbcf4714516c9d85558048b1fc14d2de7d8dc"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d1f3d27cce923713933a844872d213d244e09b53ec99b7a7fdf73d543529d6d"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c95980207b3998f2c3b3098f357994d3fd7661121f30669ca7cb945f09510a87"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:afa66939d834b0ce063f57d9895e8036ffc41c4bd90e4a99631e5f261d9b518e"}, + {file = "jiter-0.5.0-cp310-none-win32.whl", hash = "sha256:f16ca8f10e62f25fd81d5310e852df6649af17824146ca74647a018424ddeccf"}, + {file = "jiter-0.5.0-cp310-none-win_amd64.whl", hash = "sha256:b2950e4798e82dd9176935ef6a55cf6a448b5c71515a556da3f6b811a7844f1e"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646"}, + {file = "jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb"}, + {file = "jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338"}, + {file = "jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4"}, + {file = "jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f04bc2fc50dc77be9d10f73fcc4e39346402ffe21726ff41028f36e179b587e6"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6f433a4169ad22fcb550b11179bb2b4fd405de9b982601914ef448390b2954f3"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ad4a6398c85d3a20067e6c69890ca01f68659da94d74c800298581724e426c7e"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6baa88334e7af3f4d7a5c66c3a63808e5efbc3698a1c57626541ddd22f8e4fbf"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ece0a115c05efca597c6d938f88c9357c843f8c245dbbb53361a1c01afd7148"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:335942557162ad372cc367ffaf93217117401bf930483b4b3ebdb1223dbddfa7"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649b0ee97a6e6da174bffcb3c8c051a5935d7d4f2f52ea1583b5b3e7822fbf14"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4be354c5de82157886ca7f5925dbda369b77344b4b4adf2723079715f823989"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5206144578831a6de278a38896864ded4ed96af66e1e63ec5dd7f4a1fce38a3a"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8120c60f8121ac3d6f072b97ef0e71770cc72b3c23084c72c4189428b1b1d3b6"}, + {file = "jiter-0.5.0-cp38-none-win32.whl", hash = "sha256:6f1223f88b6d76b519cb033a4d3687ca157c272ec5d6015c322fc5b3074d8a5e"}, + {file = "jiter-0.5.0-cp38-none-win_amd64.whl", hash = "sha256:c59614b225d9f434ea8fc0d0bec51ef5fa8c83679afedc0433905994fb36d631"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0af3838cfb7e6afee3f00dc66fa24695199e20ba87df26e942820345b0afc566"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:550b11d669600dbc342364fd4adbe987f14d0bbedaf06feb1b983383dcc4b961"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:489875bf1a0ffb3cb38a727b01e6673f0f2e395b2aad3c9387f94187cb214bbf"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:b250ca2594f5599ca82ba7e68785a669b352156260c5362ea1b4e04a0f3e2389"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ea18e01f785c6667ca15407cd6dabbe029d77474d53595a189bdc813347218e"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:462a52be85b53cd9bffd94e2d788a09984274fe6cebb893d6287e1c296d50653"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92cc68b48d50fa472c79c93965e19bd48f40f207cb557a8346daa020d6ba973b"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c834133e59a8521bc87ebcad773608c6fa6ab5c7a022df24a45030826cf10bc"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab3a71ff31cf2d45cb216dc37af522d335211f3a972d2fe14ea99073de6cb104"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cccd3af9c48ac500c95e1bcbc498020c87e1781ff0345dd371462d67b76643eb"}, + {file = "jiter-0.5.0-cp39-none-win32.whl", hash = "sha256:368084d8d5c4fc40ff7c3cc513c4f73e02c85f6009217922d0823a48ee7adf61"}, + {file = "jiter-0.5.0-cp39-none-win_amd64.whl", hash = "sha256:ce03f7b4129eb72f1687fa11300fbf677b02990618428934662406d2a76742a1"}, + {file = "jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a"}, +] + [[package]] name = "marshmallow" version = "3.21.3" @@ -588,44 +678,44 @@ files = [ [[package]] name = "mypy" -version = "1.10.1" +version = "1.11.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = 
"mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, + {file = "mypy-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a32fc80b63de4b5b3e65f4be82b4cfa362a46702672aa6a0f443b4689af7008c"}, + {file = "mypy-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1952f5ea8a5a959b05ed5f16452fddadbaae48b5d39235ab4c3fc444d5fd411"}, + {file = 
"mypy-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1e30dc3bfa4e157e53c1d17a0dad20f89dc433393e7702b813c10e200843b03"}, + {file = "mypy-1.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2c63350af88f43a66d3dfeeeb8d77af34a4f07d760b9eb3a8697f0386c7590b4"}, + {file = "mypy-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:a831671bad47186603872a3abc19634f3011d7f83b083762c942442d51c58d58"}, + {file = "mypy-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7b6343d338390bb946d449677726edf60102a1c96079b4f002dedff375953fc5"}, + {file = "mypy-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4fe9f4e5e521b458d8feb52547f4bade7ef8c93238dfb5bbc790d9ff2d770ca"}, + {file = "mypy-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:886c9dbecc87b9516eff294541bf7f3655722bf22bb898ee06985cd7269898de"}, + {file = "mypy-1.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fca4a60e1dd9fd0193ae0067eaeeb962f2d79e0d9f0f66223a0682f26ffcc809"}, + {file = "mypy-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:0bd53faf56de9643336aeea1c925012837432b5faf1701ccca7fde70166ccf72"}, + {file = "mypy-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f39918a50f74dc5969807dcfaecafa804fa7f90c9d60506835036cc1bc891dc8"}, + {file = "mypy-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bc71d1fb27a428139dd78621953effe0d208aed9857cb08d002280b0422003a"}, + {file = "mypy-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b868d3bcff720dd7217c383474008ddabaf048fad8d78ed948bb4b624870a417"}, + {file = "mypy-1.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a707ec1527ffcdd1c784d0924bf5cb15cd7f22683b919668a04d2b9c34549d2e"}, + {file = "mypy-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:64f4a90e3ea07f590c5bcf9029035cf0efeae5ba8be511a8caada1a4893f5525"}, + {file = 
"mypy-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:749fd3213916f1751fff995fccf20c6195cae941dc968f3aaadf9bb4e430e5a2"}, + {file = "mypy-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b639dce63a0b19085213ec5fdd8cffd1d81988f47a2dec7100e93564f3e8fb3b"}, + {file = "mypy-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c956b49c5d865394d62941b109728c5c596a415e9c5b2be663dd26a1ff07bc0"}, + {file = "mypy-1.11.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45df906e8b6804ef4b666af29a87ad9f5921aad091c79cc38e12198e220beabd"}, + {file = "mypy-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:d44be7551689d9d47b7abc27c71257adfdb53f03880841a5db15ddb22dc63edb"}, + {file = "mypy-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2684d3f693073ab89d76da8e3921883019ea8a3ec20fa5d8ecca6a2db4c54bbe"}, + {file = "mypy-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79c07eb282cb457473add5052b63925e5cc97dfab9812ee65a7c7ab5e3cb551c"}, + {file = "mypy-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11965c2f571ded6239977b14deebd3f4c3abd9a92398712d6da3a772974fad69"}, + {file = "mypy-1.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a2b43895a0f8154df6519706d9bca8280cda52d3d9d1514b2d9c3e26792a0b74"}, + {file = "mypy-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1a81cf05975fd61aec5ae16501a091cfb9f605dc3e3c878c0da32f250b74760b"}, + {file = "mypy-1.11.1-py3-none-any.whl", hash = "sha256:0624bdb940255d2dd24e829d99a13cfeb72e4e9031f9492148f410ed30bcab54"}, + {file = "mypy-1.11.1.tar.gz", hash = "sha256:f404a0b069709f18bbdb702eb3dcfe51910602995de00bd39cea3050b5772d08"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -646,139 +736,146 @@ files = [ [[package]] name = 
"numpy" -version = "2.0.0" +version = "2.0.1" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"}, - {file = "numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"}, - {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = 
"sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"}, - {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"}, - {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"}, - {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"}, - {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"}, - {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"}, - {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"}, - {file = "numpy-2.0.0.tar.gz", hash = "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fbb536eac80e27a2793ffd787895242b7f18ef792563d742c2d673bfcb75134"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:69ff563d43c69b1baba77af455dd0a839df8d25e8590e79c90fcbe1499ebde42"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:1b902ce0e0a5bb7704556a217c4f63a7974f8f43e090aff03fcf262e0b135e02"}, + {file = "numpy-2.0.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:f1659887361a7151f89e79b276ed8dff3d75877df906328f14d8bb40bb4f5101"}, + {file = "numpy-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4658c398d65d1b25e1760de3157011a80375da861709abd7cef3bad65d6543f9"}, + {file = "numpy-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4127d4303b9ac9f94ca0441138acead39928938660ca58329fe156f84b9f3015"}, + {file = "numpy-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e5eeca8067ad04bc8a2a8731183d51d7cbaac66d86085d5f4766ee6bf19c7f87"}, + {file = "numpy-2.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9adbd9bb520c866e1bfd7e10e1880a1f7749f1f6e5017686a5fbb9b72cf69f82"}, + 
{file = "numpy-2.0.1-cp310-cp310-win32.whl", hash = "sha256:7b9853803278db3bdcc6cd5beca37815b133e9e77ff3d4733c247414e78eb8d1"}, + {file = "numpy-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81b0893a39bc5b865b8bf89e9ad7807e16717f19868e9d234bdaf9b1f1393868"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75b4e316c5902d8163ef9d423b1c3f2f6252226d1aa5cd8a0a03a7d01ffc6268"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6e4eeb6eb2fced786e32e6d8df9e755ce5be920d17f7ce00bc38fcde8ccdbf9e"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a1e01dcaab205fbece13c1410253a9eea1b1c9b61d237b6fa59bcc46e8e89343"}, + {file = "numpy-2.0.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:a8fc2de81ad835d999113ddf87d1ea2b0f4704cbd947c948d2f5513deafe5a7b"}, + {file = "numpy-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a3d94942c331dd4e0e1147f7a8699a4aa47dffc11bf8a1523c12af8b2e91bbe"}, + {file = "numpy-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15eb4eca47d36ec3f78cde0a3a2ee24cf05ca7396ef808dda2c0ddad7c2bde67"}, + {file = "numpy-2.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b83e16a5511d1b1f8a88cbabb1a6f6a499f82c062a4251892d9ad5d609863fb7"}, + {file = "numpy-2.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f87fec1f9bc1efd23f4227becff04bd0e979e23ca50cc92ec88b38489db3b55"}, + {file = "numpy-2.0.1-cp311-cp311-win32.whl", hash = "sha256:36d3a9405fd7c511804dc56fc32974fa5533bdeb3cd1604d6b8ff1d292b819c4"}, + {file = "numpy-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:08458fbf403bff5e2b45f08eda195d4b0c9b35682311da5a5a0a0925b11b9bd8"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bf4e6f4a2a2e26655717a1983ef6324f2664d7011f6ef7482e8c0b3d51e82ac"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:7d6fddc5fe258d3328cd8e3d7d3e02234c5d70e01ebe377a6ab92adb14039cb4"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5daab361be6ddeb299a918a7c0864fa8618af66019138263247af405018b04e1"}, + {file = "numpy-2.0.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:ea2326a4dca88e4a274ba3a4405eb6c6467d3ffbd8c7d38632502eaae3820587"}, + {file = "numpy-2.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529af13c5f4b7a932fb0e1911d3a75da204eff023ee5e0e79c1751564221a5c8"}, + {file = "numpy-2.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6790654cb13eab303d8402354fabd47472b24635700f631f041bd0b65e37298a"}, + {file = "numpy-2.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cbab9fc9c391700e3e1287666dfd82d8666d10e69a6c4a09ab97574c0b7ee0a7"}, + {file = "numpy-2.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99d0d92a5e3613c33a5f01db206a33f8fdf3d71f2912b0de1739894668b7a93b"}, + {file = "numpy-2.0.1-cp312-cp312-win32.whl", hash = "sha256:173a00b9995f73b79eb0191129f2455f1e34c203f559dd118636858cc452a1bf"}, + {file = "numpy-2.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:bb2124fdc6e62baae159ebcfa368708867eb56806804d005860b6007388df171"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bfc085b28d62ff4009364e7ca34b80a9a080cbd97c2c0630bb5f7f770dae9414"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fae4ebbf95a179c1156fab0b142b74e4ba4204c87bde8d3d8b6f9c34c5825ef"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:72dc22e9ec8f6eaa206deb1b1355eb2e253899d7347f5e2fae5f0af613741d06"}, + {file = "numpy-2.0.1-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:ec87f5f8aca726117a1c9b7083e7656a9d0d606eec7299cc067bb83d26f16e0c"}, + {file = "numpy-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f682ea61a88479d9498bf2091fdcd722b090724b08b31d63e022adc063bad59"}, + {file = 
"numpy-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8efc84f01c1cd7e34b3fb310183e72fcdf55293ee736d679b6d35b35d80bba26"}, + {file = "numpy-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3fdabe3e2a52bc4eff8dc7a5044342f8bd9f11ef0934fcd3289a788c0eb10018"}, + {file = "numpy-2.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:24a0e1befbfa14615b49ba9659d3d8818a0f4d8a1c5822af8696706fbda7310c"}, + {file = "numpy-2.0.1-cp39-cp39-win32.whl", hash = "sha256:f9cf5ea551aec449206954b075db819f52adc1638d46a6738253a712d553c7b4"}, + {file = "numpy-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:e9e81fa9017eaa416c056e5d9e71be93d05e2c3c2ab308d23307a8bc4443c368"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:61728fba1e464f789b11deb78a57805c70b2ed02343560456190d0501ba37b0f"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:12f5d865d60fb9734e60a60f1d5afa6d962d8d4467c120a1c0cda6eb2964437d"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eacf3291e263d5a67d8c1a581a8ebbcfd6447204ef58828caf69a5e3e8c75990"}, + {file = "numpy-2.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2c3a346ae20cfd80b6cfd3e60dc179963ef2ea58da5ec074fd3d9e7a1e7ba97f"}, + {file = "numpy-2.0.1.tar.gz", hash = "sha256:485b87235796410c3519a699cfe1faab097e509e90ebb05dcd098db2ae87e7b3"}, ] [[package]] name = "openai" -version = "1.35.10" +version = "1.40.3" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.35.10-py3-none-any.whl", hash = "sha256:962cb5c23224b5cbd16078308dabab97a08b0a5ad736a4fdb3dc2ffc44ac974f"}, - {file = "openai-1.35.10.tar.gz", hash = "sha256:85966949f4f960f3e4b239a659f9fd64d3a97ecc43c44dc0a044b5c7f11cccc6"}, + {file = "openai-1.40.3-py3-none-any.whl", hash = "sha256:09396cb6e2e15c921a5d872bf92841a60a9425da10dcd962b45fe7c4f48f8395"}, + {file = 
"openai-1.40.3.tar.gz", hash = "sha256:f2ffe907618240938c59d7ccc67dd01dc8c50be203c0077240db6758d2f02480"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tqdm = ">4" -typing-extensions = ">=4.7,<5" +typing-extensions = ">=4.11,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.6" +version = "3.10.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.6-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:fb0ee33124db6eaa517d00890fc1a55c3bfe1cf78ba4a8899d71a06f2d6ff5c7"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c1c4b53b24a4c06547ce43e5fee6ec4e0d8fe2d597f4647fc033fd205707365"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eadc8fd310edb4bdbd333374f2c8fec6794bbbae99b592f448d8214a5e4050c0"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61272a5aec2b2661f4fa2b37c907ce9701e821b2c1285d5c3ab0207ebd358d38"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57985ee7e91d6214c837936dc1608f40f330a6b88bb13f5a57ce5257807da143"}, - {file = "orjson-3.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:633a3b31d9d7c9f02d49c4ab4d0a86065c4a6f6adc297d63d272e043472acab5"}, - {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1c680b269d33ec444afe2bdc647c9eb73166fa47a16d9a75ee56a374f4a45f43"}, - {file = "orjson-3.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f759503a97a6ace19e55461395ab0d618b5a117e8d0fbb20e70cfd68a47327f2"}, - {file = 
"orjson-3.10.6-cp310-none-win32.whl", hash = "sha256:95a0cce17f969fb5391762e5719575217bd10ac5a189d1979442ee54456393f3"}, - {file = "orjson-3.10.6-cp310-none-win_amd64.whl", hash = "sha256:df25d9271270ba2133cc88ee83c318372bdc0f2cd6f32e7a450809a111efc45c"}, - {file = "orjson-3.10.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b1ec490e10d2a77c345def52599311849fc063ae0e67cf4f84528073152bb2ba"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d43d3feb8f19d07e9f01e5b9be4f28801cf7c60d0fa0d279951b18fae1932b"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3045267e98fe749408eee1593a142e02357c5c99be0802185ef2170086a863"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c27bc6a28ae95923350ab382c57113abd38f3928af3c80be6f2ba7eb8d8db0b0"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d27456491ca79532d11e507cadca37fb8c9324a3976294f68fb1eff2dc6ced5a"}, - {file = "orjson-3.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05ac3d3916023745aa3b3b388e91b9166be1ca02b7c7e41045da6d12985685f0"}, - {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1335d4ef59ab85cab66fe73fd7a4e881c298ee7f63ede918b7faa1b27cbe5212"}, - {file = "orjson-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4bbc6d0af24c1575edc79994c20e1b29e6fb3c6a570371306db0993ecf144dc5"}, - {file = "orjson-3.10.6-cp311-none-win32.whl", hash = "sha256:450e39ab1f7694465060a0550b3f6d328d20297bf2e06aa947b97c21e5241fbd"}, - {file = "orjson-3.10.6-cp311-none-win_amd64.whl", hash = "sha256:227df19441372610b20e05bdb906e1742ec2ad7a66ac8350dcfd29a63014a83b"}, - {file = "orjson-3.10.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:ea2977b21f8d5d9b758bb3f344a75e55ca78e3ff85595d248eee813ae23ecdfb"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6f3d167d13a16ed263b52dbfedff52c962bfd3d270b46b7518365bcc2121eed"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f710f346e4c44a4e8bdf23daa974faede58f83334289df80bc9cd12fe82573c7"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7275664f84e027dcb1ad5200b8b18373e9c669b2a9ec33d410c40f5ccf4b257e"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0943e4c701196b23c240b3d10ed8ecd674f03089198cf503105b474a4f77f21f"}, - {file = "orjson-3.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:446dee5a491b5bc7d8f825d80d9637e7af43f86a331207b9c9610e2f93fee22a"}, - {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:64c81456d2a050d380786413786b057983892db105516639cb5d3ee3c7fd5148"}, - {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"}, - {file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"}, - {file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"}, - {file = "orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2c116072a8533f2fec435fde4d134610f806bdac20188c7bd2081f3e9e0133f"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6eeb13218c8cf34c61912e9df2de2853f1d009de0e46ea09ccdf3d757896af0a"}, - {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965a916373382674e323c957d560b953d81d7a8603fbeee26f7b8248638bd48b"}, - {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03c95484d53ed8e479cade8628c9cea00fd9d67f5554764a1110e0d5aa2de96e"}, - {file = "orjson-3.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e060748a04cccf1e0a6f2358dffea9c080b849a4a68c28b1b907f272b5127e9b"}, - {file = "orjson-3.10.6-cp38-none-win32.whl", hash = "sha256:738dbe3ef909c4b019d69afc19caf6b5ed0e2f1c786b5d6215fbb7539246e4c6"}, - {file = "orjson-3.10.6-cp38-none-win_amd64.whl", hash = "sha256:d40f839dddf6a7d77114fe6b8a70218556408c71d4d6e29413bb5f150a692ff7"}, - {file = "orjson-3.10.6-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:697a35a083c4f834807a6232b3e62c8b280f7a44ad0b759fd4dce748951e70db"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd502f96bf5ea9a61cbc0b2b5900d0dd68aa0da197179042bdd2be67e51a1e4b"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f215789fb1667cdc874c1b8af6a84dc939fd802bf293a8334fce185c79cd359b"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2debd8ddce948a8c0938c8c93ade191d2f4ba4649a54302a7da905a81f00b56"}, - {file = "orjson-3.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5410111d7b6681d4b0d65e0f58a13be588d01b473822483f77f513c7f93bd3b2"}, - {file = 
"orjson-3.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb1f28a137337fdc18384079fa5726810681055b32b92253fa15ae5656e1dddb"}, - {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bf2fbbce5fe7cd1aa177ea3eab2b8e6a6bc6e8592e4279ed3db2d62e57c0e1b2"}, - {file = "orjson-3.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:79b9b9e33bd4c517445a62b90ca0cc279b0f1f3970655c3df9e608bc3f91741a"}, - {file = "orjson-3.10.6-cp39-none-win32.whl", hash = "sha256:30b0a09a2014e621b1adf66a4f705f0809358350a757508ee80209b2d8dae219"}, - {file = "orjson-3.10.6-cp39-none-win_amd64.whl", hash = "sha256:49e3bc615652617d463069f91b867a4458114c5b104e13b7ae6872e5f79d0844"}, - {file = "orjson-3.10.6.tar.gz", hash = "sha256:e54b63d0a7c6c54a5f5f726bc93a2078111ef060fec4ecbf34c5db800ca3b3a7"}, + {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, + {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, + {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, + {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, + {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, + {file = 
"orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, + {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, + {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, + {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, + {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, + {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, + {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, + {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, + {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, + {file = "orjson-3.10.7-cp38-none-win32.whl", 
hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, + {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, + {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, + {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, + {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, + {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, ] [[package]] @@ -1153,62 +1250,64 @@ six = ">=1.5" [[package]] name = "pyyaml" 
-version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - 
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = 
"PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = 
"PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = 
"PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = 
"PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -1311,13 +1410,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.4" +version = "4.66.5" description = "Fast, Extensible Progress 
Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, - {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, ] [package.dependencies] @@ -1353,13 +1452,13 @@ files = [ [[package]] name = "types-pyyaml" -version = "6.0.12.20240311" +version = "6.0.12.20240808" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, - {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, + {file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"}, + {file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"}, ] [[package]] @@ -1378,13 +1477,13 @@ types-urllib3 = "*" [[package]] name = "types-requests" -version = "2.32.0.20240622" +version = "2.32.0.20240712" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240622.tar.gz", hash = "sha256:ed5e8a412fcc39159d6319385c009d642845f250c63902718f605cd90faade31"}, - {file = "types_requests-2.32.0.20240622-py3-none-any.whl", hash = "sha256:97bac6b54b5bd4cf91d407e62f0932a74821bc2211f22116d9ee1dd643826caf"}, + {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, + {file 
= "types_requests-2.32.0.20240712-py3-none-any.whl", hash = "sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, ] [package.dependencies] @@ -1511,43 +1610,46 @@ tests = ["Werkzeug (==2.0.3)", "aiohttp", "boto3", "httplib2", "httpx", "pytest" [[package]] name = "watchdog" -version = "4.0.1" +version = "4.0.2" description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, - {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, - {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, - {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, - {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, - {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, - {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, - {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, - {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, - 
{file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, - {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, - {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, - {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, - {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, - {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, + {file = 
"watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, + {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, + {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, + {file = 
"watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, + {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, ] [package.extras] diff --git a/python/tests/unit_tests/test_run_helpers.py b/python/tests/unit_tests/test_run_helpers.py index f749dc17a..d3451e88f 100644 --- a/python/tests/unit_tests/test_run_helpers.py +++ b/python/tests/unit_tests/test_run_helpers.py @@ -1383,10 +1383,22 @@ def _check_client(client: Client) -> None: _check_client(mock_client) + class Untruthy: + def __init__(self, val: Any) -> None: + self.val = val + + def __bool__(self) -> bool: + raise ValueError("I'm not truthy") + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Untruthy): + return self.val == other.val + return self.val == other + @traceable(process_inputs=process_inputs, process_outputs=process_outputs) async def amy_function(val: str, **kwargs: Any) -> int: assert not kwargs.get("val2") - return 42 + return Untruthy(42) # type: ignore mock_client = _get_mock_client() with tracing_context(enabled=True): @@ -1436,7 +1448,7 @@ def my_gen(val: str, **kwargs: Any) -> Generator[int, None, None]: ) async def amy_gen(val: str, **kwargs: Any) -> AsyncGenerator[int, None]: assert not kwargs.get("val2") - yield 42 + yield Untruthy(42) # type: ignore mock_client = _get_mock_client() with tracing_context(enabled=True): From e485d4a47c5179e5e22df9bb20c75dd4c3dfc117 Mon Sep 17 00:00:00 2001 From: jakerachleff Date: Thu, 15 Aug 2024 10:54:08 -0700 Subject: [PATCH 353/373] feat: schema validation in langsmith sdk (#922) --- python/langsmith/client.py | 24 ++++++-- python/langsmith/schemas.py | 15 ++--- python/tests/integration_tests/test_client.py | 60 +++++++++++++++++-- 3 files changed, 81 insertions(+), 18 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index cc3805695..82edecabf 100644 --- 
a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -2529,6 +2529,8 @@ def create_dataset( *, description: Optional[str] = None, data_type: ls_schemas.DataType = ls_schemas.DataType.kv, + inputs_schema: Optional[Dict[str, Any]] = None, + outputs_schema: Optional[Dict[str, Any]] = None, ) -> ls_schemas.Dataset: """Create a dataset in the LangSmith API. @@ -2546,18 +2548,28 @@ def create_dataset( Dataset The created dataset. """ - dataset = ls_schemas.DatasetCreate( - name=dataset_name, - description=description, - data_type=data_type, - ) + dataset: Dict[str, Any] = { + "name": dataset_name, + "data_type": data_type.value, + "created_at": datetime.datetime.now().isoformat(), + } + if description is not None: + dataset["description"] = description + + if inputs_schema is not None: + dataset["inputs_schema_definition"] = inputs_schema + + if outputs_schema is not None: + dataset["outputs_schema_definition"] = outputs_schema + response = self.request_with_retries( "POST", "/datasets", headers={**self._headers, "Content-Type": "application/json"}, - data=dataset.json(), + data=orjson.dumps(dataset), ) ls_utils.raise_for_status_with_text(response) + return ls_schemas.Dataset( **response.json(), _host_url=self._host_url, diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 1bf5787d9..c23b2b713 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -135,13 +135,6 @@ class Config: frozen = True -class DatasetCreate(DatasetBase): - """Dataset create model.""" - - id: Optional[UUID] = None - created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) - - class Dataset(DatasetBase): """Dataset ORM model.""" @@ -151,6 +144,8 @@ class Dataset(DatasetBase): example_count: Optional[int] = None session_count: Optional[int] = None last_session_start_time: Optional[datetime] = None + inputs_schema: Optional[Dict[str, Any]] = None + outputs_schema: Optional[Dict[str, Any]] = None _host_url: Optional[str] 
= PrivateAttr(default=None) _tenant_id: Optional[UUID] = PrivateAttr(default=None) _public_path: Optional[str] = PrivateAttr(default=None) @@ -163,6 +158,12 @@ def __init__( **kwargs: Any, ) -> None: """Initialize a Dataset object.""" + if "inputs_schema_definition" in kwargs: + kwargs["inputs_schema"] = kwargs.pop("inputs_schema_definition") + + if "outputs_schema_definition" in kwargs: + kwargs["outputs_schema"] = kwargs.pop("outputs_schema_definition") + super().__init__(**kwargs) self._host_url = _host_url self._tenant_id = _tenant_id diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 89d57da26..22f5355c9 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -13,6 +13,7 @@ import pytest from freezegun import freeze_time +from pydantic import BaseModel from langsmith.client import ID_TYPE, Client from langsmith.schemas import DataType @@ -312,11 +313,7 @@ def test_error_surfaced_invalid_uri(monkeypatch: pytest.MonkeyPatch, uri: str) - client.create_run("My Run", inputs={"text": "hello world"}, run_type="llm") -def test_create_dataset( - monkeypatch: pytest.MonkeyPatch, langchain_client: Client -) -> None: - """Test persisting runs and adding feedback.""" - monkeypatch.setenv("LANGCHAIN_ENDPOINT", "https://dev.api.smith.langchain.com") +def test_create_dataset(langchain_client: Client) -> None: dataset_name = "__test_create_dataset" + uuid4().hex[:4] if langchain_client.has_dataset(dataset_name=dataset_name): langchain_client.delete_dataset(dataset_name=dataset_name) @@ -360,6 +357,59 @@ def test_create_dataset( langchain_client.delete_dataset(dataset_id=dataset.id) +def test_dataset_schema_validation(langchain_client: Client) -> None: + dataset_name = "__test_create_dataset" + uuid4().hex[:4] + if langchain_client.has_dataset(dataset_name=dataset_name): + langchain_client.delete_dataset(dataset_name=dataset_name) + + class 
InputSchema(BaseModel): + input: str + + class OutputSchema(BaseModel): + output: str + + dataset = langchain_client.create_dataset( + dataset_name, + data_type=DataType.kv, + inputs_schema=InputSchema.model_json_schema(), + outputs_schema=OutputSchema.model_json_schema(), + ) + + # confirm we store the schema from the create request + assert dataset.inputs_schema == InputSchema.model_json_schema() + assert dataset.outputs_schema == OutputSchema.model_json_schema() + + # create an example that matches the schema, which should succeed + langchain_client.create_example( + inputs={"input": "hello world"}, + outputs={"output": "hello"}, + dataset_id=dataset.id, + ) + + # create an example that does not match the input schema + with pytest.raises(LangSmithError): + langchain_client.create_example( + inputs={"john": 1}, + outputs={"output": "hello"}, + dataset_id=dataset.id, + ) + + # create an example that does not match the output schema + with pytest.raises(LangSmithError): + langchain_client.create_example( + inputs={"input": "hello world"}, + outputs={"john": 1}, + dataset_id=dataset.id, + ) + + # assert read API includes the schema definition + read_dataset = langchain_client.read_dataset(dataset_id=dataset.id) + assert read_dataset.inputs_schema == InputSchema.model_json_schema() + assert read_dataset.outputs_schema == OutputSchema.model_json_schema() + + langchain_client.delete_dataset(dataset_id=dataset.id) + + @freeze_time("2023-01-01") def test_list_datasets(langchain_client: Client) -> None: ds1n = "__test_list_datasets1" + uuid4().hex[:4] From 3f1a2778d4104d5fc7449a72935ca56df777fc61 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 13:35:10 -0700 Subject: [PATCH 354/373] python: add Client.search_examples() --- python/langsmith/_expect.py | 6 ++- python/langsmith/_internal/_aiter.py | 6 ++- python/langsmith/_testing.py | 6 ++- python/langsmith/client.py | 57 +++++++++++++++++++++++++++- python/langsmith/run_helpers.py | 6 ++- 
python/langsmith/schemas.py | 4 ++ 6 files changed, 75 insertions(+), 10 deletions(-) diff --git a/python/langsmith/_expect.py b/python/langsmith/_expect.py index 967390597..3b69deb95 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -410,10 +410,12 @@ def score( ## Private Methods @overload - def __call__(self, value: Any, /) -> _Matcher: ... + def __call__(self, value: Any, /) -> _Matcher: + ... @overload - def __call__(self, /, *, client: ls_client.Client) -> _Expect: ... + def __call__(self, /, *, client: ls_client.Client) -> _Expect: + ... def __call__( self, diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py index 7ae217f68..e359f28b9 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -185,10 +185,12 @@ def __len__(self) -> int: return len(self._children) @overload - def __getitem__(self, item: int) -> AsyncIterator[T]: ... + def __getitem__(self, item: int) -> AsyncIterator[T]: + ... @overload - def __getitem__(self, item: slice) -> Tuple[AsyncIterator[T], ...]: ... + def __getitem__(self, item: slice) -> Tuple[AsyncIterator[T], ...]: + ... def __getitem__( self, item: Union[int, slice] diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index 3d5ac9c3b..20da5a39a 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -41,7 +41,8 @@ class SkipException(Exception): # type: ignore[no-redef] @overload def test( func: Callable, -) -> Callable: ... +) -> Callable: + ... @overload @@ -51,7 +52,8 @@ def test( output_keys: Optional[Sequence[str]] = None, client: Optional[ls_client.Client] = None, test_suite_name: Optional[str] = None, -) -> Callable[[Callable], Callable]: ... +) -> Callable[[Callable], Callable]: + ... 
def test(*args: Any, **kwargs: Any) -> Callable: diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 82edecabf..f073835e4 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -416,13 +416,15 @@ def _as_uuid(value: ID_TYPE, var: Optional[str] = None) -> uuid.UUID: @typing.overload -def _ensure_uuid(value: Optional[Union[str, uuid.UUID]]) -> uuid.UUID: ... +def _ensure_uuid(value: Optional[Union[str, uuid.UUID]]) -> uuid.UUID: + ... @typing.overload def _ensure_uuid( value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = True -) -> Optional[uuid.UUID]: ... +) -> Optional[uuid.UUID]: + ... def _ensure_uuid(value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = False): @@ -3412,6 +3414,57 @@ def list_examples( if limit is not None and i + 1 >= limit: break + @ls_utils.xor_args(("dataset_name", "dataset_id")) + def search_examples( + self, + query: dict, + /, + limit: int, + dataset_id: Optional[ID_TYPE] = None, + dataset_name: Optional[str] = None, + **kwargs: Any, + ) -> List[ls_schemas.ExampleBase]: + """Retrieve the dataset examples whose inputs best match the query. + + **Note**: Must have few-shot indexing enabled for the dataset. See (TODO) method + for how to enable indexing. + + Args: + query (dict): The query to search against. Must be JSON serializable. + limit (int): The maximum number of examples to return. + dataset_id (UUID, optional): The ID of the dataset to filter by. + Defaults to None. Must specify one of ``dataset_id`` or + ``dataset_name``. + dataset_name (str, optional): The name of the dataset to filter by. + Defaults to None. Must specify one of ``dataset_id`` or + ``dataset_name``. + kwargs (Any): Additional keyword args to pass as part of request body. + + Returns: + List of ExampleSearch. 
+ """ + if dataset_id is None: + dataset_id = self.read_dataset(dataset_name=dataset_name).id + dataset_id = _as_uuid(dataset_id, "dataset_id") + few_shot_resp = self.request_with_retries( + "POST", + f"/datasets/{dataset_id}/search", + headers=self._headers, + data=json.dumps({"inputs": query, "limit": limit, **kwargs}), + ) + ls_utils.raise_for_status_with_text(few_shot_resp) + examples = [] + for res in few_shot_resp.json()["examples"]: + examples.append( + ls_schemas.ExampleSearch( + **res, + dataset_id=dataset_id, + _host_url=self._host_url, + _tenant_id=self._get_optional_tenant_id(), + ) + ) + return examples + def update_example( self, example_id: ID_TYPE, diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 05f2534fe..6d72f8568 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -232,7 +232,8 @@ def __call__( @overload def traceable( func: Callable[P, R], -) -> SupportsLangsmithExtra[P, R]: ... +) -> SupportsLangsmithExtra[P, R]: + ... @overload @@ -248,7 +249,8 @@ def traceable( process_inputs: Optional[Callable[[dict], dict]] = None, process_outputs: Optional[Callable[..., dict]] = None, _invocation_params_fn: Optional[Callable[[dict], dict]] = None, -) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: ... +) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: + ... 
def traceable( diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index c23b2b713..60085ea00 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -98,6 +98,10 @@ def url(self) -> Optional[str]: return f"{self._host_url}{path}" return None +class ExampleSearch(ExampleBase): + """Example returned via search.""" + id: UUID + class ExampleUpdate(BaseModel): """Update class for Example.""" From 678d61a60db787d53668401446ab5ef49c8d2be1 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 13:37:28 -0700 Subject: [PATCH 355/373] fmt --- python/langsmith/client.py | 8 ++++---- python/langsmith/schemas.py | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index f073835e4..69c8d9f5a 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3446,18 +3446,18 @@ def search_examples( if dataset_id is None: dataset_id = self.read_dataset(dataset_name=dataset_name).id dataset_id = _as_uuid(dataset_id, "dataset_id") - few_shot_resp = self.request_with_retries( + resp = self.request_with_retries( "POST", f"/datasets/{dataset_id}/search", headers=self._headers, data=json.dumps({"inputs": query, "limit": limit, **kwargs}), ) - ls_utils.raise_for_status_with_text(few_shot_resp) + ls_utils.raise_for_status_with_text(resp) examples = [] - for res in few_shot_resp.json()["examples"]: + for ex in resp.json()["examples"]: examples.append( ls_schemas.ExampleSearch( - **res, + **ex, dataset_id=dataset_id, _host_url=self._host_url, _tenant_id=self._get_optional_tenant_id(), diff --git a/python/langsmith/schemas.py b/python/langsmith/schemas.py index 60085ea00..3da1d4650 100644 --- a/python/langsmith/schemas.py +++ b/python/langsmith/schemas.py @@ -98,8 +98,10 @@ def url(self) -> Optional[str]: return f"{self._host_url}{path}" return None + class ExampleSearch(ExampleBase): """Example returned via search.""" + id: UUID From 
d9c2272ca786d274abb42094a3b2820f1c7b5ae7 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 13:43:25 -0700 Subject: [PATCH 356/373] fmt --- python/langsmith/client.py | 18 ++++++------------ python/langsmith/evaluation/__init__.py | 2 +- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 69c8d9f5a..7d62c2b45 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3414,43 +3414,38 @@ def list_examples( if limit is not None and i + 1 >= limit: break - @ls_utils.xor_args(("dataset_name", "dataset_id")) + # dataset_name explicitly not supported to avoid extra API calls. def search_examples( self, - query: dict, + inputs: dict, /, limit: int, dataset_id: Optional[ID_TYPE] = None, - dataset_name: Optional[str] = None, **kwargs: Any, - ) -> List[ls_schemas.ExampleBase]: + ) -> List[ls_schemas.ExampleSearch]: """Retrieve the dataset examples whose inputs best match the query. **Note**: Must have few-shot indexing enabled for the dataset. See (TODO) method for how to enable indexing. Args: - query (dict): The query to search against. Must be JSON serializable. + inputs (dict): The inputs to use as a search query. Must match the dataset + input schema. Must be JSON serializable. limit (int): The maximum number of examples to return. dataset_id (UUID, optional): The ID of the dataset to filter by. Defaults to None. Must specify one of ``dataset_id`` or ``dataset_name``. - dataset_name (str, optional): The name of the dataset to filter by. - Defaults to None. Must specify one of ``dataset_id`` or - ``dataset_name``. kwargs (Any): Additional keyword args to pass as part of request body. Returns: List of ExampleSearch. 
""" - if dataset_id is None: - dataset_id = self.read_dataset(dataset_name=dataset_name).id dataset_id = _as_uuid(dataset_id, "dataset_id") resp = self.request_with_retries( "POST", f"/datasets/{dataset_id}/search", headers=self._headers, - data=json.dumps({"inputs": query, "limit": limit, **kwargs}), + data=json.dumps({"inputs": inputs, "limit": limit, **kwargs}), ) ls_utils.raise_for_status_with_text(resp) examples = [] @@ -3460,7 +3455,6 @@ def search_examples( **ex, dataset_id=dataset_id, _host_url=self._host_url, - _tenant_id=self._get_optional_tenant_id(), ) ) return examples diff --git a/python/langsmith/evaluation/__init__.py b/python/langsmith/evaluation/__init__.py index 253732cfc..f7a51eb9c 100644 --- a/python/langsmith/evaluation/__init__.py +++ b/python/langsmith/evaluation/__init__.py @@ -1,6 +1,6 @@ """Evaluation Helpers.""" -from typing import TYPE_CHECKING, Any, List +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from typing import List From 4f83908129cad1083d783068169835029695b9bf Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 13:45:43 -0700 Subject: [PATCH 357/373] fmt --- python/langsmith/client.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 7d62c2b45..20e49e95a 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3420,7 +3420,7 @@ def search_examples( inputs: dict, /, limit: int, - dataset_id: Optional[ID_TYPE] = None, + dataset_id: ID_TYPE, **kwargs: Any, ) -> List[ls_schemas.ExampleSearch]: """Retrieve the dataset examples whose inputs best match the query. @@ -3433,8 +3433,6 @@ def search_examples( input schema. Must be JSON serializable. limit (int): The maximum number of examples to return. dataset_id (UUID, optional): The ID of the dataset to filter by. - Defaults to None. Must specify one of ``dataset_id`` or - ``dataset_name``. kwargs (Any): Additional keyword args to pass as part of request body. 
Returns: From b16a39f9b438c725391c6c002f9b32794dc5a332 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 13:50:33 -0700 Subject: [PATCH 358/373] fmt --- python/langsmith/_expect.py | 6 ++---- python/langsmith/_internal/_aiter.py | 6 ++---- python/langsmith/_testing.py | 6 ++---- python/langsmith/client.py | 14 +++----------- python/langsmith/run_helpers.py | 6 ++---- 5 files changed, 11 insertions(+), 27 deletions(-) diff --git a/python/langsmith/_expect.py b/python/langsmith/_expect.py index 3b69deb95..967390597 100644 --- a/python/langsmith/_expect.py +++ b/python/langsmith/_expect.py @@ -410,12 +410,10 @@ def score( ## Private Methods @overload - def __call__(self, value: Any, /) -> _Matcher: - ... + def __call__(self, value: Any, /) -> _Matcher: ... @overload - def __call__(self, /, *, client: ls_client.Client) -> _Expect: - ... + def __call__(self, /, *, client: ls_client.Client) -> _Expect: ... def __call__( self, diff --git a/python/langsmith/_internal/_aiter.py b/python/langsmith/_internal/_aiter.py index e359f28b9..7ae217f68 100644 --- a/python/langsmith/_internal/_aiter.py +++ b/python/langsmith/_internal/_aiter.py @@ -185,12 +185,10 @@ def __len__(self) -> int: return len(self._children) @overload - def __getitem__(self, item: int) -> AsyncIterator[T]: - ... + def __getitem__(self, item: int) -> AsyncIterator[T]: ... @overload - def __getitem__(self, item: slice) -> Tuple[AsyncIterator[T], ...]: - ... + def __getitem__(self, item: slice) -> Tuple[AsyncIterator[T], ...]: ... def __getitem__( self, item: Union[int, slice] diff --git a/python/langsmith/_testing.py b/python/langsmith/_testing.py index 20da5a39a..3d5ac9c3b 100644 --- a/python/langsmith/_testing.py +++ b/python/langsmith/_testing.py @@ -41,8 +41,7 @@ class SkipException(Exception): # type: ignore[no-redef] @overload def test( func: Callable, -) -> Callable: - ... +) -> Callable: ... 
@overload @@ -52,8 +51,7 @@ def test( output_keys: Optional[Sequence[str]] = None, client: Optional[ls_client.Client] = None, test_suite_name: Optional[str] = None, -) -> Callable[[Callable], Callable]: - ... +) -> Callable[[Callable], Callable]: ... def test(*args: Any, **kwargs: Any) -> Callable: diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 20e49e95a..2ffc1322d 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -416,15 +416,13 @@ def _as_uuid(value: ID_TYPE, var: Optional[str] = None) -> uuid.UUID: @typing.overload -def _ensure_uuid(value: Optional[Union[str, uuid.UUID]]) -> uuid.UUID: - ... +def _ensure_uuid(value: Optional[Union[str, uuid.UUID]]) -> uuid.UUID: ... @typing.overload def _ensure_uuid( value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = True -) -> Optional[uuid.UUID]: - ... +) -> Optional[uuid.UUID]: ... def _ensure_uuid(value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = False): @@ -3448,13 +3446,7 @@ def search_examples( ls_utils.raise_for_status_with_text(resp) examples = [] for ex in resp.json()["examples"]: - examples.append( - ls_schemas.ExampleSearch( - **ex, - dataset_id=dataset_id, - _host_url=self._host_url, - ) - ) + examples.append(ls_schemas.ExampleSearch(**ex, dataset_id=dataset_id)) return examples def update_example( diff --git a/python/langsmith/run_helpers.py b/python/langsmith/run_helpers.py index 6d72f8568..05f2534fe 100644 --- a/python/langsmith/run_helpers.py +++ b/python/langsmith/run_helpers.py @@ -232,8 +232,7 @@ def __call__( @overload def traceable( func: Callable[P, R], -) -> SupportsLangsmithExtra[P, R]: - ... +) -> SupportsLangsmithExtra[P, R]: ... @overload @@ -249,8 +248,7 @@ def traceable( process_inputs: Optional[Callable[[dict], dict]] = None, process_outputs: Optional[Callable[..., dict]] = None, _invocation_params_fn: Optional[Callable[[dict], dict]] = None, -) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: - ... 
+) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: ... def traceable( From 61f6df7a885e54cf9659d070c275927fe331e665 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 13:52:50 -0700 Subject: [PATCH 359/373] fmt --- python/langsmith/evaluation/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/langsmith/evaluation/__init__.py b/python/langsmith/evaluation/__init__.py index f7a51eb9c..253732cfc 100644 --- a/python/langsmith/evaluation/__init__.py +++ b/python/langsmith/evaluation/__init__.py @@ -1,6 +1,6 @@ """Evaluation Helpers.""" -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, List if TYPE_CHECKING: from typing import List From c5acf462e49fd83134cd003fa8715aeec25e0e1d Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 13:55:00 -0700 Subject: [PATCH 360/373] fmt --- python/langsmith/client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 2ffc1322d..fc1a13638 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3417,6 +3417,7 @@ def search_examples( self, inputs: dict, /, + *, limit: int, dataset_id: ID_TYPE, **kwargs: Any, From 33c111419a89a78df75d1e47adf6c6b815f68b78 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 14:04:24 -0700 Subject: [PATCH 361/373] fmt --- python/langsmith/client.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index fc1a13638..9d8aa075b 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3413,7 +3413,7 @@ def list_examples( break # dataset_name explicitly not supported to avoid extra API calls. 
- def search_examples( + def similar_examples( self, inputs: dict, /, @@ -3422,7 +3422,7 @@ def search_examples( dataset_id: ID_TYPE, **kwargs: Any, ) -> List[ls_schemas.ExampleSearch]: - """Retrieve the dataset examples whose inputs best match the query. + """Retrieve the dataset examples whose inputs best match the current inputs. **Note**: Must have few-shot indexing enabled for the dataset. See (TODO) method for how to enable indexing. @@ -3436,6 +3436,27 @@ def search_examples( Returns: List of ExampleSearch. + + Example: + .. code-block:: python + + from langsmith import Client + + client = Client() + client.similar_examples( + {"question": "When would i use the runnable generator"}, + limit=3, + dataset_id="...", + ) + + .. code-block:: pycon + + [ + ExampleSearch(dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40'), inputs={'question': 'How do I cache a Chat model? What caches can I use?'}, outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\n```python\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\n```\n\nYou can also use SQLite Cache which uses a SQLite database:\n\n```python\n rm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n```\n'}, metadata=None, id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398')), + ExampleSearch(dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40'), inputs={'question': "What's a runnable lambda?"}, outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a 
callbale (i.e., a function). Note the function must accept a single argument."}, metadata=None, id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4')), + ExampleSearch(dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40'), inputs={'question': 'Show me how to use RecursiveURLLoader'}, outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'}, metadata=None, id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c')), + ] + """ dataset_id = _as_uuid(dataset_id, "dataset_id") resp = self.request_with_retries( From ba5e64b6546e04957cd1f8970dd62b3854488bd9 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 14:05:22 -0700 Subject: [PATCH 362/373] fmt --- python/langsmith/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 9d8aa075b..a87889995 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3422,7 +3422,7 @@ def similar_examples( dataset_id: ID_TYPE, **kwargs: Any, ) -> List[ls_schemas.ExampleSearch]: - """Retrieve the dataset examples whose inputs best match the current inputs. + r"""Retrieve the dataset examples whose inputs best match the current inputs. **Note**: Must have few-shot indexing enabled for the dataset. See (TODO) method for how to enable indexing. 
@@ -3457,7 +3457,7 @@ def similar_examples( ExampleSearch(dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40'), inputs={'question': 'Show me how to use RecursiveURLLoader'}, outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'}, metadata=None, id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c')), ] - """ + """ # noqa: E501 dataset_id = _as_uuid(dataset_id, "dataset_id") resp = self.request_with_retries( "POST", From f3fde6cc483fbb84fbfbb33bca517ea166e11124 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 14:07:57 -0700 Subject: [PATCH 363/373] fmt --- python/langsmith/client.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index a87889995..cdfcd9dd2 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3435,7 +3435,7 @@ def similar_examples( kwargs (Any): Additional keyword args to pass as part of request body. Returns: - List of ExampleSearch. + List of ExampleSearch objects. Example: .. code-block:: python @@ -3452,9 +3452,27 @@ def similar_examples( .. code-block:: pycon [ - ExampleSearch(dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40'), inputs={'question': 'How do I cache a Chat model? 
What caches can I use?'}, outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\n```python\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\n```\n\nYou can also use SQLite Cache which uses a SQLite database:\n\n```python\n rm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n```\n'}, metadata=None, id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398')), - ExampleSearch(dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40'), inputs={'question': "What's a runnable lambda?"}, outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). Note the function must accept a single argument."}, metadata=None, id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4')), - ExampleSearch(dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40'), inputs={'question': 'Show me how to use RecursiveURLLoader'}, outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. 
The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'}, metadata=None, id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c')), + ExampleSearch( + inputs={'question': 'How do I cache a Chat model? What caches can I use?'}, + outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\n```python\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\n```\n\nYou can also use SQLite Cache which uses a SQLite database:\n\n```python\n rm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n```\n'}, + metadata=None, + id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': "What's a runnable lambda?"}, + outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). Note the function must accept a single argument."}, + metadata=None, + id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), + ExampleSearch( + inputs={'question': 'Show me how to use RecursiveURLLoader'}, + outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. 
Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'}, + metadata=None, + id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c'), + dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40') + ), ] """ # noqa: E501 From 1ebb19764acebe142ce00ece0bb26d27c4bb79a0 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 14:11:07 -0700 Subject: [PATCH 364/373] fmt --- python/langsmith/client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index cdfcd9dd2..218b8f581 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3412,7 +3412,9 @@ def list_examples( if limit is not None and i + 1 >= limit: break - # dataset_name explicitly not supported to avoid extra API calls. + # dataset_name arg explicitly not supported to avoid extra API calls. + # TODO: Update note on enabling indexing when there's an enable_indexing method. + # TODO: Come up with more interesting example for docstring. 
def similar_examples( self, inputs: dict, From e1544bcb28aff96927622383be52ed0a4fed3f5f Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 15 Aug 2024 15:43:28 -0700 Subject: [PATCH 365/373] fmt --- python/langsmith/client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 218b8f581..20479e4d3 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -55,6 +55,7 @@ from langsmith import env as ls_env from langsmith import schemas as ls_schemas from langsmith import utils as ls_utils +from langsmith.base import beta if TYPE_CHECKING: import pandas as pd # type: ignore @@ -3415,6 +3416,7 @@ def list_examples( # dataset_name arg explicitly not supported to avoid extra API calls. # TODO: Update note on enabling indexing when there's an enable_indexing method. # TODO: Come up with more interesting example for docstring. + @beta() def similar_examples( self, inputs: dict, From f4815034213ebf65cc0ef67c4c496613dce12d6d Mon Sep 17 00:00:00 2001 From: Bagatur Date: Sat, 17 Aug 2024 23:09:43 -0700 Subject: [PATCH 366/373] fmt --- python/langsmith/client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 20479e4d3..afaca6e9d 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3415,7 +3415,6 @@ def list_examples( # dataset_name arg explicitly not supported to avoid extra API calls. # TODO: Update note on enabling indexing when there's an enable_indexing method. - # TODO: Come up with more interesting example for docstring. @beta() def similar_examples( self, @@ -3428,8 +3427,9 @@ def similar_examples( ) -> List[ls_schemas.ExampleSearch]: r"""Retrieve the dataset examples whose inputs best match the current inputs. - **Note**: Must have few-shot indexing enabled for the dataset. See (TODO) method - for how to enable indexing. 
+ **Note**: Must have few-shot indexing enabled for the dataset. You can do this + in the LangSmith UI: + https://docs.smith.langchain.com/how_to_guides/datasets/index_datasets_for_dynamic_few_shot_example_selection Args: inputs (dict): The inputs to use as a search query. Must match the dataset From f6fd84d7235a4ecb842a9295889fa59cf8e23009 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Sun, 18 Aug 2024 10:41:39 -0700 Subject: [PATCH 367/373] fmt --- python/langsmith/beta/_utils.py | 2 +- python/langsmith/client.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/python/langsmith/beta/_utils.py b/python/langsmith/beta/_utils.py index d1ebcbe1b..433ade058 100644 --- a/python/langsmith/beta/_utils.py +++ b/python/langsmith/beta/_utils.py @@ -11,7 +11,7 @@ def warn_beta(func: Callable) -> Callable: @functools.wraps(func) def wrapper(*args, **kwargs): warnings.warn( - f"Function {func.__name__} is in beta.", UserWarning, stacklevel=2 + f"Function {func.__name__} is in beta.", LangSmithBetaWarning, stacklevel=2 ) return func(*args, **kwargs) diff --git a/python/langsmith/client.py b/python/langsmith/client.py index afaca6e9d..1f94a99e4 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -55,7 +55,7 @@ from langsmith import env as ls_env from langsmith import schemas as ls_schemas from langsmith import utils as ls_utils -from langsmith.base import beta +from langsmith.beta import _utils as beta_utils if TYPE_CHECKING: import pandas as pd # type: ignore @@ -3415,7 +3415,7 @@ def list_examples( # dataset_name arg explicitly not supported to avoid extra API calls. # TODO: Update note on enabling indexing when there's an enable_indexing method. 
- @beta() + @beta_utils.warn_beta def similar_examples( self, inputs: dict, From 14315753cef20c9cad597932b1e97f92c14d600d Mon Sep 17 00:00:00 2001 From: Bagatur Date: Sun, 18 Aug 2024 12:13:12 -0700 Subject: [PATCH 368/373] fmt --- .../{beta/_utils.py => _internal/_beta_decorator.py} | 0 python/langsmith/beta/__init__.py | 2 +- python/langsmith/beta/_evals.py | 6 +++--- python/langsmith/client.py | 4 ++-- python/langsmith/evaluation/llm_evaluator.py | 6 +++--- python/tests/integration_tests/test_client.py | 5 +++++ 6 files changed, 14 insertions(+), 9 deletions(-) rename python/langsmith/{beta/_utils.py => _internal/_beta_decorator.py} (100%) diff --git a/python/langsmith/beta/_utils.py b/python/langsmith/_internal/_beta_decorator.py similarity index 100% rename from python/langsmith/beta/_utils.py rename to python/langsmith/_internal/_beta_decorator.py diff --git a/python/langsmith/beta/__init__.py b/python/langsmith/beta/__init__.py index 9240296a3..f712c1adb 100644 --- a/python/langsmith/beta/__init__.py +++ b/python/langsmith/beta/__init__.py @@ -1,6 +1,6 @@ """Beta functionality prone to change.""" +from langsmith._internal._beta_decorator import warn_beta from langsmith.beta._evals import compute_test_metrics, convert_runs_to_test -from langsmith.beta._utils import warn_beta __all__ = ["convert_runs_to_test", "compute_test_metrics", "warn_beta"] diff --git a/python/langsmith/beta/_evals.py b/python/langsmith/beta/_evals.py index 03b099fff..de6103d81 100644 --- a/python/langsmith/beta/_evals.py +++ b/python/langsmith/beta/_evals.py @@ -9,9 +9,9 @@ import uuid from typing import DefaultDict, List, Optional, Sequence, Tuple, TypeVar -import langsmith.beta._utils as beta_utils import langsmith.schemas as ls_schemas from langsmith import evaluation as ls_eval +from langsmith._internal._beta_decorator import warn_beta from langsmith.client import Client @@ -65,7 +65,7 @@ def _convert_root_run(root: ls_schemas.Run, run_to_example_map: dict) -> List[di return result 
-@beta_utils.warn_beta +@warn_beta def convert_runs_to_test( runs: Sequence[ls_schemas.Run], *, @@ -196,7 +196,7 @@ def _outer_product(list1: List[T], list2: List[U]) -> List[Tuple[T, U]]: return list(itertools.product(list1, list2)) -@beta_utils.warn_beta +@warn_beta def compute_test_metrics( project_name: str, *, diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 1f94a99e4..6aaaf473e 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -55,7 +55,7 @@ from langsmith import env as ls_env from langsmith import schemas as ls_schemas from langsmith import utils as ls_utils -from langsmith.beta import _utils as beta_utils +from langsmith._internal._beta_decorator import warn_beta if TYPE_CHECKING: import pandas as pd # type: ignore @@ -3415,7 +3415,7 @@ def list_examples( # dataset_name arg explicitly not supported to avoid extra API calls. # TODO: Update note on enabling indexing when there's an enable_indexing method. - @beta_utils.warn_beta + @warn_beta def similar_examples( self, inputs: dict, diff --git a/python/langsmith/evaluation/llm_evaluator.py b/python/langsmith/evaluation/llm_evaluator.py index d0ef4fec3..3ae7b333c 100644 --- a/python/langsmith/evaluation/llm_evaluator.py +++ b/python/langsmith/evaluation/llm_evaluator.py @@ -4,7 +4,7 @@ from pydantic import BaseModel -import langsmith.beta._utils as beta_utils +from langsmith._internal._beta_decorator import warn_beta from langsmith.evaluation import EvaluationResult, EvaluationResults, RunEvaluator from langsmith.schemas import Example, Run @@ -201,7 +201,7 @@ def _initialize( chat_model = chat_model.with_structured_output(self.score_schema) self.runnable = self.prompt | chat_model - @beta_utils.warn_beta + @warn_beta def evaluate_run( self, run: Run, example: Optional[Example] = None ) -> Union[EvaluationResult, EvaluationResults]: @@ -210,7 +210,7 @@ def evaluate_run( output: dict = cast(dict, self.runnable.invoke(variables)) return 
self._parse_output(output) - @beta_utils.warn_beta + @warn_beta async def aevaluate_run( self, run: Run, example: Optional[Example] = None ) -> Union[EvaluationResult, EvaluationResults]: diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 22f5355c9..b478a2f60 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -268,6 +268,11 @@ def test_list_examples(langchain_client: Client) -> None: langchain_client.delete_dataset(dataset_id=dataset.id) + example_list = langchain_client.similar_examples( + {"text": "hey there"}, k=1, dataset_id=dataset.id + ) + assert len(example_list) == 1 + @pytest.mark.skip(reason="This test is flaky") def test_persist_update_run(langchain_client: Client) -> None: From f1a8808f2e4231cfbc30140a6d6ba812b3225975 Mon Sep 17 00:00:00 2001 From: Bagatur Date: Sun, 18 Aug 2024 12:53:20 -0700 Subject: [PATCH 369/373] fmt --- python/langsmith/client.py | 44 +++++++++++++++--- python/pyproject.toml | 1 + python/tests/integration_tests/conftest.py | 21 +++++++++ python/tests/integration_tests/test_client.py | 45 +++++++++++++++++-- 4 files changed, 101 insertions(+), 10 deletions(-) create mode 100644 python/tests/integration_tests/conftest.py diff --git a/python/langsmith/client.py b/python/langsmith/client.py index 6aaaf473e..b48a16be1 100644 --- a/python/langsmith/client.py +++ b/python/langsmith/client.py @@ -3413,8 +3413,41 @@ def list_examples( if limit is not None and i + 1 >= limit: break - # dataset_name arg explicitly not supported to avoid extra API calls. - # TODO: Update note on enabling indexing when there's an enable_indexing method. + @warn_beta + def index_dataset( + self, + *, + dataset_id: ID_TYPE, + tag: str = "latest", + **kwargs: Any, + ) -> None: + """Enable dataset indexing. Examples are indexed by their inputs. + + This enables searching for similar examples by inputs with + ``client.similar_examples()``. 
+ + Args: + dataset_id (UUID): The ID of the dataset to index. + tag (str, optional): The version of the dataset to index. If 'latest' + then any updates to the dataset (additions, updates, deletions of + examples) will be reflected in the index. + + Returns: + None + + Raises: + requests.HTTPError + """ # noqa: E501 + dataset_id = _as_uuid(dataset_id, "dataset_id") + resp = self.request_with_retries( + "POST", + f"/datasets/{dataset_id}/index", + headers=self._headers, + data=json.dumps({"tag": tag, **kwargs}), + ) + ls_utils.raise_for_status_with_text(resp) + + # NOTE: dataset_name arg explicitly not supported to avoid extra API calls. @warn_beta def similar_examples( self, @@ -3427,15 +3460,14 @@ def similar_examples( ) -> List[ls_schemas.ExampleSearch]: r"""Retrieve the dataset examples whose inputs best match the current inputs. - **Note**: Must have few-shot indexing enabled for the dataset. You can do this - in the LangSmith UI: - https://docs.smith.langchain.com/how_to_guides/datasets/index_datasets_for_dynamic_few_shot_example_selection + **Note**: Must have few-shot indexing enabled for the dataset. See + ``client.index_dataset()``. Args: inputs (dict): The inputs to use as a search query. Must match the dataset input schema. Must be JSON serializable. limit (int): The maximum number of examples to return. - dataset_id (UUID, optional): The ID of the dataset to filter by. + dataset_id (str or UUID): The ID of the dataset to search over. kwargs (Any): Additional keyword args to pass as part of request body. 
Returns: diff --git a/python/pyproject.toml b/python/pyproject.toml index 7c7c95888..1242fb6d7 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -101,3 +101,4 @@ disallow_untyped_defs = "True" [tool.pytest.ini_options] asyncio_mode = "auto" +markers = [ "slow: long-running tests",] diff --git a/python/tests/integration_tests/conftest.py b/python/tests/integration_tests/conftest.py new file mode 100644 index 000000000..e446d0a1c --- /dev/null +++ b/python/tests/integration_tests/conftest.py @@ -0,0 +1,21 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--runslow", action="store_true", default=False, help="run slow tests" + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "slow: mark test as slow to run") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index b478a2f60..9f436f515 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -37,7 +37,7 @@ def wait_for( @pytest.fixture def langchain_client() -> Client: - return Client() + return Client(api_key=os.environ["LANGCHAIN_ORG_API_KEY"]) def test_datasets(langchain_client: Client) -> None: @@ -268,10 +268,47 @@ def test_list_examples(langchain_client: Client) -> None: langchain_client.delete_dataset(dataset_id=dataset.id) - example_list = langchain_client.similar_examples( - {"text": "hey there"}, k=1, dataset_id=dataset.id + +@pytest.mark.slow +def test_similar_examples(langchain_client: Client) -> None: + inputs = [{"text": "how are you"}, {"text": "good bye"}, {"text": "see ya later"}] + outputs = [ + {"response": "good how are you"}, + 
{"response": "ta ta"}, + {"response": "tootles"}, + ] + dataset_name = "__test_similar_examples" + uuid4().hex[:4] + dataset = langchain_client.create_dataset( + dataset_name=dataset_name, + inputs_schema={ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "text": {"type": "string"}, + }, + "required": ["text"], + "additionalProperties": False, + }, + outputs_schema={ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "response": {"type": "string"}, + }, + "required": ["response"], + "additionalProperties": False, + }, ) - assert len(example_list) == 1 + langchain_client.create_examples( + inputs=inputs, outputs=outputs, dataset_id=dataset.id + ) + langchain_client.index_dataset(dataset_id=dataset.id) + # Need to wait for indexing to finish. + time.sleep(5) + similar_list = langchain_client.similar_examples( + {"text": "howdy"}, limit=2, dataset_id=dataset.id + ) + assert len(similar_list) == 2 @pytest.mark.skip(reason="This test is flaky") From 6a8b7e508ddc419b90606351a7466429156c03ab Mon Sep 17 00:00:00 2001 From: Bagatur Date: Sun, 18 Aug 2024 12:55:28 -0700 Subject: [PATCH 370/373] fmt --- python/tests/integration_tests/conftest.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/python/tests/integration_tests/conftest.py b/python/tests/integration_tests/conftest.py index e446d0a1c..8ad66c3d2 100644 --- a/python/tests/integration_tests/conftest.py +++ b/python/tests/integration_tests/conftest.py @@ -7,10 +7,6 @@ def pytest_addoption(parser): ) -def pytest_configure(config): - config.addinivalue_line("markers", "slow: mark test as slow to run") - - def pytest_collection_modifyitems(config, items): if config.getoption("--runslow"): # --runslow given in cli: do not skip slow tests From bfc64b7a0ab8e616cd5686b38d38e8e8d13b992e Mon Sep 17 00:00:00 2001 From: Bagatur Date: Sun, 18 Aug 2024 12:55:51 -0700 Subject: [PATCH 371/373] fmt --- 
python/tests/integration_tests/test_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 9f436f515..97cca837c 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -37,7 +37,7 @@ def wait_for( @pytest.fixture def langchain_client() -> Client: - return Client(api_key=os.environ["LANGCHAIN_ORG_API_KEY"]) + return Client() def test_datasets(langchain_client: Client) -> None: From e779d94ce397feacfbdb7cd60a890021c249fdfd Mon Sep 17 00:00:00 2001 From: Bagatur Date: Sun, 18 Aug 2024 12:56:25 -0700 Subject: [PATCH 372/373] fmt --- python/tests/integration_tests/test_client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/tests/integration_tests/test_client.py b/python/tests/integration_tests/test_client.py index 97cca837c..d939111d4 100644 --- a/python/tests/integration_tests/test_client.py +++ b/python/tests/integration_tests/test_client.py @@ -310,6 +310,8 @@ def test_similar_examples(langchain_client: Client) -> None: ) assert len(similar_list) == 2 + langchain_client.delete_dataset(dataset_id=dataset.id) + @pytest.mark.skip(reason="This test is flaky") def test_persist_update_run(langchain_client: Client) -> None: From 179d606c86a7e833cd6910c4175e37b067cde628 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Mon, 19 Aug 2024 16:58:30 -0700 Subject: [PATCH 373/373] [js] Support LangSmith-prefixed env vars (#929) --- js/package.json | 2 +- js/src/client.ts | 18 +++++++------- js/src/env.ts | 13 ++++------- js/src/evaluation/_runner.ts | 2 +- js/src/index.ts | 2 +- js/src/tests/evaluate.int.test.ts | 39 ++++++++++++++----------------- js/src/utils/env.ts | 9 +++++++ 7 files changed, 43 insertions(+), 42 deletions(-) diff --git a/js/package.json b/js/package.json index 05b2d7e86..3f230796c 100644 --- a/js/package.json +++ 
b/js/package.json @@ -1,6 +1,6 @@ { "name": "langsmith", - "version": "0.1.41", + "version": "0.1.42", "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.", "packageManager": "yarn@1.22.19", "files": [ diff --git a/js/src/client.ts b/js/src/client.ts index 86d8de7b5..3dbb23a02 100644 --- a/js/src/client.ts +++ b/js/src/client.ts @@ -37,8 +37,8 @@ import { isLangChainMessage, } from "./utils/messages.js"; import { - getEnvironmentVariable, getLangChainEnvVarsMetadata, + getLangSmithEnvironmentVariable, getRuntimeEnvironment, } from "./utils/env.js"; @@ -286,8 +286,8 @@ async function mergeRuntimeEnvIntoRunCreates(runs: RunCreate[]) { } const getTracingSamplingRate = () => { - const samplingRateStr = getEnvironmentVariable( - "LANGCHAIN_TRACING_SAMPLING_RATE" + const samplingRateStr = getLangSmithEnvironmentVariable( + "TRACING_SAMPLING_RATE" ); if (samplingRateStr === undefined) { return undefined; @@ -295,7 +295,7 @@ const getTracingSamplingRate = () => { const samplingRate = parseFloat(samplingRateStr); if (samplingRate < 0 || samplingRate > 1) { throw new Error( - `LANGCHAIN_TRACING_SAMPLING_RATE must be between 0 and 1 if set. Got: ${samplingRate}` + `LANGSMITH_TRACING_SAMPLING_RATE must be between 0 and 1 if set. Got: ${samplingRate}` ); } return samplingRate; @@ -463,14 +463,14 @@ export class Client { hideInputs?: boolean; hideOutputs?: boolean; } { - const apiKey = getEnvironmentVariable("LANGCHAIN_API_KEY"); + const apiKey = getLangSmithEnvironmentVariable("API_KEY"); const apiUrl = - getEnvironmentVariable("LANGCHAIN_ENDPOINT") ?? + getLangSmithEnvironmentVariable("ENDPOINT") ?? 
"https://api.smith.langchain.com"; const hideInputs = - getEnvironmentVariable("LANGCHAIN_HIDE_INPUTS") === "true"; + getLangSmithEnvironmentVariable("HIDE_INPUTS") === "true"; const hideOutputs = - getEnvironmentVariable("LANGCHAIN_HIDE_OUTPUTS") === "true"; + getLangSmithEnvironmentVariable("HIDE_OUTPUTS") === "true"; return { apiUrl: apiUrl, apiKey: apiKey, @@ -1017,7 +1017,7 @@ export class Client { sessionId = projectOpts?.projectId; } else { const project = await this.readProject({ - projectName: getEnvironmentVariable("LANGCHAIN_PROJECT") || "default", + projectName: getLangSmithEnvironmentVariable("PROJECT") || "default", }); sessionId = project.id; } diff --git a/js/src/env.ts b/js/src/env.ts index 2847b6e73..9d04037a5 100644 --- a/js/src/env.ts +++ b/js/src/env.ts @@ -1,14 +1,11 @@ -import { getEnvironmentVariable } from "./utils/env.js"; +import { getLangSmithEnvironmentVariable } from "./utils/env.js"; export const isTracingEnabled = (tracingEnabled?: boolean): boolean => { if (tracingEnabled !== undefined) { return tracingEnabled; } - const envVars = [ - "LANGSMITH_TRACING_V2", - "LANGCHAIN_TRACING_V2", - "LANGSMITH_TRACING", - "LANGCHAIN_TRACING", - ]; - return !!envVars.find((envVar) => getEnvironmentVariable(envVar) === "true"); + const envVars = ["TRACING_V2", "TRACING"]; + return !!envVars.find( + (envVar) => getLangSmithEnvironmentVariable(envVar) === "true" + ); }; diff --git a/js/src/evaluation/_runner.ts b/js/src/evaluation/_runner.ts index 69d71ebf7..acdb0db9b 100644 --- a/js/src/evaluation/_runner.ts +++ b/js/src/evaluation/_runner.ts @@ -882,7 +882,7 @@ async function _forward( if (!run) { throw new Error(`Run not created by target function. 
This is most likely due to tracing not being enabled.\n -Try setting "LANGCHAIN_TRACING_V2=true" in your environment.`); +Try setting "LANGSMITH_TRACING=true" in your environment.`); } return { diff --git a/js/src/index.ts b/js/src/index.ts index faac74776..84abd3886 100644 --- a/js/src/index.ts +++ b/js/src/index.ts @@ -12,4 +12,4 @@ export type { export { RunTree, type RunTreeConfig } from "./run_trees.js"; // Update using yarn bump-version -export const __version__ = "0.1.41"; +export const __version__ = "0.1.42"; diff --git a/js/src/tests/evaluate.int.test.ts b/js/src/tests/evaluate.int.test.ts index 733b68a6d..98ab6c6c8 100644 --- a/js/src/tests/evaluate.int.test.ts +++ b/js/src/tests/evaluate.int.test.ts @@ -7,8 +7,9 @@ import { Example, Run, TracerSession } from "../schemas.js"; import { Client } from "../index.js"; import { afterAll, beforeAll } from "@jest/globals"; import { RunnableLambda, RunnableSequence } from "@langchain/core/runnables"; - -const TESTING_DATASET_NAME = "test_dataset_js_evaluate_123"; +import { v4 as uuidv4 } from "uuid"; +const TESTING_DATASET_NAME = `test_dataset_js_evaluate_${uuidv4()}`; +const TESTING_DATASET_NAME2 = `my_splits_ds_${uuidv4()}`; beforeAll(async () => { const client = new Client(); @@ -46,7 +47,6 @@ afterAll(async () => { test("evaluate can evaluate", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -84,7 +84,6 @@ test("evaluate can evaluate", async () => { test("evaluate can repeat", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -184,7 +183,6 @@ test("evaluate can evaluate with RunEvaluator evaluators", async () => { test("evaluate can evaluate with custom evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -256,7 +254,6 @@ test("evaluate can evaluate with custom evaluators", 
async () => { test("evaluate can evaluate with summary evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -314,7 +311,6 @@ test("evaluate can evaluate with summary evaluators", async () => { test.skip("can iterate over evaluate results", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -343,7 +339,6 @@ test.skip("can iterate over evaluate results", async () => { test("can pass multiple evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -391,7 +386,7 @@ test("can pass multiple evaluators", async () => { test("split info saved correctly", async () => { const client = new Client(); // create a new dataset - await client.createDataset("my_splits_ds2", { + await client.createDataset(TESTING_DATASET_NAME2, { description: "For testing purposed. Is created & deleted for each test run.", }); @@ -400,21 +395,22 @@ test("split info saved correctly", async () => { inputs: [{ input: 1 }, { input: 2 }, { input: 3 }], outputs: [{ output: 2 }, { output: 3 }, { output: 4 }], splits: [["test"], ["train"], ["validation", "test"]], - datasetName: "my_splits_ds2", + datasetName: TESTING_DATASET_NAME2, }); const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; }; await evaluate(targetFunc, { - data: client.listExamples({ datasetName: "my_splits_ds2" }), + data: client.listExamples({ datasetName: TESTING_DATASET_NAME2 }), description: "splits info saved correctly", }); - const exp = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + const exp = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); let myExp: TracerSession | null = null; for await (const session of exp) { myExp = session; @@ -425,13 +421,15 @@ test("split info saved correctly", async 
() => { await evaluate(targetFunc, { data: client.listExamples({ - datasetName: "my_splits_ds2", + datasetName: TESTING_DATASET_NAME2, splits: ["test"], }), description: "splits info saved correctly", }); - const exp2 = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + const exp2 = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); let myExp2: TracerSession | null = null; for await (const session of exp2) { if (myExp2 === null || session.start_time > myExp2.start_time) { @@ -445,13 +443,15 @@ test("split info saved correctly", async () => { await evaluate(targetFunc, { data: client.listExamples({ - datasetName: "my_splits_ds2", + datasetName: TESTING_DATASET_NAME2, splits: ["train"], }), description: "splits info saved correctly", }); - const exp3 = client.listProjects({ referenceDatasetName: "my_splits_ds2" }); + const exp3 = client.listProjects({ + referenceDatasetName: TESTING_DATASET_NAME2, + }); let myExp3: TracerSession | null = null; for await (const session of exp3) { if (myExp3 === null || session.start_time > myExp3.start_time) { @@ -466,7 +466,6 @@ test("split info saved correctly", async () => { test("can pass multiple summary evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -517,7 +516,6 @@ test("can pass AsyncIterable of Example's to evaluator instead of dataset name", }); const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -551,7 +549,6 @@ test("can pass AsyncIterable of Example's to evaluator instead of dataset name", test("max concurrency works with custom evaluators", async () => { const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -587,7 +584,6 @@ test("max concurrency works with custom evaluators", async () => { test("max concurrency works with summary evaluators", async () => { const targetFunc = 
(input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; @@ -704,7 +700,6 @@ test("evaluate can accept array of examples", async () => { } const targetFunc = (input: Record) => { - console.log("__input__", input); return { foo: input.input + 1, }; diff --git a/js/src/utils/env.ts b/js/src/utils/env.ts index 4c073a796..535ef2772 100644 --- a/js/src/utils/env.ts +++ b/js/src/utils/env.ts @@ -200,6 +200,15 @@ export function getEnvironmentVariable(name: string): string | undefined { } } +export function getLangSmithEnvironmentVariable( + name: string +): string | undefined { + return ( + getEnvironmentVariable(`LANGSMITH_${name}`) || + getEnvironmentVariable(`LANGCHAIN_${name}`) + ); +} + export function setEnvironmentVariable(name: string, value: string): void { if (typeof process !== "undefined") { // eslint-disable-next-line no-process-env